from typing import Any, Tuple

import torch
import torch.distributed as dist
from torch import Tensor
from torch.cuda.amp import custom_bwd, custom_fwd

from colossalai.communication.collective import all_gather, all_reduce, reduce_scatter
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import get_current_device


def get_parallel_group(parallel_mode: ParallelMode):
    return gpc.get_group(parallel_mode)


def get_global_rank():
    return gpc.get_global_rank()


def get_parallel_rank(parallel_mode: ParallelMode):
    return gpc.get_local_rank(parallel_mode)

class _Classifier2p5D(torch.autograd.Function):

    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(
        ctx: Any,
        A: Tensor,
        B: Tensor,
        bias,
        tesseract_dim: int,
        out_shape: Tuple[int, ...],
        row_rank: int,
        col_rank: int,
        row_parallel_mode: ParallelMode,
        col_parallel_mode: ParallelMode,
        data_parallel_rank: int,
        pipeline_parallel_rank: int,
        pipeline_parallel_size: int,
        tensor_parallel_size: int,
    ) -> Tensor:
        A = A.clone().detach()
        A_shape = A.shape
        A = A.reshape((-1, A_shape[-1]))
        B_shape = B.shape
        B = B.reshape((-1, B_shape[-1]))
        # gather the weight shards along the column parallel group
        B_temp = all_gather(B, -1, col_parallel_mode)
        if ctx:
            ctx.save_for_backward(A, B_temp)

        C = torch.matmul(A, B_temp.transpose(0, 1))

        # sum the partial products across the row parallel group
        C = all_reduce(C, row_parallel_mode)

        ctx.use_bias = bias is not None
        if bias is not None:
            C = C + bias

        out = C.reshape(out_shape)

        if ctx:
            ctx.tesseract_dim = tesseract_dim
            ctx.row_rank = row_rank
            ctx.col_rank = col_rank
            ctx.row_parallel_mode = row_parallel_mode
            ctx.col_parallel_mode = col_parallel_mode
            ctx.A_shape = A_shape
            ctx.B_shape = B_shape
            ctx.data_parallel_rank = data_parallel_rank
            ctx.pipeline_parallel_rank = pipeline_parallel_rank
            ctx.pipeline_parallel_size = pipeline_parallel_size
            ctx.tensor_parallel_size = tensor_parallel_size

        return out

    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        A, B = ctx.saved_tensors

        with torch.no_grad():
            A_grad = torch.matmul(output_grad, B)
            A_grad = A_grad.reshape(ctx.A_shape)
            B_grad = torch.matmul(output_grad.reshape(-1, output_grad.shape[-1]).transpose(0, 1), A)
            # scatter the weight gradient back into per-rank shards along the column group
            B_grad = reduce_scatter(B_grad, -1, ctx.col_parallel_mode)
            B_grad = B_grad.reshape(ctx.B_shape)

            if ctx.use_bias:
                bias_grad = torch.sum(output_grad, dim=tuple(range(output_grad.ndim - 1)))
                bias_grad = all_reduce(bias_grad, ctx.col_parallel_mode)
            else:
                bias_grad = None

        return A_grad, B_grad, bias_grad, None, None, None, None, None, None, None, None, None, None

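
# Semantically, the autograd function above realises a plain linear classifier head,
# C = A @ B^T (+ bias), over the full (unsharded) tensors; the collectives only move
# the shards around. A dense, single-device sketch of that contraction is given below
# for reference. The helper is illustrative and not part of the original API.
def _dense_classifier_reference(A: Tensor, B: Tensor, bias=None) -> Tensor:
    """Single-device reference for the math behind ``_Classifier2p5D``."""
    C = torch.matmul(A.reshape(-1, A.shape[-1]), B.transpose(0, 1))
    if bias is not None:
        C = C + bias
    return C.reshape(*A.shape[:-1], B.shape[0])
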
def classifier_2p5d(A: Tensor, B: Tensor, bias, tesseract_dim: int, out_shape: Tuple[int, ...], row_rank: int,
                    col_rank: int, row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode,
                    data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int,
                    tensor_parallel_size: int) -> Tensor:
    r"""Classifier for 2.5D tensor parallelism.

    Args:
        A (:class:`torch.tensor`): matrix :math:`A`.
        B (:class:`torch.tensor`): matrix :math:`B`.
        bias (:class:`torch.tensor`): the bias tensor.
        tesseract_dim (int): dimension of TESSERACT for 2.5D parallelism.
        out_shape (:class:`torch.size`): shape of the output tensor.
        row_rank (int): the rank of the row.
        col_rank (int): the rank of the column.
        row_parallel_mode (:class:`colossalai.context.ParallelMode`): row parallel mode.
        col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.
        data_parallel_rank (int): data parallel rank.
        pipeline_parallel_rank (int): pipeline parallel rank.
        pipeline_parallel_size (int): pipeline parallel size.
        tensor_parallel_size (int): tensor parallel size.

    Note:
        The parallel_mode should be one of the modes defined in ``ParallelMode``. More details about
        ``ParallelMode`` can be found in
        `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """
    return _Classifier2p5D.apply(A, B, bias, tesseract_dim, out_shape, row_rank, col_rank, row_parallel_mode,
                                 col_parallel_mode, data_parallel_rank, pipeline_parallel_rank,
                                 pipeline_parallel_size, tensor_parallel_size)

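
# How the 2.5D layers typically invoke this entry point is sketched below. The helper
# is illustrative only and not part of the original API: it assumes the 2.5D process
# groups have already been initialised (e.g. via colossalai.launch) and that the
# standard 2.5D row/column modes are used for the two parallel-mode arguments.
def _classifier_2p5d_usage_sketch(hidden: Tensor, weight: Tensor, bias, num_classes: int, tesseract_dim: int,
                                  row_rank: int, col_rank: int, data_parallel_rank: int,
                                  pipeline_parallel_rank: int, pipeline_parallel_size: int,
                                  tensor_parallel_size: int) -> Tensor:
    """Hedged usage sketch of ``classifier_2p5d`` for an activation shard ``hidden``."""
    out_shape = hidden.shape[:-1] + (num_classes, )
    return classifier_2p5d(hidden, weight, bias, tesseract_dim, out_shape, row_rank, col_rank,
                           ParallelMode.PARALLEL_2P5D_ROW, ParallelMode.PARALLEL_2P5D_COL, data_parallel_rank,
                           pipeline_parallel_rank, pipeline_parallel_size, tensor_parallel_size)
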
class Matmul_AB_2p5D(torch.autograd.Function):
    r"""Matrix multiplication for :math:`C = AB`.

    Args:
        A (:class:`torch.tensor`): matrix :math:`A`.
        B (:class:`torch.tensor`): matrix :math:`B`.
        tesseract_dim (int): dimension of TESSERACT for 2.5D parallelism.
        out_shape (:class:`torch.size`): shape of the output tensor.
        row_rank (int): the rank of the row.
        col_rank (int): the rank of the column.
        dep_rank (int): the rank of depth.
        row_parallel_mode (:class:`colossalai.context.ParallelMode`): row parallel mode.
        col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.
        data_parallel_rank (int): data parallel rank.
        pipeline_parallel_rank (int): pipeline parallel rank.
        pipeline_parallel_size (int): pipeline parallel size.
        tensor_parallel_size (int): tensor parallel size.

    Note:
        The parallel_mode should be one of the modes defined in ``ParallelMode``. More details about
        ``ParallelMode`` can be found in
        `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """

    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, A: Tensor, B: Tensor, tesseract_dim: int, out_shape: Tuple[int, ...], row_rank: int,
                col_rank: int, dep_rank: int, row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode,
                data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int,
                tensor_parallel_size: int) -> Tensor:
        # A: [b / dq, s, h / q] -> [(b * s) / dq, h / q]
        # B: [h / dq, s / q]
        # C: [b / dq, s, s / q] -> [(b * s) / dq, s / q]

        assert A.shape[-1] == B.shape[-2], \
            'Invalid shapes: A={}, B={} for AB.'.format(A.shape, B.shape)

        if ctx:
            ctx.save_for_backward(A, B)

        A_shape = A.shape
        A = A.reshape((-1, A_shape[-1]))
        B_shape = B.shape
        B = B.reshape((-1, B_shape[-1]))
        C_shape = (A.shape[0], B.shape[-1])
        C = torch.zeros(C_shape, dtype=A.dtype, device=get_current_device())

        # use circular buffer to store the communication tensor
        # 2 is enough for all cases
        A_list = [torch.empty_like(A) for _ in range(2)]
        B_list = [torch.empty_like(B) for _ in range(2)]

        row_group = gpc.get_group(row_parallel_mode)
        col_group = gpc.get_group(col_parallel_mode)

        src_a = \
            tesseract_dim * row_rank + tesseract_dim ** 2 * dep_rank + \
            data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
            pipeline_parallel_rank * tensor_parallel_size
        src_b = \
            col_rank + tesseract_dim ** 2 * dep_rank + \
            data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
            pipeline_parallel_rank * tensor_parallel_size

        opa = [None] * 2
        opb = [None] * 2

        A_list[0].copy_(A)
        B_list[0].copy_(B)
        opa[0] = dist.broadcast(A_list[0], src=src_a, group=row_group, async_op=True)
        opb[0] = dist.broadcast(B_list[0], src=src_b, group=col_group, async_op=True)
        cur = 0

        # SUMMA-style loop over the tesseract dimension: at step i, the i-th column of the row group
        # broadcasts its A block and the i-th row of the column group broadcasts its B block, while
        # the broadcast for step i + 1 is prefetched into the spare buffer to overlap with compute.
        for i in range(tesseract_dim):
            if i != tesseract_dim - 1:
                A_list[1 - cur].copy_(A)
                opa[1 - cur] = dist.broadcast(A_list[1 - cur], src=src_a + 1, group=row_group, async_op=True)
                B_list[1 - cur].copy_(B)
                opb[1 - cur] = dist.broadcast(B_list[1 - cur],
                                              src=src_b + tesseract_dim,
                                              group=col_group,
                                              async_op=True)

            if opa[cur] is not None:
                opa[cur].wait()
            if opb[cur] is not None:
                opb[cur].wait()

            torch.addmm(C, A_list[cur], B_list[cur], out=C)
            cur = 1 - cur
            src_a += 1
            src_b += tesseract_dim

        out = C.reshape(out_shape)

        if ctx:
            ctx.tesseract_dim = tesseract_dim
            ctx.row_rank = row_rank
            ctx.col_rank = col_rank
            ctx.dep_rank = dep_rank
            ctx.row_parallel_mode = row_parallel_mode
            ctx.col_parallel_mode = col_parallel_mode
            ctx.A_shape = A_shape
            ctx.B_shape = B_shape
            ctx.data_parallel_rank = data_parallel_rank
            ctx.pipeline_parallel_rank = pipeline_parallel_rank
            ctx.pipeline_parallel_size = pipeline_parallel_size
            ctx.tensor_parallel_size = tensor_parallel_size

        return out

    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        A, B = ctx.saved_tensors
        with torch.no_grad():
            A_grad = Matmul_ABT_2p5D.apply(output_grad, B, ctx.tesseract_dim, ctx.A_shape, ctx.row_rank, ctx.col_rank,
                                           ctx.dep_rank, ctx.row_parallel_mode, ctx.col_parallel_mode,
                                           ctx.data_parallel_rank, ctx.pipeline_parallel_rank,
                                           ctx.pipeline_parallel_size, ctx.tensor_parallel_size)
            B_grad = Matmul_ATB_2p5D.apply(A, output_grad, ctx.tesseract_dim, ctx.B_shape, ctx.row_rank, ctx.col_rank,
                                           ctx.dep_rank, ctx.row_parallel_mode, ctx.col_parallel_mode,
                                           ctx.data_parallel_rank, ctx.pipeline_parallel_rank,
                                           ctx.pipeline_parallel_size, ctx.tensor_parallel_size)
        return A_grad, B_grad, None, None, None, None, None, None, None, None, None, None, None, None, None

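
# The backward above relies on the dense matmul gradient identities for C = A @ B,
# namely A_grad = output_grad @ B^T and B_grad = A^T @ output_grad, which is why it
# dispatches to Matmul_ABT_2p5D and Matmul_ATB_2p5D respectively. A single-device
# sketch of those identities (illustrative helper, not part of the original API):
def _dense_matmul_ab_grads(A: Tensor, B: Tensor, output_grad: Tensor) -> Tuple[Tensor, Tensor]:
    """Dense reference gradients of ``C = A @ B`` with respect to ``A`` and ``B``."""
    A_grad = torch.matmul(output_grad, B.transpose(0, 1))
    B_grad = torch.matmul(A.transpose(0, 1), output_grad)
    return A_grad, B_grad
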
class Matmul_ABT_2p5D(torch.autograd.Function):
    r"""Matrix multiplication for :math:`C = AB^T`.

    Args:
        A (:class:`torch.tensor`): matrix :math:`A`.
        B (:class:`torch.tensor`): matrix :math:`B`.
        tesseract_dim (int): dimension of TESSERACT for 2.5D parallelism.
        out_shape (:class:`torch.size`): shape of the output tensor.
        row_rank (int): the rank of the row.
        col_rank (int): the rank of the column.
        dep_rank (int): the rank of depth.
        row_parallel_mode (:class:`colossalai.context.ParallelMode`): row parallel mode.
        col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.
        data_parallel_rank (int): data parallel rank.
        pipeline_parallel_rank (int): pipeline parallel rank.
        pipeline_parallel_size (int): pipeline parallel size.
        tensor_parallel_size (int): tensor parallel size.

    Note:
        The parallel_mode should be one of the modes defined in ``ParallelMode``. More details about
        ``ParallelMode`` can be found in
        `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """

    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, A: Tensor, B: Tensor, tesseract_dim: int, out_shape: Tuple[int, ...], row_rank: int,
                col_rank: int, dep_rank: int, row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode,
                data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int,
                tensor_parallel_size: int) -> Tensor:

        assert A.shape[-1] == B.shape[-1], \
            'Invalid shapes: A={}, B={} for ABT.'.format(A.shape, B.shape)

        if ctx:
            ctx.save_for_backward(A, B)

        A_shape = A.shape
        A = A.reshape((-1, A_shape[-1]))
        B_shape = B.shape
        B = B.reshape((-1, B_shape[-1]))
        C_shape = (A.shape[0], B.shape[0])
        C = torch.empty(C_shape, dtype=A.dtype, device=get_current_device())

        # use circular buffer to store the communication tensor
        # 2 is enough for all cases
        B_list = [torch.empty_like(B) for _ in range(2)]
        C_list = [torch.empty_like(C) for _ in range(2)]

        row_group = gpc.get_group(row_parallel_mode)
        col_group = gpc.get_group(col_parallel_mode)

        src_b = \
            col_rank + tesseract_dim ** 2 * dep_rank + \
            data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
            pipeline_parallel_rank * tensor_parallel_size
        src_c = \
            tesseract_dim * row_rank + tesseract_dim ** 2 * dep_rank + \
            data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
            pipeline_parallel_rank * tensor_parallel_size

        opb = [None] * 2
        opr = [None] * 2

        B_list[0].copy_(B)
        opb[0] = dist.broadcast(B_list[0], src=src_b, group=col_group, async_op=True)
        cur = 0

        # broadcast B blocks along the column group while the partial C blocks are reduced
        # back to their owner in the row group, keeping two iterations in flight at a time
        for i in range(tesseract_dim):
            if i != tesseract_dim - 1:
                B_list[1 - cur].copy_(B)
                opb[1 - cur] = dist.broadcast(B_list[1 - cur],
                                              src=src_b + tesseract_dim,
                                              group=col_group,
                                              async_op=True)

            if opr[cur] is not None:
                opr[cur].wait()
                if i - 2 == col_rank:
                    C.copy_(C_list[cur])

            if opb[cur] is not None:
                opb[cur].wait()

            torch.matmul(A, B_list[cur].transpose(0, 1), out=C_list[cur])
            opr[cur] = dist.reduce(C_list[cur], dst=src_c, group=row_group, async_op=True)
            cur = 1 - cur
            src_b += tesseract_dim
            src_c += 1

        for op in opr:
            op.wait()

        if tesseract_dim - 2 == col_rank:
            C.copy_(C_list[cur])
        if tesseract_dim - 1 == col_rank:
            C.copy_(C_list[1 - cur])

        out = C.reshape(out_shape)

        if ctx:
            ctx.tesseract_dim = tesseract_dim
            ctx.row_rank = row_rank
            ctx.col_rank = col_rank
            ctx.dep_rank = dep_rank
            ctx.row_parallel_mode = row_parallel_mode
            ctx.col_parallel_mode = col_parallel_mode
            ctx.A_shape = A_shape
            ctx.B_shape = B_shape
            ctx.data_parallel_rank = data_parallel_rank
            ctx.pipeline_parallel_rank = pipeline_parallel_rank
            ctx.pipeline_parallel_size = pipeline_parallel_size
            ctx.tensor_parallel_size = tensor_parallel_size

        return out

    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        A, B = ctx.saved_tensors
        with torch.no_grad():
            A_grad = Matmul_AB_2p5D.apply(output_grad, B, ctx.tesseract_dim, ctx.A_shape, ctx.row_rank, ctx.col_rank,
                                          ctx.dep_rank, ctx.row_parallel_mode, ctx.col_parallel_mode,
                                          ctx.data_parallel_rank, ctx.pipeline_parallel_rank,
                                          ctx.pipeline_parallel_size, ctx.tensor_parallel_size)
            B_grad = Matmul_ATB_2p5D.apply(output_grad, A, ctx.tesseract_dim, ctx.B_shape, ctx.row_rank, ctx.col_rank,
                                           ctx.dep_rank, ctx.row_parallel_mode, ctx.col_parallel_mode,
                                           ctx.data_parallel_rank, ctx.pipeline_parallel_rank,
                                           ctx.pipeline_parallel_size, ctx.tensor_parallel_size)
        return A_grad, B_grad, None, None, None, None, None, None, None, None, None, None, None, None, None

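
# The ``src_a``/``src_b``/``src_c`` expressions used by the forward passes map a cell of
# the (dep, row, col) tesseract grid onto a global rank for dist.broadcast/dist.reduce,
# assuming ranks are laid out data-parallel-major, then pipeline stage, then the tensor
# grid. A compact sketch of that arithmetic (illustrative helper, not part of the
# original API); the looped source values start from this quantity with the stepped
# coordinate set to zero and advance by 1 (columns) or ``tesseract_dim`` (rows):
def _tesseract_global_rank(row_rank: int, col_rank: int, dep_rank: int, tesseract_dim: int,
                           data_parallel_rank: int, pipeline_parallel_rank: int,
                           pipeline_parallel_size: int, tensor_parallel_size: int) -> int:
    """Global rank of the grid cell ``(dep_rank, row_rank, col_rank)`` under the assumed layout."""
    non_tensor_offset = data_parallel_rank * pipeline_parallel_size * tensor_parallel_size \
        + pipeline_parallel_rank * tensor_parallel_size
    return non_tensor_offset + dep_rank * tesseract_dim ** 2 + row_rank * tesseract_dim + col_rank
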
class Matmul_ATB_2p5D(torch.autograd.Function):
    r"""Matrix multiplication for :math:`C = A^TB`.

    Args:
        A (:class:`torch.tensor`): matrix :math:`A`.
        B (:class:`torch.tensor`): matrix :math:`B`.
        tesseract_dim (int): dimension of TESSERACT for 2.5D parallelism.
        out_shape (:class:`torch.size`): shape of the output tensor.
        row_rank (int): the rank of the row.
        col_rank (int): the rank of the column.
        dep_rank (int): the rank of depth.
        row_parallel_mode (:class:`colossalai.context.ParallelMode`): row parallel mode.
        col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.
        data_parallel_rank (int): data parallel rank.
        pipeline_parallel_rank (int): pipeline parallel rank.
        pipeline_parallel_size (int): pipeline parallel size.
        tensor_parallel_size (int): tensor parallel size.

    Note:
        The parallel_mode should be one of the modes defined in ``ParallelMode``. More details about
        ``ParallelMode`` can be found in
        `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """

    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, A: Tensor, B: Tensor, tesseract_dim: int, out_shape: Tuple[int, ...], row_rank: int,
                col_rank: int, dep_rank: int, row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode,
                data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int,
                tensor_parallel_size: int) -> Tensor:

        assert A.shape[-2] == B.shape[-2], \
            'Invalid shapes: A={}, B={} for ATB.'.format(A.shape, B.shape)

        if ctx:
            ctx.save_for_backward(A, B)

        A_shape = A.shape
        A = A.reshape((-1, A_shape[-1]))
        B_shape = B.shape
        B = B.reshape((-1, B_shape[-1]))
        C_shape = (A.shape[-1], B.shape[-1])
        C = torch.empty(C_shape, dtype=A.dtype, device=get_current_device())

        # use circular buffer to store the communication tensor
        # 2 is enough for all cases
        A_list = [torch.empty_like(A) for _ in range(2)]
        C_list = [torch.empty_like(C) for _ in range(2)]

        row_group = gpc.get_group(row_parallel_mode)
        col_group = gpc.get_group(col_parallel_mode)

        src_a = \
            tesseract_dim * row_rank + tesseract_dim ** 2 * dep_rank + \
            data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
            pipeline_parallel_rank * tensor_parallel_size
        src_c = \
            col_rank + tesseract_dim ** 2 * dep_rank + \
            data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
            pipeline_parallel_rank * tensor_parallel_size

        opa = [None] * 2
        opr = [None] * 2

        A_list[0].copy_(A)
        opa[0] = dist.broadcast(A_list[0], src=src_a, group=row_group, async_op=True)
        cur = 0

        # broadcast A blocks along the row group while the partial C blocks are reduced
        # back to their owner in the column group, keeping two iterations in flight at a time
        for i in range(tesseract_dim):
            if i != tesseract_dim - 1:
                A_list[1 - cur].copy_(A)
                opa[1 - cur] = dist.broadcast(A_list[1 - cur], src=src_a + 1, group=row_group, async_op=True)

            if opr[cur] is not None:
                opr[cur].wait()
                if i - 2 == row_rank:
                    C.copy_(C_list[cur])

            if opa[cur] is not None:
                opa[cur].wait()

            torch.matmul(A_list[cur].transpose(0, 1), B, out=C_list[cur])
            opr[cur] = dist.reduce(C_list[cur], dst=src_c, group=col_group, async_op=True)
            cur = 1 - cur
            src_a += 1
            src_c += tesseract_dim

        for op in opr:
            op.wait()

        if tesseract_dim - 2 == row_rank:
            C.copy_(C_list[cur])
        if tesseract_dim - 1 == row_rank:
            C.copy_(C_list[1 - cur])

        out = C.reshape(out_shape)

        if ctx:
            ctx.tesseract_dim = tesseract_dim
            ctx.row_rank = row_rank
            ctx.col_rank = col_rank
            ctx.dep_rank = dep_rank
            ctx.row_parallel_mode = row_parallel_mode
            ctx.col_parallel_mode = col_parallel_mode
            ctx.A_shape = A_shape
            ctx.B_shape = B_shape
            ctx.data_parallel_rank = data_parallel_rank
            ctx.pipeline_parallel_rank = pipeline_parallel_rank
            ctx.pipeline_parallel_size = pipeline_parallel_size
            ctx.tensor_parallel_size = tensor_parallel_size

        return out

    @staticmethod
|
Develop/experiments (#59)
* Add gradient accumulation, fix lr scheduler
* fix FP16 optimizer and adapted torch amp with tensor parallel (#18)
* fixed bugs in compatibility between torch amp and tensor parallel and performed some minor fixes
* fixed trainer
* Revert "fixed trainer"
This reverts commit 2e0b0b76990e8d4e337add483d878c0f61cf5097.
* improved consistency between trainer, engine and schedule (#23)
Co-authored-by: 1SAA <c2h214748@gmail.com>
* Split conv2d, class token, positional embedding in 2d, Fix random number in ddp
Fix convergence in cifar10, Imagenet1000
* Integrate 1d tensor parallel in Colossal-AI (#39)
* fixed 1D and 2D convergence (#38)
* optimized 2D operations
* fixed 1D ViT convergence problem
* Feature/ddp (#49)
* remove redundancy func in setup (#19) (#20)
* use env to control the language of doc (#24) (#25)
* Support TP-compatible Torch AMP and Update trainer API (#27)
* Add gradient accumulation, fix lr scheduler
* fix FP16 optimizer and adapted torch amp with tensor parallel (#18)
* fixed bugs in compatibility between torch amp and tensor parallel and performed some minor fixes
* fixed trainer
* Revert "fixed trainer"
This reverts commit 2e0b0b76990e8d4e337add483d878c0f61cf5097.
* improved consistency between trainer, engine and schedule (#23)
Co-authored-by: 1SAA <c2h214748@gmail.com>
Co-authored-by: 1SAA <c2h214748@gmail.com>
Co-authored-by: ver217 <lhx0217@gmail.com>
* add an example of ViT-B/16 and remove w_norm clipping in LAMB (#29)
* add explanation for ViT example (#35) (#36)
* support torch ddp
* fix loss accumulation
* add log for ddp
* change seed
* modify timing hook
Co-authored-by: Frank Lee <somerlee.9@gmail.com>
Co-authored-by: 1SAA <c2h214748@gmail.com>
Co-authored-by: binmakeswell <binmakeswell@gmail.com>
* Feature/pipeline (#40)
* remove redundancy func in setup (#19) (#20)
* use env to control the language of doc (#24) (#25)
* Support TP-compatible Torch AMP and Update trainer API (#27)
* Add gradient accumulation, fix lr scheduler
* fix FP16 optimizer and adapted torch amp with tensor parallel (#18)
* fixed bugs in compatibility between torch amp and tensor parallel and performed some minor fixes
* fixed trainer
* Revert "fixed trainer"
This reverts commit 2e0b0b76990e8d4e337add483d878c0f61cf5097.
* improved consistency between trainer, engine and schedule (#23)
Co-authored-by: 1SAA <c2h214748@gmail.com>
Co-authored-by: 1SAA <c2h214748@gmail.com>
Co-authored-by: ver217 <lhx0217@gmail.com>
* add an example of ViT-B/16 and remove w_norm clipping in LAMB (#29)
* add explanation for ViT example (#35) (#36)
* optimize communication of pipeline parallel
* fix grad clip for pipeline
Co-authored-by: Frank Lee <somerlee.9@gmail.com>
Co-authored-by: 1SAA <c2h214748@gmail.com>
Co-authored-by: binmakeswell <binmakeswell@gmail.com>
* optimized 3d layer to fix slow computation ; tested imagenet performance with 3d; reworked lr_scheduler config definition; fixed launch args; fixed some printing issues; simplified apis of 3d layers (#51)
* Update 2.5d layer code to get a similar accuracy on imagenet-1k dataset
* update api for better usability (#58)
update api for better usability
Co-authored-by: 1SAA <c2h214748@gmail.com>
Co-authored-by: ver217 <lhx0217@gmail.com>
Co-authored-by: puck_WCR <46049915+WANG-CR@users.noreply.github.com>
Co-authored-by: binmakeswell <binmakeswell@gmail.com>
Co-authored-by: アマデウス <kurisusnowdeng@users.noreply.github.com>
Co-authored-by: BoxiangW <45734921+BoxiangW@users.noreply.github.com>
2021-12-09 07:08:29 +00:00
|
|
|
@custom_bwd
|
2021-10-28 16:21:23 +00:00
|
|
|
def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
|
|
|
|
A, B = ctx.saved_tensors
|
Develop/experiments (#59)
* Add gradient accumulation, fix lr scheduler
* fix FP16 optimizer and adapted torch amp with tensor parallel (#18)
* fixed bugs in compatibility between torch amp and tensor parallel and performed some minor fixes
* fixed trainer
* Revert "fixed trainer"
This reverts commit 2e0b0b76990e8d4e337add483d878c0f61cf5097.
* improved consistency between trainer, engine and schedule (#23)
Co-authored-by: 1SAA <c2h214748@gmail.com>
* Split conv2d, class token, positional embedding in 2d, Fix random number in ddp
Fix convergence in cifar10, Imagenet1000
* Integrate 1d tensor parallel in Colossal-AI (#39)
* fixed 1D and 2D convergence (#38)
* optimized 2D operations
* fixed 1D ViT convergence problem
* Feature/ddp (#49)
* remove redundancy func in setup (#19) (#20)
* use env to control the language of doc (#24) (#25)
* Support TP-compatible Torch AMP and Update trainer API (#27)
* Add gradient accumulation, fix lr scheduler
* fix FP16 optimizer and adapted torch amp with tensor parallel (#18)
* fixed bugs in compatibility between torch amp and tensor parallel and performed some minor fixes
* fixed trainer
* Revert "fixed trainer"
This reverts commit 2e0b0b76990e8d4e337add483d878c0f61cf5097.
* improved consistency between trainer, engine and schedule (#23)
Co-authored-by: 1SAA <c2h214748@gmail.com>
Co-authored-by: 1SAA <c2h214748@gmail.com>
Co-authored-by: ver217 <lhx0217@gmail.com>
* add an example of ViT-B/16 and remove w_norm clipping in LAMB (#29)
* add explanation for ViT example (#35) (#36)
* support torch ddp
* fix loss accumulation
* add log for ddp
* change seed
* modify timing hook
Co-authored-by: Frank Lee <somerlee.9@gmail.com>
Co-authored-by: 1SAA <c2h214748@gmail.com>
Co-authored-by: binmakeswell <binmakeswell@gmail.com>
* Feature/pipeline (#40)
* remove redundancy func in setup (#19) (#20)
* use env to control the language of doc (#24) (#25)
* Support TP-compatible Torch AMP and Update trainer API (#27)
* Add gradient accumulation, fix lr scheduler
* fix FP16 optimizer and adapted torch amp with tensor parallel (#18)
* fixed bugs in compatibility between torch amp and tensor parallel and performed some minor fixes
* fixed trainer
* Revert "fixed trainer"
This reverts commit 2e0b0b76990e8d4e337add483d878c0f61cf5097.
* improved consistency between trainer, engine and schedule (#23)
Co-authored-by: 1SAA <c2h214748@gmail.com>
Co-authored-by: 1SAA <c2h214748@gmail.com>
Co-authored-by: ver217 <lhx0217@gmail.com>
* add an example of ViT-B/16 and remove w_norm clipping in LAMB (#29)
* add explanation for ViT example (#35) (#36)
* optimize communication of pipeline parallel
* fix grad clip for pipeline
Co-authored-by: Frank Lee <somerlee.9@gmail.com>
Co-authored-by: 1SAA <c2h214748@gmail.com>
Co-authored-by: binmakeswell <binmakeswell@gmail.com>
* optimized 3d layer to fix slow computation ; tested imagenet performance with 3d; reworked lr_scheduler config definition; fixed launch args; fixed some printing issues; simplified apis of 3d layers (#51)
* Update 2.5d layer code to get a similar accuracy on imagenet-1k dataset
* update api for better usability (#58)
update api for better usability
Co-authored-by: 1SAA <c2h214748@gmail.com>
Co-authored-by: ver217 <lhx0217@gmail.com>
Co-authored-by: puck_WCR <46049915+WANG-CR@users.noreply.github.com>
Co-authored-by: binmakeswell <binmakeswell@gmail.com>
Co-authored-by: アマデウス <kurisusnowdeng@users.noreply.github.com>
Co-authored-by: BoxiangW <45734921+BoxiangW@users.noreply.github.com>
2021-12-09 07:08:29 +00:00
|
|
|
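        # For C = A^T B, the chain rule gives dL/dA = B (dL/dC)^T and
        # dL/dB = A (dL/dC); the distributed ABT / AB matmuls below compute
        # these without materialising the full matrices on any single rank.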
        with torch.no_grad():
            A_grad = Matmul_ABT_2p5D.apply(B, output_grad, ctx.tesseract_dim, ctx.A_shape, ctx.row_rank, ctx.col_rank,
                                           ctx.dep_rank, ctx.row_parallel_mode, ctx.col_parallel_mode,
                                           ctx.data_parallel_rank, ctx.pipeline_parallel_rank,
                                           ctx.pipeline_parallel_size, ctx.tensor_parallel_size)
            B_grad = Matmul_AB_2p5D.apply(A, output_grad, ctx.tesseract_dim, ctx.B_shape, ctx.row_rank, ctx.col_rank,
                                          ctx.dep_rank, ctx.row_parallel_mode, ctx.col_parallel_mode,
                                          ctx.data_parallel_rank, ctx.pipeline_parallel_rank,
                                          ctx.pipeline_parallel_size, ctx.tensor_parallel_size)
        return A_grad, B_grad, None, None, None, None, None, None, None, None, None, None, None, None, None


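# Single-process reference of the math implemented by the forward above
# (added for clarity; not used by the library). The distributed kernel
# partitions A, B and the result across the 2.5D tesseract grid, but the
# global operation it assembles is simply C = A^T B over the flattened
# leading dimensions.
def _matmul_atb_reference(A: Tensor, B: Tensor) -> Tensor:
    a2d = A.reshape(-1, A.shape[-1])                 # (s, m)
    b2d = B.reshape(-1, B.shape[-1])                 # (s, n)
    return torch.matmul(a2d.transpose(0, 1), b2d)    # (m, n)

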
class _Add_Bias_2p5D(torch.autograd.Function):
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, input: Tensor, bias: Tensor, output_size_per_partition: int, tesseract_dim: int,
                row_rank: int, col_rank: int, dep_rank: int, col_parallel_mode: ParallelMode, skip_bias_add: bool,
                data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int,
                tensor_parallel_size: int) -> Tensor:
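        # The bias is only materialised on the first tensor row of the
        # tesseract plane; every other row receives it through the broadcast
        # over the column group below.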
        if row_rank == 0:
            bias_temp = bias.clone()
        else:
            bias_temp = torch.zeros(output_size_per_partition, dtype=bias.dtype, device=get_current_device())
        src_rank = \
            col_rank + dep_rank * tesseract_dim ** 2 + \
            data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
            pipeline_parallel_rank * tensor_parallel_size
        dist.broadcast(bias_temp, src=src_rank, group=get_parallel_group(col_parallel_mode))

        ctx.row_rank = row_rank
        ctx.col_rank = col_rank
        ctx.dep_rank = dep_rank
        ctx.tesseract_dim = tesseract_dim
        ctx.col_parallel_mode = col_parallel_mode
        ctx.bias = skip_bias_add
        ctx.data_parallel_rank = data_parallel_rank
        ctx.pipeline_parallel_rank = pipeline_parallel_rank
        ctx.pipeline_parallel_size = pipeline_parallel_size
        ctx.tensor_parallel_size = tensor_parallel_size

        if skip_bias_add:
            return bias_temp
        else:
            output = input + bias_temp
            return output

    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        row_rank = ctx.row_rank
        col_rank = ctx.col_rank
        dep_rank = ctx.dep_rank
        tesseract_dim = ctx.tesseract_dim
        col_parallel_mode = ctx.col_parallel_mode
        data_parallel_rank = ctx.data_parallel_rank
        pipeline_parallel_rank = ctx.pipeline_parallel_rank
        pipeline_parallel_size = ctx.pipeline_parallel_size
        tensor_parallel_size = ctx.tensor_parallel_size

        if ctx.bias:
            dst_rank = \
                col_rank + dep_rank * (tesseract_dim ** 2) + \
                data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
                pipeline_parallel_rank * tensor_parallel_size
            dist.reduce(output_grad, dst=dst_rank, group=get_parallel_group(col_parallel_mode))
            if row_rank == 0:
                return \
                    None, output_grad, None, None, None, None, None, None, \
                    None, None, None, None, None, None, None, None
            else:
                grad_tmp = torch.zeros_like(output_grad)
                return \
                    None, grad_tmp, None, None, None, None, None, None, \
                    None, None, None, None, None, None, None, None
        else:
            reduce_dim = tuple(range(output_grad.ndim - 1))
            reduce = torch.sum(output_grad, dim=reduce_dim)
            dst_rank = \
                col_rank + dep_rank * (tesseract_dim ** 2) + \
                data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \
                pipeline_parallel_rank * tensor_parallel_size
            dist.reduce(reduce, dst=dst_rank, group=get_parallel_group(col_parallel_mode))
            if row_rank == 0:
                return \
                    output_grad, reduce, None, None, None, None, None, None, None, \
                    None, None, None, None, None, None, None, None
            else:
                reduce_tmp = torch.zeros_like(reduce)
                return \
                    output_grad, reduce_tmp, None, None, None, None, None, None, \
                    None, None, None, None, None, None, None, None, None


def add_bias_2p5d(input: Tensor, bias: Tensor, output_size_per_partition: int, tesseract_dim: int, row_rank: int,
                  col_rank: int, dep_rank: int, col_parallel_mode: ParallelMode, skip_bias_add: bool,
                  data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int,
                  tensor_parallel_size: int) -> Tensor:
    r"""Matrix add bias: :math:`C = A + b`.

    Args:
        input (:class:`torch.tensor`): matrix :math:`A`.
        bias (:class:`torch.tensor`): bias vector :math:`b`.
        output_size_per_partition (int): output size in each partition.
        tesseract_dim (int): dimension of TESSERACT for 2.5D parallelism.
        row_rank (int): the rank of row.
        col_rank (int): the rank of column.
        dep_rank (int): the rank of depth.
        col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.
        skip_bias_add (bool): If set to ``True``, it will skip the bias add for the linear layer,
            which is reserved for kernel fusion.
        data_parallel_rank (int): data parallel rank.
        pipeline_parallel_rank (int): pipeline parallel rank.
        pipeline_parallel_size (int): pipeline parallel size.
        tensor_parallel_size (int): tensor parallel size.

    Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """
    return _Add_Bias_2p5D.apply(input, bias, output_size_per_partition, tesseract_dim, row_rank, col_rank, dep_rank,
                                col_parallel_mode, skip_bias_add, data_parallel_rank, pipeline_parallel_rank,
                                pipeline_parallel_size, tensor_parallel_size)


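# A minimal usage sketch (added for illustration, not part of the library):
# assuming a 2.5D context has already been set up via colossalai.launch, the
# rank and size arguments are usually read from the global context as below.
# The parallel-mode names and the pipeline fallback are assumptions about the
# surrounding configuration.
def _example_add_bias_2p5d(partial_output: Tensor, bias: Tensor, output_size_per_partition: int,
                           tesseract_dim: int) -> Tensor:
    row_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
    col_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
    dep_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP)
    data_parallel_rank = gpc.get_local_rank(ParallelMode.DATA)
    pipeline_initialized = gpc.is_initialized(ParallelMode.PIPELINE)
    pipeline_parallel_rank = gpc.get_local_rank(ParallelMode.PIPELINE) if pipeline_initialized else 0
    pipeline_parallel_size = gpc.get_world_size(ParallelMode.PIPELINE) if pipeline_initialized else 1
    tensor_parallel_size = gpc.get_world_size(ParallelMode.TENSOR)
    return add_bias_2p5d(partial_output, bias, output_size_per_partition, tesseract_dim, row_rank, col_rank,
                         dep_rank, ParallelMode.PARALLEL_2P5D_COL, False, data_parallel_rank,
                         pipeline_parallel_rank, pipeline_parallel_size, tensor_parallel_size)

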
class _Layernorm2p5D(torch.autograd.Function):
    r"""Layernorm.

    Args:
        input (:class:`torch.tensor`): input matrix.
        E_x (:class:`torch.tensor`): mean.
        Var_x (:class:`torch.tensor`): reciprocal of the standard deviation, i.e. :math:`1 / \sqrt{\mathrm{Var}[x] + \epsilon}`.
        hidden_size (int): hidden size.
        row_parallel_mode (:class:`colossalai.context.ParallelMode`): row parallel mode.

    Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """

    @staticmethod
    @custom_fwd(cast_inputs=torch.float32)
    def forward(ctx: Any, input: Tensor, E_x: Tensor, Var_x: Tensor, hidden_size: int,
                row_parallel_mode: ParallelMode) -> Tensor:
        input = input - E_x
        # in here, input = x - E[x], Var_x = 1 / sqrt(Var[x] + eps)
        ctx.hidden_size = hidden_size
        output = input * Var_x
        ctx.save_for_backward(output, Var_x)
        ctx.row_parallel_mode = row_parallel_mode
        return output

    @staticmethod
    @custom_bwd
    def backward(ctx, output_grad):
        row_parallel_mode = ctx.row_parallel_mode
        x, Var_x = ctx.saved_tensors
        # in here, Var_x = 1 / sqrt(Var[x] + eps), x = (x - E[x]) * Var_x
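        # Gradient of layernorm (without affine parameters):
        #   dL/dx = (g - mean(g) - x_hat * mean(g * x_hat)) / sqrt(Var[x] + eps)
        # with g = output_grad and x_hat = (x - E[x]) / sqrt(Var[x] + eps).
        # Both means run over the full hidden dimension, hence the all-reduce
        # over the row-parallel group below.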
        with torch.no_grad():
            output_grad_sum = torch.sum(output_grad, dim=-1, keepdim=True)
            torch.distributed.all_reduce(output_grad_sum, group=get_parallel_group(row_parallel_mode))
            output_grad_sum /= ctx.hidden_size

            output_grad_mul_x_sum = torch.sum(output_grad * x, dim=-1, keepdim=True)
            torch.distributed.all_reduce(output_grad_mul_x_sum, group=get_parallel_group(row_parallel_mode))
            output_grad_mul_x_sum /= ctx.hidden_size

            input_grad = output_grad.clone()
            input_grad -= x * output_grad_mul_x_sum
            input_grad -= output_grad_sum
            input_grad *= Var_x

        return input_grad, None, None, None, None, None, None


def layernorm_2p5d(input: Tensor, E_x: Tensor, Var_x: Tensor, hidden_size: int,
                   row_parallel_mode: ParallelMode) -> Tensor:
    r"""Layernorm.

    Args:
        input (:class:`torch.tensor`): input matrix.
        E_x (:class:`torch.tensor`): mean.
        Var_x (:class:`torch.tensor`): reciprocal of the standard deviation, i.e. :math:`1 / \sqrt{\mathrm{Var}[x] + \epsilon}`.
        hidden_size (int): hidden size.
        row_parallel_mode (:class:`colossalai.context.ParallelMode`): row parallel mode.

    Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """
    return _Layernorm2p5D.apply(input, E_x, Var_x, hidden_size, row_parallel_mode)


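# A minimal sketch of how the statistics fed into layernorm_2p5d are usually
# produced (added for illustration, not part of the library): the hidden
# dimension is split over the 2.5D row group, so local sums are all-reduced
# before forming the mean and the reciprocal standard deviation. The epsilon
# value and the parallel-mode name below are assumptions.
def _example_layernorm_2p5d(x: Tensor, hidden_size: int, eps: float = 1e-5) -> Tensor:
    row_mode = ParallelMode.PARALLEL_2P5D_ROW
    # E[x] over the full hidden dimension
    e_x = torch.sum(x, dim=-1, keepdim=True)
    dist.all_reduce(e_x, group=gpc.get_group(row_mode))
    e_x /= hidden_size
    # E[x^2] over the full hidden dimension, then Var[x] = E[x^2] - E[x]^2
    var_x = torch.sum(x * x, dim=-1, keepdim=True)
    dist.all_reduce(var_x, group=gpc.get_group(row_mode))
    var_x = var_x / hidden_size - e_x * e_x
    inv_std = 1.0 / torch.sqrt(var_x + eps)
    return layernorm_2p5d(x, e_x, inv_std, hidden_size, row_mode)

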
class _AllGatherTensor2p5D(torch.autograd.Function):
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, inputs: Tensor, dim: int, col_parallel_mode: ParallelMode) -> Tensor:
        ctx.dim = dim
        ctx.col_parallel_mode = col_parallel_mode
        outputs = all_gather(inputs, dim, col_parallel_mode)
        return outputs

    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
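        # reduce-scatter is the adjoint of all-gather: each rank receives the
        # summed gradient for the slice it originally contributed.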
        grad = reduce_scatter(output_grad, ctx.dim, ctx.col_parallel_mode)
        return grad.contiguous(), None, None


def all_gather_tensor_2p5d(inputs: Tensor, dim: int, col_parallel_mode: ParallelMode) -> Tensor:
    r"""All-gather the weight of 2.5D parallelism along the given dimension.

    Args:
        inputs (:class:`torch.tensor`): input tensor.
        dim (int): dimension of all-gather.
        col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.

    Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """
    return _AllGatherTensor2p5D.apply(inputs, dim, col_parallel_mode)


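# A minimal usage sketch (added for illustration, not part of the library):
# gathering a weight shard along its last dimension over the 2.5D column
# group. The gather dimension and the parallel-mode name are assumptions.
def _example_all_gather_weight(weight_shard: Tensor) -> Tensor:
    # Gradients flowing back through the gathered tensor are reduce-scattered
    # to the local shard in the backward pass above.
    return all_gather_tensor_2p5d(weight_shard, -1, ParallelMode.PARALLEL_2P5D_COL)

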
class SplitFirst(torch.autograd.Function):
|
2022-03-25 05:02:39 +00:00
|
|
|
r"""
|
|
|
|
|
|
|
|
Args:
|
|
|
|
inputs (:class:`torch.tensor`): input tensor.
|
|
|
|
tesseract_dim (int): dimension of TESSERACT fo 2.5D parallelism
|
|
|
|
col_parallel_mode (:class:`colossalai.context.ParallelMode`): column parallel mode.
|
|
|
|
|
|
|
|
Note:
|
|
|
|
The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
|
|
|
|
in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
|
2022-01-10 10:05:58 +00:00
|
|
|
"""
|
2022-03-09 13:42:30 +00:00
|
|
|
|
2021-10-28 16:21:23 +00:00
|
|
|
@staticmethod
|
    @custom_fwd(cast_inputs=torch.float16)
    def forward(ctx: Any, inputs: Tensor, tesseract_dim: int, col_parallel_mode: ParallelMode) -> Tensor:
        ctx.tesseract_dim = tesseract_dim
        ctx.batch_size = inputs.size(0)
        ctx.para_mode = col_parallel_mode
        row_rank = gpc.get_local_rank(col_parallel_mode)

        outputs = inputs.chunk(tesseract_dim, dim=0)[row_rank]
        return outputs

    @staticmethod
    @custom_bwd
    def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]:
        grad_shape = (ctx.batch_size,) + output_grad.shape[1:]
        grad = torch.empty(grad_shape, dtype=output_grad.dtype, device=get_current_device())
        dist.all_gather(list(grad.chunk(ctx.tesseract_dim, dim=0)),
                        output_grad.contiguous(),
                        group=gpc.get_group(ctx.para_mode))
        return grad, None, None
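

# SplitFirst keeps only this rank's slice of the batch in forward and
# re-assembles the full-batch gradient with all_gather in backward, so the
# function behaves like an identity on this rank's shard under autograd.
#
# Rough shape sketch (illustrative, assuming tesseract_dim = 2; the sizes are
# made up for the example):
#     inputs:  (16, hidden) -- replicated on every rank of the column group
#     outputs: (8,  hidden) -- this rank's contiguous slice along dim 0
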
def split_batch_2p5d(input_: Tensor, dim: int = 0) -> Tensor:
    """Splits a 2.5D tensor along the specified dimension across the column parallel group.

    Args:
        input_ (:class:`torch.tensor`): Input tensor.
        dim (int): Dimension along which to split.

    Returns:
        :class:`torch.tensor`: The local shard of the split tensor.
    """
    dim_size = input_.size(dim)
    world_size = gpc.get_world_size(ParallelMode.PARALLEL_2P5D_COL)

    if world_size <= 1:
        return input_

    assert dim_size % world_size == 0, \
        f'The batch size ({dim_size}) is not a multiple of 2.5D size * depth ({world_size}).'

    return torch.chunk(input_, world_size,
                       dim=dim)[gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)].contiguous()
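

# Illustrative usage sketch for split_batch_2p5d (assumes the 2.5D parallel
# context has already been initialized, e.g. via colossalai.launch with a
# '2.5d' tensor-parallel config; the shapes below are made up for the example):
#
#     x = torch.randn(16, 1024, device=get_current_device())  # replicated batch
#     x_local = split_batch_2p5d(x, dim=0)
#     # x_local is this rank's contiguous shard of shape
#     # (16 // gpc.get_world_size(ParallelMode.PARALLEL_2P5D_COL), 1024)
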
class _ReduceTensor2p5D(torch.autograd.Function):

    @staticmethod
    def forward(ctx, input_, parallel_mode):
        return all_reduce(input_, parallel_mode)

    @staticmethod
    def backward(ctx, output_grad):
        return output_grad, None
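

# _ReduceTensor2p5D passes the incoming gradient through unchanged in backward:
# for y = sum_r x_r, dL/dx_r = dL/dy, and every rank already holds the same
# dL/dy because the computation after the all-reduce is replicated.
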
def reduce_tensor_2p5d(input_: Tensor, parallel_mode: ParallelMode) -> Tensor:
    r"""All-reduce the input tensor over the given parallel group.

    Args:
        input_ (:class:`torch.tensor`): Input tensor.
        parallel_mode (:class:`colossalai.context.ParallelMode`): The parallel mode over which the tensor is reduced.

    Note:
        The parallel_mode should be a member of ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """
    return _ReduceTensor2p5D.apply(input_, parallel_mode)
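

# Illustrative usage sketch for reduce_tensor_2p5d (assumes an initialized 2.5D
# context; the tensor is a stand-in for a partial result produced on each rank):
#
#     partial = torch.ones(4, 4, device=get_current_device())
#     total = reduce_tensor_2p5d(partial, ParallelMode.PARALLEL_2P5D_COL)
#     # with the default sum op, total == partial * group size
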
class _ReduceScatterTensor2p5D(torch.autograd.Function):

    @staticmethod
    def forward(ctx, input_, dim, parallel_mode):
        ctx.dim = dim
        ctx.parallel_mode = parallel_mode
        return reduce_scatter(input_, dim, parallel_mode)

    @staticmethod
    def backward(ctx, output_grad):
        return all_gather(output_grad, ctx.dim, ctx.parallel_mode), None, None
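

# In _ReduceScatterTensor2p5D, reduce_scatter and all_gather act as adjoint
# collectives: the forward pass keeps only this rank's reduced shard along
# ctx.dim, so the backward pass gathers the shard gradients back to the full
# (pre-scatter) shape.
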
def reduce_scatter_tensor_2p5d(input_: Tensor, dim: int, parallel_mode: ParallelMode) -> Tensor:
    r"""Reduce-scatter the input tensor over the given parallel group.

    Args:
        input_ (:class:`torch.tensor`): Input tensor.
        dim (int): Dimension along which the reduced tensor is scattered.
        parallel_mode (:class:`colossalai.context.ParallelMode`): The parallel mode over which the tensor is reduced.

    Note:
        The parallel_mode should be a member of ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """
    dim_size = input_.size(dim)
    world_size = gpc.get_world_size(parallel_mode)
    assert dim_size % world_size == 0, \
        f'The batch size ({dim_size}) is not a multiple of 2.5D size * depth ({world_size}).'

    return _ReduceScatterTensor2p5D.apply(input_, dim, parallel_mode)
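

# Illustrative usage sketch for reduce_scatter_tensor_2p5d (assumes an
# initialized 2.5D context; shapes are made up for the example):
#
#     y = torch.randn(16, 1024, device=get_current_device())
#     y_shard = reduce_scatter_tensor_2p5d(y, 0, ParallelMode.PARALLEL_2P5D_COL)
#     # y_shard holds the element-wise reduction over the column group, but
#     # only this rank's slice along dim 0: shape (16 // group size, 1024)
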
class _RreduceByBatch2p5D(torch.autograd.Function):

    @staticmethod
    def symbolic(graph, input_, reduce_mean: bool = False):
        output = all_reduce(input_, ParallelMode.PARALLEL_2P5D_COL)
        if reduce_mean:
            reduce_size = gpc.get_world_size(ParallelMode.PARALLEL_2P5D_COL)
            return output / reduce_size
        return output

    @staticmethod
    @custom_fwd(cast_inputs=torch.float32)
    def forward(ctx, input_, reduce_mean: bool = False):
        output = all_reduce(input_, ParallelMode.PARALLEL_2P5D_COL)
        ctx.reduce_mean = reduce_mean
        if reduce_mean:
            reduce_size = gpc.get_world_size(ParallelMode.PARALLEL_2P5D_COL)
            ctx.reduce_size = reduce_size
            return output.clone() / reduce_size
        return output.clone()

    @staticmethod
    @custom_bwd
    def backward(ctx, output_grad):
        if ctx.reduce_mean:
            return output_grad / ctx.reduce_size, None
        else:
            return output_grad, None
def reduce_by_batch_2p5d(input_, reduce_mean: bool = False) -> Tensor:
    r"""All-reduce the input from the model parallel region.

    Args:
        input_ (:class:`torch.tensor`): input matrix.
        reduce_mean (bool, optional): If set to ``True``, divide the output by the size of the
            column parallel group. Defaults to ``False``.
    """
    return _RreduceByBatch2p5D.apply(input_, reduce_mean)
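

# Illustrative usage sketch for reduce_by_batch_2p5d (assumes an initialized
# 2.5D context): averaging a per-rank scalar, e.g. a loss or accuracy value
# computed on the local batch shard, over the column parallel group.
#
#     local_value = torch.tensor(3.0, device=get_current_device())
#     global_mean = reduce_by_batch_2p5d(local_value, reduce_mean=True)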