#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import torch
import torch.distributed as dist
from torch.distributed import ReduceOp
from torch import Tensor
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc


def all_gather(tensor: Tensor,
               dim: int,
               parallel_mode: ParallelMode,
               on_cpu: bool = False,
               async_op: bool = False) -> Tensor:
    r"""Gathers all tensors from the parallel group and concatenates them in a
    specific dimension.

    Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.

    Args:
        tensor (:class:`torch.Tensor`): Tensor to be gathered.
        dim (int): The dimension to concatenate along.
        parallel_mode (:class:`colossalai.context.ParallelMode`): Parallel group mode used in this communication.
        on_cpu (bool, optional): Whether to communicate with the Gloo backend.
        async_op (bool, optional): Whether the operation is asynchronous.

    Returns:
        Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The result of all-gather only,
        if async_op is set to False. A tuple of the all-gather output and an async work handle,
        if async_op is set to True.
    """
    depth = gpc.get_world_size(parallel_mode)
    if depth == 1:
        out = tensor
        work = None
    else:
        shape = list(tensor.shape)
        shape[0], shape[dim] = shape[dim], shape[0]
        shape[0] *= depth
        out = torch.empty(shape, dtype=tensor.dtype, device=tensor.device)
        temp = list(torch.chunk(out, depth, dim=0))
        group = gpc.get_cpu_group(parallel_mode) if on_cpu else gpc.get_group(parallel_mode)
        work = dist.all_gather(tensor_list=temp,
                               tensor=tensor.transpose(0, dim).contiguous(),
                               group=group,
                               async_op=async_op)
        out = torch.transpose(out, 0, dim)
    if async_op:
        return out, work
    else:
        return out
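

# Illustrative usage sketch (assumes the global context has already been initialized, e.g. via
# colossalai.launch, and that ParallelMode.TENSOR maps to a tensor-parallel group of size 2):
#
#     local = torch.randn(4, 8, device=torch.cuda.current_device())
#     gathered = all_gather(local, dim=-1, parallel_mode=ParallelMode.TENSOR)
#     # gathered.shape == (4, 16) on every rank. With async_op=True the call returns
#     # (out, work) instead, and work.wait() must complete before `out` is read.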


def reduce_scatter(tensor: Tensor,
                   dim: int,
                   parallel_mode: ParallelMode,
                   op: ReduceOp = ReduceOp.SUM,
                   on_cpu: bool = False,
                   async_op: bool = False) -> Tensor:
    r"""Reduces all tensors, then scatters the result in a specific dimension to all
    members of the parallel group.

    Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.

    Args:
        tensor (:class:`torch.Tensor`): Tensor to be reduce-scattered.
        dim (int): The dimension to scatter along.
        parallel_mode (:class:`colossalai.context.ParallelMode`): Parallel group mode used in this communication.
        op (torch.distributed.ReduceOp, optional): The type of reduce operation,
            should be included in [SUM, AVG, PRODUCT, MIN, MAX, BAND, BOR, BXOR].
            For more details about ReduceOp, please refer to
            `ReduceOp <https://pytorch.org/docs/stable/distributed.html#torch.distributed.ReduceOp>`_.
        on_cpu (bool, optional): Whether to communicate with the Gloo backend.
        async_op (bool, optional): Whether the operation is asynchronous.

    Returns:
        Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The result of reduce-scatter only,
        if async_op is set to False. A tuple of the reduce-scatter output and an async work handle,
        if async_op is set to True.
    """
    depth = gpc.get_world_size(parallel_mode)
    if depth == 1:
        out = tensor
        work = None
    else:
        temp = list(map(lambda x: x.contiguous(), torch.chunk(tensor, depth, dim=dim)))
        out = torch.empty(temp[0].shape, dtype=tensor.dtype, device=tensor.device)
        group = gpc.get_cpu_group(parallel_mode) if on_cpu else gpc.get_group(parallel_mode)
        work = dist.reduce_scatter(output=out, input_list=temp, op=op, group=group, async_op=async_op)
    if async_op:
        return out, work
    else:
        return out
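

# Illustrative usage sketch (assumes an initialized global context and a tensor-parallel group
# of size 2): every rank passes a full tensor and receives only the summed chunk that
# corresponds to its rank along `dim`:
#
#     full = torch.ones(4, 8, device=torch.cuda.current_device())
#     shard = reduce_scatter(full, dim=-1, parallel_mode=ParallelMode.TENSOR)
#     # shard.shape == (4, 4) and every element equals 2.0 (SUM over the 2 ranks).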


def all_reduce(tensor: Tensor,
               parallel_mode: ParallelMode,
               op: ReduceOp = ReduceOp.SUM,
               on_cpu: bool = False,
               async_op: bool = False) -> Tensor:
    r"""Reduces the tensor data across the whole parallel group in such a way that all processes get the final result.

    Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.

    Args:
        tensor (:class:`torch.Tensor`): Tensor to be all-reduced.
        parallel_mode (:class:`colossalai.context.ParallelMode`): Parallel group mode used in this communication.
        op (torch.distributed.ReduceOp, optional): The type of reduce operation,
            should be included in [SUM, AVG, PRODUCT, MIN, MAX, BAND, BOR, BXOR].
            For more details about ReduceOp, please refer to
            `ReduceOp <https://pytorch.org/docs/stable/distributed.html#torch.distributed.ReduceOp>`_.
        on_cpu (bool, optional): Whether to communicate with the Gloo backend.
        async_op (bool, optional): Whether the operation is asynchronous.

    Returns:
        Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The result of all-reduce only,
        if async_op is set to False. A tuple of the all-reduce output and an async work handle,
        if async_op is set to True.
    """
    depth = gpc.get_world_size(parallel_mode)
    if depth == 1:
        out = tensor
        work = None
    else:
        out = tensor.contiguous()
        group = gpc.get_cpu_group(parallel_mode) if on_cpu else gpc.get_group(parallel_mode)
        work = dist.all_reduce(out, op=op, group=group, async_op=async_op)
    if async_op:
        return out, work
    else:
        return out
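

# Illustrative usage sketch (assumes an initialized global context): a common use is summing
# partial results produced by each tensor-parallel rank:
#
#     partial = torch.full((2, 2), float(gpc.get_local_rank(ParallelMode.TENSOR)),
#                          device=torch.cuda.current_device())
#     total = all_reduce(partial, ParallelMode.TENSOR)    # ReduceOp.SUM by default
#     # with a group of size 2, every element of `total` equals 0.0 + 1.0 = 1.0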


def broadcast(tensor: Tensor, src: int, parallel_mode: ParallelMode, on_cpu: bool = False, async_op: bool = False):
    r"""Broadcasts a tensor to the whole parallel group. The tensor must have the same
    number of elements in all processes participating in the collective.

    Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.

    Args:
        tensor (:class:`torch.Tensor`): Tensor to be broadcast.
        src (int): Source rank.
        parallel_mode (:class:`colossalai.context.ParallelMode`): Parallel group mode used in this communication.
        on_cpu (bool, optional): Whether to communicate with the Gloo backend.
        async_op (bool, optional): Whether the operation is asynchronous.

    Returns:
        Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The broadcast tensor only,
        if async_op is set to False. A tuple of the broadcast tensor and an async work handle,
        if async_op is set to True.
    """
    depth = gpc.get_world_size(parallel_mode)
    if depth == 1:
        out = tensor
        work = None
    else:
        out = tensor.contiguous()
        group = gpc.get_cpu_group(parallel_mode) if on_cpu else gpc.get_group(parallel_mode)
        work = dist.broadcast(out, src=src, group=group, async_op=async_op)
    if async_op:
        return out, work
    else:
        return out
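

# Illustrative usage sketch (assumes an initialized global context and that global rank 0
# belongs to the chosen group): the source rank fills the buffer and every rank ends up with
# identical contents:
#
#     buf = torch.empty(16, device=torch.cuda.current_device())
#     if gpc.get_global_rank() == 0:
#         buf.normal_()
#     synced = broadcast(buf, src=0, parallel_mode=ParallelMode.TENSOR)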


def reduce(tensor: Tensor,
           dst: int,
           parallel_mode: ParallelMode,
           op: ReduceOp = ReduceOp.SUM,
           on_cpu: bool = False,
           async_op: bool = False):
    r"""Reduces tensors across the whole parallel group. Only the process with
    rank ``dst`` is going to receive the final result.

    Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.

    Args:
        tensor (:class:`torch.Tensor`): Tensor to be reduced.
        dst (int): Destination rank.
        parallel_mode (:class:`colossalai.context.ParallelMode`): Parallel group mode used in this communication.
        op (torch.distributed.ReduceOp, optional): The type of reduce operation,
            should be included in [SUM, AVG, PRODUCT, MIN, MAX, BAND, BOR, BXOR].
            For more details about ReduceOp, please refer to
            `ReduceOp <https://pytorch.org/docs/stable/distributed.html#torch.distributed.ReduceOp>`_.
        on_cpu (bool, optional): Whether to communicate with the Gloo backend.
        async_op (bool, optional): Whether the operation is asynchronous.

    Returns:
        Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The result of reduce only,
        if async_op is set to False. A tuple of the reduce output and an async work handle,
        if async_op is set to True.
    """
    depth = gpc.get_world_size(parallel_mode)
    if depth == 1:
        out = tensor
        work = None
    else:
        out = tensor.contiguous()
        group = gpc.get_cpu_group(parallel_mode) if on_cpu else gpc.get_group(parallel_mode)
        work = dist.reduce(out, dst=dst, op=op, group=group, async_op=async_op)
    if async_op:
        return out, work
    else:
        return out
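

# Illustrative usage sketch (assumes an initialized global context and that global rank 0
# belongs to the group): like all_reduce, but only the destination rank receives the reduced
# value; other ranks should not rely on the returned contents:
#
#     partial = torch.ones(8, device=torch.cuda.current_device())
#     reduced = reduce(partial, dst=0, parallel_mode=ParallelMode.TENSOR)
#     # on global rank 0, reduced.sum() equals 8 * group_size; elsewhere the contents are undefined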


def scatter_object_list(scatter_object_output_list, scatter_object_input_list, src=0, group=None) -> None:
r """ Modified from `torch.distributed.scatter_object_list <https://pytorch.org/docs/stable/_modules/torch/distributed/distributed_c10d.html#scatter_object_list>` to fix issues
"""
    if dist.distributed_c10d._rank_not_in_group(group):
        return

    if (not isinstance(scatter_object_output_list, list) or len(scatter_object_output_list) < 1):
        raise RuntimeError("Expected argument scatter_object_output_list to be a list of size at least 1.")

    # set tensor device to cuda if backend is nccl
    device = torch.cuda.current_device() if dist.get_backend(group) == 'nccl' else torch.device("cpu")

    my_rank = dist.get_rank()    # use global rank
    if my_rank == src:
        tensor_list, tensor_sizes = zip(
            *[dist.distributed_c10d._object_to_tensor(obj) for obj in scatter_object_input_list])
        tensor_list = list(map(lambda x: x.to(device), tensor_list))
        tensor_sizes = list(map(lambda x: x.to(device), tensor_sizes))

    # Src rank broadcasts the maximum tensor size. This is because all ranks are
    # expected to call into scatter() with equal-sized tensors.
    if my_rank == src:
        max_tensor_size = max(tensor_sizes)
        for tensor in tensor_list:
            tensor.resize_(max_tensor_size)
    else:
        max_tensor_size = torch.tensor([0], dtype=torch.long).to(device)
    dist.broadcast(max_tensor_size, src=src, group=group)

    # Scatter actual serialized objects
    output_tensor = torch.empty(max_tensor_size.item(), dtype=torch.uint8).to(device)
    dist.scatter(
        output_tensor,
        scatter_list=None if my_rank != src else tensor_list,
        src=src,
        group=group,
    )

    # Scatter per-object sizes to trim tensors when deserializing back to object
    obj_tensor_size = torch.tensor([0], dtype=torch.long).to(device)
    dist.scatter(
        obj_tensor_size,
        scatter_list=None if my_rank != src else tensor_sizes,
        src=src,
        group=group,
    )

    output_tensor, obj_tensor_size = output_tensor.cpu(), obj_tensor_size.cpu()
    # Deserialize back to object
    scatter_object_output_list[0] = dist.distributed_c10d._tensor_to_object(output_tensor, obj_tensor_size)
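

# Illustrative usage sketch (assumes torch.distributed has been initialized): the source rank
# supplies one picklable object per rank and every rank receives its own entry through the
# one-element output list, mirroring torch.distributed.scatter_object_list:
#
#     inputs = [{"rank": r} for r in range(dist.get_world_size())] if dist.get_rank() == 0 else None
#     outputs = [None]
#     scatter_object_list(outputs, inputs, src=0)
#     # outputs[0] == {"rank": dist.get_rank()}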