"""Distributed tests for 2.5D tensor parallelism: spawns four worker
processes and checks the 2.5D operations and layers."""

from functools import partial

import pytest
import torch
import torch.multiprocessing as mp

from colossalai.core import global_context as gpc
from colossalai.initialize import launch
from colossalai.logging import disable_existing_loggers
from colossalai.utils import free_port

from checks_2p5d.check_layer_2p5d import *
from checks_2p5d.check_operation_2p5d import check_AB, check_ABT, check_ATB

# 2.5D tensor parallelism on 4 GPUs: the tensor-parallel size is
# depth * q**2, so depth=1 gives q=2, i.e. a single 2x2 rank grid.
CONFIG = dict(
    parallel=dict(
        pipeline=dict(size=1),
        tensor=dict(size=4, mode='2.5d', depth=1),
    ),
)


def check_operations():
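    """Check the 2.5D matrix multiplication kernels: A @ B, A @ B^T and A^T @ B."""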
    check_AB()
    check_ABT()
    check_ATB()


def check_layer():
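    """Check each 2.5D-parallel layer: linear, layernorm, embeddings, classifiers and losses."""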
    check_linear()
    check_layernorm()
    check_embed()
    check_patch_embed()
    check_vocab_parallel_embed()
    check_classifier_no_given_weight()
    check_vocab_parallel_classifier_no_given_weight()
    check_classifier_given_embed_weight()
    check_vocab_parallel_classifier_given_embed_weight()
    check_loss()
    check_vocab_parallel_loss()


def check_layer_and_operation(rank, world_size, port):
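    # Worker entry point; one process per rank is created by mp.spawn below.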
    disable_existing_loggers()
    launch(config=CONFIG,
           rank=rank,
           world_size=world_size,
           host='localhost',
           port=port,
           backend='nccl')

    # Disable TF32 and force deterministic cuDNN kernels so the parallel
    # results can be compared against their references at tight tolerances.
    torch.backends.cuda.matmul.allow_tf32 = False
    torch.backends.cudnn.allow_tf32 = False
    torch.backends.cudnn.deterministic = True
    check_operations()
    check_layer()
    gpc.destroy()
    torch.cuda.empty_cache()


@pytest.mark.dist
def test_2p5d():
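    # Spawn one worker per rank; free_port() supplies an unused localhost port
    # for the process-group rendezvous.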
    world_size = 4
    run_func = partial(check_layer_and_operation, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_2p5d()