mirror of https://github.com/hpcaitech/ColossalAI
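# Distributed test for the ColossalAI Engine API: spawns two workers and runs a
# single training step for each registered test model under each AMP mode
# (APEX, TORCH, NAIVE, and no AMP).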
from functools import partial

import pytest
import torch.multiprocessing as mp

import colossalai
from colossalai.amp import AMP_TYPE
from colossalai.core import global_context as gpc
from colossalai.testing import parameterize, rerun_if_address_is_in_use
from colossalai.utils import free_port
from tests.components_to_test.registry import non_distributed_component_funcs

# single pipeline stage and tensor-parallel size 1, i.e. plain data parallelism;
# the fp16 mode placeholder is overwritten per test case in run_train below
CONFIG = dict(parallel=dict(pipeline=dict(size=1), tensor=dict(size=1, mode=None)),
              fp16=dict(mode=None),
              clip_grad_norm=1.0)

@parameterize('model_name', ['repeated_computed_layers', 'resnet18'])
@parameterize('amp_mode', [AMP_TYPE.APEX, AMP_TYPE.TORCH, AMP_TYPE.NAIVE, None])
def run_train(model_name, amp_mode):
    # FIXME: test bert
    get_components_func = non_distributed_component_funcs.get_callable(model_name)
    gpc.config.fp16['mode'] = amp_mode
    model_builder, train_dataloader, _, optimizer_class, criterion = get_components_func()

    model = model_builder(checkpoint=False)
    # colossalai.initialize wraps the model, optimizer and criterion into an Engine;
    # any remaining returned objects are collected into args
    engine, train_dataloader, *args = colossalai.initialize(model=model,
                                                            optimizer=optimizer_class(model.parameters(), lr=1e-3),
                                                            criterion=criterion,
                                                            train_dataloader=train_dataloader)

    try:
        engine.train()
        # run a single training step, then stop
        for data, label in train_dataloader:
            engine.zero_grad()
            data = data.cuda()
            label = label.cuda()
            if criterion:
                output = engine(data)
                loss = engine.criterion(output, label)
            else:
                # some test models compute the loss internally
                loss = engine(data, label)
            engine.backward(loss)
            engine.step()
            break
    except IndexError:
        # with apex amp, NetWithRepeatedlyComputedLayers raises an index-out-of-range
        # error because the following check inside apex fails:
        # if cached_x.grad_fn.next_functions[1][0].variable is not x:
        pass

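# Note: colossalai.testing.parameterize is not pytest.mark.parametrize. It wraps the
# function so that one plain call iterates over all values, which is why run_engine
# below can call run_train() with no arguments; stacking two decorators covers every
# combination. A minimal sketch of that behavior (the `demo` function is hypothetical):
#
#   @parameterize('a', [1, 2])
#   @parameterize('b', ['x', 'y'])
#   def demo(a, b):
#       print(a, b)
#
#   demo()  # runs once per (a, b) combination -- four calls in total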
def run_engine(rank, world_size, port):
    # init dist env
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    run_train()  # arguments are injected by the @parameterize decorators

@pytest.mark.dist
@rerun_if_address_is_in_use()  # retry if the chosen port is already taken
def test_engine():
    world_size = 2
    run_func = partial(run_engine, world_size=world_size, port=free_port())
    # mp.spawn passes the worker rank as the first positional argument
    mp.spawn(run_func, nprocs=world_size)

if __name__ == '__main__':
    test_engine()
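# To run this test directly (assuming at least two GPUs and a working NCCL install;
# the file name below is hypothetical):
#   python test_engine.py
# or via pytest, selecting tests marked `dist`:
#   pytest -m dist test_engine.py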