# ColossalAI/tests/test_zero_data_parallel/test_sharded_optim_v2.py

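"""Integration test for ShardedModelV2 + ShardedOptimizerV2 (ZeRO).

For a few registered test models, this runs several training steps on a ZeRO-sharded
model/optimizer pair and on an unsharded fp32 reference model (wrapped in DDP when
world_size > 1), then checks that the sharded parameters still match the reference
within a loose tolerance and that parameters remain free of inf/NaN.
"""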

from functools import partial
import colossalai
import pytest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from colossalai.nn.optimizer import CPUAdam
from colossalai.testing import parameterize
from colossalai.utils import free_port
from colossalai.zero.init_ctx import ZeroInitContext
from colossalai.zero.shard_utils import (BucketTensorShardStrategy, TensorShardStrategy)
from colossalai.zero.sharded_model import ShardedModelV2
from colossalai.zero.sharded_model.utils import col_model_deepcopy
from colossalai.zero.sharded_optim import ShardedOptimizerV2
from colossalai.zero.sharded_optim._utils import has_inf_or_nan
from tests.components_to_test.registry import non_distributed_component_funcs
from torch.nn.parallel import DistributedDataParallel as DDP
from common import CONFIG, check_sharded_params_padding


def _run_step(model, optimizer, data, label, criterion, enable_autocast=False):
    model.train()
    optimizer.zero_grad()
    with torch.cuda.amp.autocast(enabled=enable_autocast):
        if criterion:
            y = model(data)
            loss = criterion(y, label)
        else:
            loss = model(data, label)

    loss = loss.float()
    if isinstance(model, ShardedModelV2):
        # The ZeRO path runs backward through the sharded optimizer, which also handles loss scaling.
        optimizer.backward(loss)
    else:
        loss.backward()
    optimizer.step()


@parameterize("cpu_offload", [True, False])
@parameterize("use_cpuadam", [True, False])
@parameterize("shard_strategy_class", [TensorShardStrategy, BucketTensorShardStrategy])
def _run_test_sharded_optim_v2(cpu_offload, shard_strategy_class, use_cpuadam):
    test_models = ['repeated_computed_layers', 'resnet18', 'bert']
    shard_strategy = shard_strategy_class()

    # In this test, CPUAdam is only exercised together with CPU offload.
    if use_cpuadam and cpu_offload is False:
        return

    for model_name in test_models:
        get_components_func = non_distributed_component_funcs.get_callable(model_name)
        model_builder, train_dataloader, _, optimizer_class, criterion = get_components_func()

        # Build the ZeRO model: parameters are converted to fp16 and sharded as they are created.
        with ZeroInitContext(convert_fp16=True,
                             target_device=torch.device('cpu:0'),
                             shard_strategy=shard_strategy,
                             shard_param=True,
                             rm_torch_payload_on_the_fly=False):
            zero_model = model_builder(checkpoint=True)
        zero_model = ShardedModelV2(zero_model,
                                    shard_strategy,
                                    offload_config=dict(device='cpu') if cpu_offload else None)

        # Build an unsharded fp32 reference model with the same initial weights.
        model = model_builder(checkpoint=True).half()
        col_model_deepcopy(zero_model, model)
        model = model.cuda().float()

        if dist.get_world_size() > 1:
            model = DDP(model)

        if use_cpuadam:
            optimizer_class = CPUAdam
        optim = optimizer_class(model.parameters(), lr=1e-3)
        sharded_optim = optimizer_class(zero_model.parameters(), lr=1e-3)
        sharded_optim = ShardedOptimizerV2(zero_model, sharded_optim, cpu_offload=cpu_offload, initial_scale=2**5)

        for i, (data, label) in enumerate(train_dataloader):
            # FIXME: if i > 5, the unittest will fail
            if i > 3:
                break
            data, label = data.cuda(), label.cuda()
            # Step both models on the same batch, then compare parameters.
            _run_step(model, optim, data, label, criterion, False)
            _run_step(zero_model, sharded_optim, data, label, criterion, False)
            check_sharded_params_padding(model, zero_model, loose=True)
            for param in model.parameters():
                assert not has_inf_or_nan(param)


def _run_dist(rank, world_size, port):
    colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    _run_test_sharded_optim_v2()


# Note: CPUAdam could in principle run with cpu_offload = False, but that combination
# is skipped inside _run_test_sharded_optim_v2 above.
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [1, 2])
def test_sharded_optim_v2(world_size):
    run_func = partial(_run_dist, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)
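

# Running this file directly spawns world_size=2 processes via mp.spawn; under pytest,
# the `dist` marker and the world_size parametrization above drive the same path.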
if __name__ == '__main__':
    test_sharded_optim_v2(world_size=2)