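# Distributed lazy-init test: each model from the chosen model-zoo subset is
# built twice, once eagerly (with _MyTensor) and once lazily (with LazyTensor);
# the lazy copy is then materialized and distributed across a 2x2 DeviceMesh
# and compared against the eager reference.
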
from typing import Optional

import pytest
import torch
import torch.nn as nn

import colossalai
from colossalai.device.device_mesh import DeviceMesh
from colossalai.tensor.d_tensor.layout import Layout
from colossalai.tensor.d_tensor.sharding_spec import ShardingSpec
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
from colossalai.utils.common import print_rank_0

try:
    from colossalai.lazy.lazy_init import LazyInitContext, LazyTensor, _MyTensor
except ImportError:
    # lazy init is only available in newer Colossal-AI versions
    pass
from lazy_init_utils import SUPPORT_LAZY, assert_dist_model_equal, set_seed

from tests.kit.model_zoo import model_zoo


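# Pick a dimension to shard: the first dimension whose size is even (so it can
# be split evenly across a mesh axis of size 2); return None when no dimension
# qualifies, in which case the tensor will simply be replicated.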
def find_shard_dim(shape: torch.Size) -> Optional[int]:
    for dim, size in enumerate(shape):
        if size % 2 == 0:
            return dim
    return None


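# Build the Layout describing how a single tensor is placed on the device mesh:
# shard along the dimension chosen by find_shard_dim over mesh axis 0, or
# replicate the tensor if no shardable dimension exists.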
def make_layout(device_mesh: DeviceMesh, original_tensor: torch.Tensor) -> Layout:
    shard_dim = find_shard_dim(original_tensor.shape)
    dim_partition_dict = {shard_dim: [0]} if shard_dim is not None else {}
    target_sharding_spec = ShardingSpec(dim_size=original_tensor.dim(), dim_partition_dict=dim_partition_dict)
    layout = Layout(device_mesh=device_mesh,
                    device_type=torch.device('cuda'),
                    sharding_spec=target_sharding_spec,
                    entire_shape=original_tensor.shape)
    return layout


def _get_current_name(prefix: str, name: str) -> str:
    return f'{prefix}.{name}'.lstrip('.')


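# Traverse the model and record a Layout for every LazyTensor parameter and
# buffer, keyed by its fully qualified name (e.g. 'encoder.layer.0.weight').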
def generate_layout_dict(model: nn.Module, device_mesh: DeviceMesh) -> dict:
    layout_dict = {}

    @torch.no_grad()
    def generate_recursively(module: nn.Module, prefix: str = ''):
        # recurse into child modules first
        for name, mod in module.named_children():
            generate_recursively(mod, prefix=_get_current_name(prefix, name))

        # record layouts for parameters attached directly to this module
        for name, param in module.named_parameters(recurse=False):
            if isinstance(param, LazyTensor):
                layout = make_layout(device_mesh, param)
                layout_dict[_get_current_name(prefix, name)] = layout

        # and for buffers attached directly to this module
        for name, buf in module.named_buffers(recurse=False):
            if isinstance(buf, LazyTensor):
                layout = make_layout(device_mesh, buf)
                layout_dict[_get_current_name(prefix, name)] = layout

    generate_recursively(model)

    return layout_dict


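# For every model in the selected model-zoo subset: build an eager reference and
# a lazily initialized copy, distribute the lazy copy according to the generated
# layouts, and check that the two models hold the same values.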
@parameterize('subset', ['torchvision', 'diffusers', 'timm', 'transformers', 'torchaudio', 'deepfm', 'dlrm'])
def run_dist_lazy_init(subset, seed: int = 42):
    sub_model_zoo = model_zoo.get_sub_registry(subset)
    device_mesh = DeviceMesh(torch.Tensor([0, 1, 2, 3]), (2, 2), init_process_group=True)
    # re-seed before every tensor op so that the eager and lazy runs produce
    # identical random initializations
    _MyTensor._pre_op_fn = lambda *args: set_seed(seed)
    LazyTensor._pre_op_fn = lambda *args: set_seed(seed)

    for name, entry in sub_model_zoo.items():
        # TODO(ver217): lazy init does not support weight norm, skip these models
        if name in ('torchaudio_wav2vec2_base', 'torchaudio_hubert_base'):
            continue
        print_rank_0(name)
        model_fn, data_gen_fn, output_transform_fn, model_attr = entry
        ctx = LazyInitContext(tensor_cls=_MyTensor)
        with ctx:
            model = model_fn()
        ctx = LazyInitContext()
        with ctx:
            deferred_model = model_fn()
        layout_dict = generate_layout_dict(deferred_model, device_mesh)
        ctx.distribute(deferred_model, layout_dict, verbose=True)
        assert_dist_model_equal(model, deferred_model, layout_dict)


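# Per-process entry point: set up the distributed environment, then run the
# parameterized test body on this rank.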
def run_dist(rank, world_size, port) -> None:
    colossalai.launch({}, rank=rank, world_size=world_size, host='localhost', port=port)
    run_dist_lazy_init()


@pytest.mark.skipif(not SUPPORT_LAZY, reason='torch version should be >= 1.12.0')
@pytest.mark.dist
@rerun_if_address_is_in_use()
def test_dist_lazy_init():
    spawn(run_dist, 4)


if __name__ == '__main__':
    test_dist_lazy_init()