import torch
import torch.multiprocessing as mp
import pytest
from numpy import allclose
from functools import partial

import colossalai
from colossalai.core import global_context as gpc
from colossalai.tensor import ColoTensor, ColoTensorSpec, ProcessGroup, ShardSpec, ReplicaSpec
from colossalai.testing import rerun_if_address_is_in_use
from colossalai.utils import free_port


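# Indexing a ColoTensor should return the same values as indexing the wrapped torch.Tensor.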
def _run_tensor_indexing():
    pg = ProcessGroup()
    torch_t = torch.randn(2, 3)
    colo_t = ColoTensor(torch_t, ColoTensorSpec(pg))
    assert allclose(torch_t[:, 1], colo_t[:, 1])


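# torch.Tensor methods called on a ColoTensor should wrap tensor outputs as ColoTensors
# and return plain values for non-tensor outputs, matching the reference tensor.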
def _run_wrapped_tensor_func():
    pg = ProcessGroup()
    t_ref = torch.randn(4, 5)
    t = ColoTensor.from_torch_tensor(t_ref.clone(), ColoTensorSpec(pg))

    # non-callable attribute access
    assert t.is_cuda == t_ref.is_cuda

    # method returning a single torch.Tensor
    t_abs = t.abs()
    assert isinstance(t_abs, ColoTensor) and torch.equal(t_abs, t_ref.abs())

    # method returning a non-tensor value
    assert t.dim() == t_ref.dim()

    # method returning more than one torch.Tensor
    assert isinstance(t, ColoTensor)
    t_split1, t_split2 = t.split(2)
    assert isinstance(t_split1, ColoTensor) and isinstance(t_split2, ColoTensor), f"{type(t_split1)} {type(t_split2)}"


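# Arithmetic on ColoTensors should match plain torch results; factory functions such as
# torch.zeros_like should keep the ColoTensor type and its sharded layout.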
def _run_operand(world_size):
    pg = ProcessGroup()
    t_ref = torch.randn(4, 5)
    t = ColoTensor.from_torch_tensor(t_ref.clone(), ColoTensorSpec(pg))

    t_ref_res = t_ref + t_ref
    t_res = t + t

    assert isinstance(t_res, ColoTensor)
    assert torch.allclose(t_ref_res, t_res)

    pg = ProcessGroup(tp_degree=world_size)
    t = ColoTensor.from_torch_tensor(t_ref.clone(), ColoTensorSpec(pg))
    t.set_dist_spec(ShardSpec([0], [world_size]))
    t_new = torch.zeros_like(t)
    assert isinstance(t_new, ColoTensor)
    assert t_new.is_sharded()


#### Test distributed initialization of a ColoTensor


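# A ColoTensor sharded along dim 0 should report its global size and support a global
# reshape via view_global, even though each rank only holds a local (4, 5) chunk.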
def _run_view(world_size):
    t_ref = torch.randn(4, 5)
    rank = gpc.get_global_rank()
    pg = ProcessGroup(rank, list(range(world_size)), tp_degree=world_size)
    t = ColoTensor.from_torch_tensor(
        t_ref, ColoTensorSpec(pg, dist_attr=ShardSpec(dims=[0], num_partitions=[pg.tp_world_size()])))

    assert t.size_global()[0] == 4 * world_size
    assert t.size_global(1) == 5
    assert t.size_global() == torch.Size([4 * world_size, 5])

    t = t.view_global(4 * 5 * world_size)
    assert t.shape == torch.Size([4 * 5 * world_size])


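# Initializing from a local dim-0 shard and then switching to ReplicaSpec should recover
# the full global shape (4 * world_size, 5).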
def _run_tensor_shard_init(world_size):
    t_ref = torch.randn(4, 5)
    pg = ProcessGroup(tp_degree=world_size)
    shard_attr = ShardSpec(dims=[0], num_partitions=[pg.tp_world_size()])
    tensor_spec = ColoTensorSpec(pg, dist_attr=shard_attr)
    t = ColoTensor.from_torch_tensor(t_ref.clone(), tensor_spec)
    t.set_dist_spec(ReplicaSpec())

    assert t.shape == torch.Size((4 * world_size, 5)), f"{t.shape} vs ({4 * world_size, 5})"


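# A ColoTensor created with a default (replicated) spec keeps the shape of the source tensor.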
def _run_tensor_replicated_init(world_size):
    t_ref = torch.randn(4 * world_size, 5)
    pg = ProcessGroup()
    spec = ColoTensorSpec(pg)
    t = ColoTensor.from_torch_tensor(t_ref.clone(), spec)

    assert t.shape == torch.Size((4 * world_size, 5)), f"{t.shape}"


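# Two ProcessGroups created with default arguments should compare equal.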
def _run_process_group(world_size):
    pg1 = ProcessGroup()
    pg2 = ProcessGroup()
    assert pg1 == pg2


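# redistribute should move a tensor between a dim-0 shard, a last-dim shard, and a
# replicated layout, including across different process groups; only exercised with 4 ranks.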
def _run_redistributed(world_size):
    if world_size != 4:
        return
    pg1 = ProcessGroup(tp_degree=2, dp_degree=2)
    pg2 = ProcessGroup(tp_degree=4, dp_degree=1)

    spec1 = ColoTensorSpec(pg1)
    t1 = ColoTensor.from_torch_tensor(torch.randn(2, 3, 4), spec1)
    t1 = t1.redistribute(ShardSpec([0], [pg1.tp_world_size()]))
    assert t1.is_sharded()
    t1 = t1.redistribute(ShardSpec([-1], [pg2.tp_world_size()]), pg2)
    assert t1.is_sharded()
    pg3 = ProcessGroup(tp_degree=1, dp_degree=4)
    t1 = t1.redistribute(ReplicaSpec(), pg3)
    assert t1.is_replicate()


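# set_dist_spec should turn a replicated ColoTensor into a 1-D column shard in place;
# only exercised with 4 ranks.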
def _run_set_tensor_spec(world_size):
    if world_size != 4:
        return
    pg = ProcessGroup(tp_degree=2, dp_degree=2)
    spec1 = ColoTensorSpec(pg)
    t1 = ColoTensor.from_torch_tensor(torch.randn(2, 3, 4), spec1)

    dist_spec2 = (ShardSpec([-1], [pg.tp_world_size()]), None)
    assert t1.is_replicate()
    t1.set_dist_spec(*dist_spec2)
    assert t1.is_shard_1dcol()


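# Per-process entry point: initialize the ColossalAI distributed context, then run every check.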
def run_dist_tests(rank, world_size, port):
    colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    _run_tensor_shard_init(world_size)
    _run_tensor_replicated_init(world_size)
    _run_view(world_size)
    _run_process_group(world_size)
    _run_tensor_indexing()
    _run_operand(world_size)
    _run_wrapped_tensor_func()
    _run_redistributed(world_size)
    _run_set_tensor_spec(world_size)


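# Spawn one process per rank; rerun automatically if the rendezvous address is already in use.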
@pytest.mark.dist
@pytest.mark.parametrize('world_size', [1, 2])
@rerun_if_address_is_in_use()
def test_dist_cases(world_size):
    run_func = partial(run_dist_tests, world_size=world_size, port=free_port())
    mp.spawn(run_func, nprocs=world_size)


if __name__ == '__main__':
    test_dist_cases(4)