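"""Tests for the legacy ZeRO/Gemini tensor-move utilities.

Exercises colo_model_data_tensor_move between plain tensors and ShardedTensors,
and colo_model_data_tensor_move_inline for in-place device moves.
"""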
import torch

import colossalai
from colossalai.testing import rerun_if_address_is_in_use, spawn
from colossalai.zero.legacy.gemini.tensor_utils import colo_model_data_tensor_move, colo_model_data_tensor_move_inline
from colossalai.zero.legacy.sharded_param import ShardedTensor


def run_tensor_move(rank, world_size, port):
    colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
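
    # Case 1: move a CUDA tensor's data into a CPU tensor.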
    src_t = torch.ones(2, 3).cuda()
    tgt_t = torch.zeros(2, 3)
    colo_model_data_tensor_move(src_t, tgt_t)
    assert (torch.sum(tgt_t) == 6.0), f"{torch.sum(tgt_t)} vs. 6.0"
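
    # Case 2: move a CPU tensor into a half-precision CUDA tensor.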
    src_t = torch.ones(2, 3)
    tgt_t = torch.zeros(2, 3).cuda().half()
    colo_model_data_tensor_move(src_t, tgt_t)
    # the source tensor's storage has been released by the move
    assert (src_t.numel() == 0)
    assert (torch.sum(tgt_t) == 6.0), f"{torch.sum(tgt_t)} vs. 6.0"
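
    # Case 3: move between ShardedTensor wrappers; the data lives in .payload.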
    src_t = ShardedTensor(torch.ones(2, 3))
    tgt_t = ShardedTensor(torch.zeros(2, 3).cuda().half())
    colo_model_data_tensor_move(src_t, tgt_t)
    assert (torch.sum(tgt_t.payload) == 6.0), f"{torch.sum(tgt_t.payload)} vs. 6.0"
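
    # Move a ShardedTensor to another device in place.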
    assert (tgt_t.device.type == 'cuda')
    colo_model_data_tensor_move_inline(tgt_t, torch.device('cpu'))
    assert (tgt_t.device.type == 'cpu')


@rerun_if_address_is_in_use()
def test_tensor_move():
    spawn(run_tensor_move, 1)


if __name__ == '__main__':
    test_tensor_move()