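"""Shardformer test for GPT-2.

Builds an original transformers GPT-2 model and a sharded copy for each GPT-2 entry in the
model zoo, runs a forward/backward pass on both across two spawned processes, and checks that
outputs, losses and key parameter gradients agree. Fused normalization and tensor parallelism
are toggled via `@parameterize`.
"""
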
import pytest
import torch
import colossalai
from colossalai.logging import disable_existing_loggers
from colossalai.tensor.d_tensor.api import is_customized_distributed_tensor, is_distributed_tensor
from colossalai.testing import (
    assert_hf_output_close,
    clear_cache_before_run,
    parameterize,
    rerun_if_address_is_in_use,
    spawn,
)
from tests.kit.model_zoo import model_zoo
from tests.test_shardformer.test_model._utils import build_model, run_forward


def check_forward_backward(org_model, sharded_model, data_gen_fn, output_transform_fn, loss_fn):
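    """Run a forward/backward pass on both models and assert that outputs, losses and key gradients match."""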
    # check forward
    org_output, org_loss, shard_output, shard_loss = run_forward(org_model, sharded_model, data_gen_fn,
                                                                 output_transform_fn, loss_fn)
    assert_hf_output_close(org_output, shard_output, ignore_keys=['past_key_values'])

    # do backward
    org_loss.backward()
    shard_loss.backward()

    assert torch.allclose(org_loss, shard_loss,
                          atol=1e-5), f"shard model loss is not equal to origin model loss\n{org_loss}\n{shard_loss}"

    # unwrap the model: `GPT2Model` is already the bare transformer, while the task-specific
    # GPT-2 variants wrap it in a `.transformer` attribute
    if org_model.__class__.__name__ != 'GPT2Model':
        org_model = org_model.transformer
        sharded_model = sharded_model.transformer

    # check mlp grad
    org_grad = org_model.h[0].mlp.c_fc.weight.grad
    shard_grad = sharded_model.h[0].mlp.c_fc.weight.grad
    shard_weight = sharded_model.h[0].mlp.c_fc.weight

    if is_distributed_tensor(shard_weight) or is_customized_distributed_tensor(shard_weight):
        # c_fc is sharded along its output dimension, so gather the gradient slices from both
        # tensor-parallel ranks (this test spawns a world size of 2) and concatenate along dim 1;
        # all_gather fills shard_grad_list in place and returns None, so its result is not assigned
        shard_grad_list = [torch.zeros([*shard_grad.shape]).to('cuda') for _ in range(2)]
        torch.distributed.all_gather(shard_grad_list, shard_grad)
        all_shard_grad = torch.cat(shard_grad_list, dim=1)
    else:
        all_shard_grad = shard_grad

    assert torch.allclose(
        org_grad, all_shard_grad,
        atol=1e-5), f"shard model grad is not equal to origin model grad\n{org_grad}\n{all_shard_grad}"

    # check embedding weights
    org_grad = org_model.wte.weight.grad
    shard_grad = sharded_model.wte.weight.grad
    shard_weight = sharded_model.wte.weight

    if is_distributed_tensor(shard_weight) or is_customized_distributed_tensor(shard_weight):
        # the token embedding is sharded along the vocabulary dimension, so concatenate along dim 0
        shard_grad_list = [torch.zeros([*shard_grad.shape]).to('cuda') for _ in range(2)]
        torch.distributed.all_gather(shard_grad_list, shard_grad)
        all_shard_grad = torch.cat(shard_grad_list, dim=0)
    else:
        all_shard_grad = shard_grad

    assert torch.allclose(
        org_grad, all_shard_grad,
        atol=1e-5), f"shard model grad is not equal to origin model grad\n{org_grad}\n{all_shard_grad}"


@parameterize('enable_fused_normalization', [True, False])
@parameterize('enable_tensor_parallelism', [True, False])
def run_gpt2_test(enable_fused_normalization, enable_tensor_parallelism):
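    """Build and check each GPT-2 model from the model zoo under the given shard configuration."""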
    sub_model_zoo = model_zoo.get_sub_registry('transformers_gpt')
    for name, (model_fn, data_gen_fn, output_transform_fn, loss_fn, _) in sub_model_zoo.items():
        org_model, sharded_model = build_model(model_fn, enable_fused_normalization, enable_tensor_parallelism)
        check_forward_backward(org_model, sharded_model, data_gen_fn, output_transform_fn, loss_fn)
    torch.cuda.empty_cache()


def check_gpt2(rank, world_size, port):
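    """Per-process worker: initialize the colossalai distributed environment, then run the GPT-2 tests."""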
    disable_existing_loggers()
    colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    run_gpt2_test()


@pytest.mark.dist
@rerun_if_address_is_in_use()
@clear_cache_before_run()
def test_gpt2():
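    """Spawn two worker processes and run the distributed GPT-2 shardformer checks."""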
    spawn(check_gpt2, 2)


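# Running this file directly (outside pytest) is also possible; it assumes at least two CUDA
# devices and a working NCCL backend, since `spawn(check_gpt2, 2)` launches two worker processes.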
if __name__ == "__main__":
    test_gpt2()