mirror of https://github.com/hpcaitech/ColossalAI
FoolPlayer authored 1 year ago, committed by Frank Lee · 2 changed files with 100 additions and 10 deletions
@@ -0,0 +1,45 @@
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.testing import assert_close

import colossalai
from colossalai.shardformer.layer.layers import VocabParallelEmbedding1D
from colossalai.testing import rerun_if_address_is_in_use, spawn


def check_vocab_embedding_1d():
    embedding = nn.Embedding(128, 32).to('cuda')
    dist_embedding_1d = VocabParallelEmbedding1D.from_native_module(embedding, process_group=None)

    # the 128-entry vocabulary is split row-wise across 2 ranks, so each shard holds 64 rows
    assert dist_embedding_1d.weight.shape == torch.Size([64, 32])
    assert dist_embedding_1d.num_embeddings == 64
    assert dist_embedding_1d.embed_dim == 32

    # check embedding correctness: the sharded forward must match the dense one
    x = torch.randint(0, 128, (4, 32)).to('cuda')
    org_out = embedding(x)
    dist_out = dist_embedding_1d(x)
    assert_close(org_out, dist_out)

    # check backward correctness: each rank should hold exactly the gradient
    # slice that corresponds to its own vocabulary shard
    org_out.sum().backward()
    dist_out.sum().backward()

    rank = dist.get_rank()
    target_grad = torch.chunk(embedding.weight.grad, 2, dim=0)[rank]
    assert_close(target_grad, dist_embedding_1d.weight.grad)


def run_dist(rank, world_size, port):
    colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
    check_vocab_embedding_1d()


@rerun_if_address_is_in_use()
def test_vocab_embedding():
    spawn(run_dist, nprocs=2)


if __name__ == '__main__':
    test_vocab_embedding()
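
For context on what the assertions above imply, here is a minimal sketch of the Megatron-style vocab-parallel scheme the test exercises: each rank keeps a contiguous slice of the vocabulary rows, masks out indices it does not own, and all-reduces the partial lookups so every rank sees the full output. This is an illustrative reconstruction under those assumptions, not ColossalAI's actual implementation; the names NaiveVocabParallelEmbedding, _AllReduceSum, and from_full are hypothetical.

import torch
import torch.distributed as dist
import torch.nn as nn


class _AllReduceSum(torch.autograd.Function):
    # Sum partial results across ranks. Because the backward of a sum with
    # respect to each addend is the identity, the incoming gradient passes
    # straight through to every rank.

    @staticmethod
    def forward(ctx, x, group):
        x = x.clone()
        dist.all_reduce(x, group=group)
        return x

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output, None


class NaiveVocabParallelEmbedding(nn.Module):
    # Illustrative sketch (not ColossalAI's code): the vocabulary is split
    # row-wise across ranks, so a 128x32 table on 2 ranks becomes 64x32 per
    # rank, matching the shape asserts in the test above.

    def __init__(self, num_embeddings, embedding_dim, process_group=None):
        super().__init__()
        self.process_group = process_group
        world_size = dist.get_world_size(process_group)
        rank = dist.get_rank(process_group)
        assert num_embeddings % world_size == 0
        self.shard_size = num_embeddings // world_size
        self.vocab_start = rank * self.shard_size  # first row this rank owns
        self.vocab_end = self.vocab_start + self.shard_size
        self.weight = nn.Parameter(torch.empty(self.shard_size, embedding_dim))
        nn.init.normal_(self.weight)

    @classmethod
    def from_full(cls, full, process_group=None):
        # analogous in spirit to from_native_module: copy this rank's slice
        # of the full weight into the local shard
        module = cls(full.num_embeddings, full.embedding_dim, process_group)
        with torch.no_grad():
            module.weight.copy_(full.weight[module.vocab_start:module.vocab_end])
        return module

    def forward(self, x):
        # indices owned by other ranks are remapped to local row 0, then
        # their outputs are zeroed so they contribute nothing to the sum
        mask = (x < self.vocab_start) | (x >= self.vocab_end)
        local_idx = (x - self.vocab_start).masked_fill(mask, 0)
        out = nn.functional.embedding(local_idx, self.weight)
        out = out.masked_fill(mask.unsqueeze(-1), 0.0)
        return _AllReduceSum.apply(out, self.process_group)

Under this sketch, the all-reduced output equals the dense nn.Embedding output, and each rank's weight.grad equals torch.chunk(full_grad, world_size, dim=0)[rank], which is exactly the pair of properties the test asserts; the real VocabParallelEmbedding1D presumably layers extra device/dtype plumbing on the same idea.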