[FAW] move coloparam setting in test code. (#1429)

pull/1425/head^2
Jiarui Fang 2022-08-10 14:31:53 +08:00 committed by GitHub
parent cb98cf5558
commit 10b3df65c8
2 changed files with 6 additions and 6 deletions


@@ -67,9 +67,6 @@ class ParallelFreqAwareEmbeddingBag(BaseEmbeddingBag):
             self.init_parameters()
         else:
             assert isinstance(_weight, ColoParameter), "initialized weight must in type of ColoParameter"
-            _weight.process_group = ProcessGroup(tp_degree=self.world_size)
-            _weight.set_tensor_spec(ShardSpec(dims=[-1], num_partitions=[self.world_size]),
-                                    ComputeSpec(ComputePattern.TP1D))
             self._weight = _weight
 
     @property
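
With those three lines removed, ParallelFreqAwareEmbeddingBag no longer attaches a process group or tensor spec to a user-supplied _weight; a caller that passes in a pre-built ColoParameter is now expected to configure it beforehand. The updated test below does exactly that (a condensed sketch follows the test hunk).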


@@ -8,11 +8,9 @@ import random
 import colossalai
 from colossalai.utils import free_port
 from colossalai.testing import rerun_if_address_is_in_use
-from colossalai.tensor import ColoParameter
+from colossalai.tensor import ColoParameter, ProcessGroup, ShardSpec, ComputePattern, ComputeSpec
 from colossalai.nn._ops.cache_embedding import CachedParamMgr, FreqAwareEmbeddingBag, ParallelFreqAwareEmbeddingBag
-from colossalai.nn._ops.cache_embedding import CachedParamMgr, FreqAwareEmbeddingBag
 
 NUM_EMBED, EMBED_DIM = 10, 8
 BATCH_SIZE = 8
@@ -161,6 +159,11 @@ def run_parallel_freq_aware_embed(rank, world_size):
     weight = torch.rand(num_embed, embed_dim)
     coloweight = ColoParameter(weight.clone().detach().cpu(), requires_grad=False)
+
+    # initialize the tensor spec for the embedding weight parameter,
+    # which is a ColoParameter.
+    coloweight.process_group = ProcessGroup(tp_degree=world_size)
+    coloweight.set_tensor_spec(ShardSpec(dims=[-1], num_partitions=[world_size]), ComputeSpec(ComputePattern.TP1D))
 
     model = ParallelFreqAwareEmbeddingBag.from_pretrained(coloweight,
                                                           include_last_offset=True,
                                                           freeze=False,
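
For reference, here is a condensed, hedged sketch of the initialization flow the updated test now exercises. Every identifier appears in the diff above; the helper name build_parallel_embedding is hypothetical, the from_pretrained call is truncated in this view so any keyword arguments beyond the two visible are assumed to have workable defaults, and, as in the test, a distributed context (colossalai launched with world_size ranks) is assumed to be up before ProcessGroup is created.

import torch
from colossalai.tensor import ColoParameter, ProcessGroup, ShardSpec, ComputePattern, ComputeSpec
from colossalai.nn._ops.cache_embedding import ParallelFreqAwareEmbeddingBag

NUM_EMBED, EMBED_DIM = 10, 8

def build_parallel_embedding(world_size: int):
    # Create the full (unsharded) weight on CPU and wrap it as a ColoParameter.
    weight = torch.rand(NUM_EMBED, EMBED_DIM)
    coloweight = ColoParameter(weight.clone().detach().cpu(), requires_grad=False)

    # Attach the tensor-parallel process group and shard the last (embedding) dim,
    # as the test hunk above now does; before this commit the class did this itself.
    coloweight.process_group = ProcessGroup(tp_degree=world_size)
    coloweight.set_tensor_spec(ShardSpec(dims=[-1], num_partitions=[world_size]),
                               ComputeSpec(ComputePattern.TP1D))

    # Hand the prepared parameter to the embedding; arguments beyond the two visible
    # in the truncated diff are left at their defaults here (an assumption).
    return ParallelFreqAwareEmbeddingBag.from_pretrained(coloweight,
                                                         include_last_offset=True,
                                                         freeze=False)

Keeping the spec setup at the call site means the library code no longer mutates a parameter handed to it, which appears to be the point of moving these lines into the test.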