diff --git a/tests/test_tensor/test_embedding_tp.py b/tests/test_tensor/test_embedding_tp.py
index d1ea5cb3c..922992a5a 100644
--- a/tests/test_tensor/test_embedding_tp.py
+++ b/tests/test_tensor/test_embedding_tp.py
@@ -8,7 +8,7 @@ import colossalai
 import pytest
 import torch
 import torch.multiprocessing as mp
-from colossalai.testing import parameterize, rerun_if_address_is_in_use
+from colossalai.testing import rerun_if_address_is_in_use
 from colossalai.utils.cuda import get_current_device
 from colossalai.utils import free_port
 from colossalai.core import global_context as gpc
@@ -121,7 +121,7 @@ def run_dist(rank, world_size, port):
     run_embedding_tp1d_row_test()
 
 @pytest.mark.dist
-@parameterize('world_size', [1, 4])
+@pytest.mark.parametrize('world_size', [1, 4])
 @rerun_if_address_is_in_use()
 def test_embedding_1d(world_size):
     run_func = partial(run_dist, world_size=world_size, port=free_port())
diff --git a/tests/test_tensor/test_linear_tp.py b/tests/test_tensor/test_linear_tp.py
index a57943f5d..83b85156c 100644
--- a/tests/test_tensor/test_linear_tp.py
+++ b/tests/test_tensor/test_linear_tp.py
@@ -8,7 +8,7 @@ import colossalai
 import pytest
 import torch
 import torch.multiprocessing as mp
-from colossalai.testing import parameterize, rerun_if_address_is_in_use
+from colossalai.testing import rerun_if_address_is_in_use
 from colossalai.utils.cuda import get_current_device
 from colossalai.utils import free_port
 from colossalai.core import global_context as gpc
@@ -149,7 +149,7 @@ def run_dist(rank, world_size, port):
     run_linear_tp1d_col_test()
 
 @pytest.mark.dist
-@parameterize('world_size', [1, 4])
+@pytest.mark.parametrize('world_size', [1, 4])
 @rerun_if_address_is_in_use()
 def test_linear_1d(world_size):
     run_func = partial(run_dist, world_size=world_size, port=free_port())