diff --git a/tests/test_fx/test_comm_size_compute.py b/tests/test_fx/test_comm_size_compute.py
index bc040bcca..f89d7c06a 100644
--- a/tests/test_fx/test_comm_size_compute.py
+++ b/tests/test_fx/test_comm_size_compute.py
@@ -6,6 +6,7 @@ from torch.fx import symbolic_trace
 from colossalai.fx.passes.meta_info_prop import MetaInfoProp
 from colossalai.fx.passes.adding_split_node_pass import split_with_split_nodes_pass, uniform_split_pass
 from colossalai.fx.passes.utils import get_comm_size
+import pytest
 
 MODEL_DIM = 16
 BATCH_SIZE = 8
@@ -29,6 +30,7 @@ class MLP(torch.nn.Module):
         return x
 
 
+@pytest.mark.skip('skip due to CI environment')
 def test_comm_size_compute():
     model = MLP(MODEL_DIM)
     input_sample = torch.rand(BATCH_SIZE, MODEL_DIM)
diff --git a/tests/test_fx/test_pipeline_passes.py b/tests/test_fx/test_pipeline_passes.py
index 228fcb880..54619d25c 100644
--- a/tests/test_fx/test_pipeline_passes.py
+++ b/tests/test_fx/test_pipeline_passes.py
@@ -5,6 +5,7 @@ import colossalai.nn as col_nn
 from torch.fx import symbolic_trace
 from colossalai.fx.passes.adding_split_node_pass import split_with_split_nodes_pass, balanced_split_pass, \
     uniform_split_pass
+import pytest
 
 MODEL_DIM = 16
 BATCH_SIZE = 8
@@ -37,6 +38,7 @@ def pipeline_pass_test_helper(model, data, pass_func):
     assert output.equal(origin_output)
 
 
+@pytest.mark.skip('skip due to CI environment')
 def test_pipeline_passes():
     model = MLP(MODEL_DIM)
     data = torch.rand(BATCH_SIZE, MODEL_DIM)
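
Note: the diff above disables both tests unconditionally via pytest's skip marker. For reference, here is a minimal sketch (not part of this PR) of how the skip mechanics behave, including a skipif variant gated on an environment check; the CI_ENV variable name is a hypothetical placeholder, not something the CI setup here is known to define.

# skip_sketch.py -- illustration only, assumes pytest is installed
import os

import pytest


@pytest.mark.skip('skip due to CI environment')
def test_always_skipped():
    # pytest reports this test as skipped; its body is never executed.
    assert False


@pytest.mark.skipif(os.environ.get('CI_ENV') == 'true', reason='skip only on CI')
def test_conditionally_skipped():
    # skipif evaluates the condition at collection time, so the test still
    # runs locally where CI_ENV is unset.
    assert 1 + 1 == 2

Running `pytest skip_sketch.py -rs` would show the first test as skipped with the given reason, while the second runs unless CI_ENV is set to 'true'.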