mirror of https://github.com/hpcaitech/ColossalAI
[Test/CI] remove test cases to reduce CI duration (#5753)
* [test] smaller gpt2 test case
* [test] reduce test cases: tests/test_zero/test_gemini/test_zeroddp_state_dict.py
* [test] reduce test cases: tests/test_zero/test_gemini/test_grad_accum.py
* [test] reduce test cases: tests/test_zero/test_gemini/test_optim.py
* Revert "[test] smaller gpt2 test case"
Some tests might depend on the model size (number of chunks).
This reverts commit df705a5210.
* [test] reduce test cases: tests/test_checkpoint_io/test_gemini_checkpoint_io.py
* [CI] smaller test model for the two modified cases
* [CI] hardcode the GPT model for tests/test_zero/test_gemini/test_search.py since a fixed answer is needed there
Branch: pull/5782/head
Parent: 79f7a7b211
Commit: 80c3c8789b
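Most of the savings come from shrinking parameterization matrices: stacked @parameterize / @pytest.mark.parametrize decorators effectively run a test once per combination of their lists, so each removed entry removes a whole slice of CI runs. A rough, illustrative cost model (plain Python, not ColossalAI code; the list sizes below are taken from the test_optim.py diff in this commit):

from itertools import product

# list sizes before and after the commit, per exam_model_step in test_optim.py
old = {"placement_config": 7, "mixed_precision": 2, "master_weights": 2, "enable_async_reduce": 2}
new = {"placement_config": 2, "mixed_precision": 2, "master_weights": 2, "enable_async_reduce": 1}

runs = lambda dims: len(list(product(*(range(n) for n in dims.values()))))
print(runs(old), "->", runs(new))  # 56 -> 8 combinations per model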
tests/kit/model_zoo/transformers/gpt.py (path inferred from the hunks; the commit message only calls this the GPT-2 test model):

@@ -18,23 +18,8 @@ def data_gen():
     # tokenized_input = tokenizer(input, return_tensors='pt')
     # input_ids = tokenized_input['input_ids']
     # attention_mask = tokenized_input['attention_mask']
-    # input_ids = torch.tensor([[15496, 11, 616, 3290, 318, 13779, 318, 13779]], dtype=torch.int64)
-    # attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1]], dtype=torch.int64)
-    input_ids = torch.tensor(
-        [
-            [15496, 11, 616, 3290, 318, 13779, 318, 13779, 15496, 11, 616, 3290, 318, 13779, 318, 13779],
-            [15496, 11, 616, 3290, 318, 13779, 318, 13779, 15496, 11, 616, 3290, 318, 13779, 318, 13779],
-        ],
-        dtype=torch.int64,
-    )
-    attention_mask = torch.tensor(
-        [
-            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
-            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
-        ],
-        dtype=torch.int64,
-    )
-
+    input_ids = torch.tensor([[22, 11, 616, 4, 5, 13, 318, 345]], dtype=torch.int64)
+    attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1]], dtype=torch.int64)
     return dict(input_ids=input_ids, attention_mask=attention_mask)

@@ -50,9 +35,9 @@ def data_gen_for_question_answering():
     # question answering data gen
     # `labels` is the type not the token id for token classification, 0 or 1
     data = data_gen()
-    start_positions = torch.tensor([[0], [0]], dtype=torch.int64)
+    start_positions = torch.tensor([0], dtype=torch.int64)
     data["start_positions"] = start_positions
-    end_positions = torch.tensor([[1], [1]], dtype=torch.int64)
+    end_positions = torch.tensor([1], dtype=torch.int64)
     data["end_positions"] = end_positions
     return data

@@ -61,20 +46,14 @@ def data_gen_for_token_classification():
     # token classification data gen
     # `labels` is the type not the token id for token classification, 0 or 1
     data = data_gen()
-    data["labels"] = torch.tensor(
-        [
-            [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1],
-            [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1],
-        ],
-        dtype=torch.int64,
-    )
+    data["labels"] = torch.tensor([[0, 0, 0, 0, 0, 0, 0, 1]], dtype=torch.int64)
     return data


 def data_gen_for_sequence_classification():
     # sequence classification data gen
     data = data_gen()
-    data["labels"] = torch.tensor([[1], [1]], dtype=torch.int64)
+    data["labels"] = torch.tensor([1], dtype=torch.int64)
     return data

@@ -82,18 +61,12 @@ def date_gen_for_double_heads():
     num_choices = 2
     batch_size = 2
     input_ids = torch.tensor(
-        [
-            [15496, 11, 616, 3290, 318, 13779, 318, 13779, 15496, 11, 616, 3290, 318, 13779, 318, 13779],
-            [15496, 11, 616, 3290, 318, 13779, 318, 13779, 15496, 11, 616, 3290, 318, 13779, 318, 13779],
-        ],
+        [[46, 11, 616, 432, 318, 19, 318, 555], [777, 11, 235, 333, 318, 231, 468, 136]],
         dtype=torch.int64,
     )
-    attention_mask = torch.tensor(
-        [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],
-        dtype=torch.int64,
-    )
+    attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1]], dtype=torch.int64)

     mc_labels = torch.zeros(input_ids.shape[0], dtype=torch.int64)

     mc_token_ids = torch.arange(0, num_choices, dtype=torch.int64)
     mc_token_ids = mc_token_ids.expand((batch_size, num_choices))
     multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()

@@ -122,14 +95,14 @@ config = transformers.GPT2Config(
     n_layer=2,
     n_head=4,
     n_embd=128,
-    vocab_size=50258,
+    vocab_size=1024,
     attn_pdrop=0,
     embd_pdrop=0,
     resid_pdrop=0,
     summary_first_dropout=0,
     hidden_dropout=0,
     problem_type="single_label_classification",
-    pad_token_id=50256,
+    pad_token_id=1022,
     tie_word_embeddings=True,
 )
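With the vocabulary shrunk to 1024, every token id in the regenerated fixtures must stay below 1024 or the embedding lookup fails, which is why data_gen() now uses small ids. A standalone sanity check (not part of the commit; assumes only torch and transformers are installed):

import torch
import transformers

# mirrors the reduced test config: any id >= vocab_size would break the embedding lookup
config = transformers.GPT2Config(n_layer=2, n_head=4, n_embd=128, vocab_size=1024, pad_token_id=1022)
model = transformers.GPT2LMHeadModel(config)

input_ids = torch.tensor([[22, 11, 616, 4, 5, 13, 318, 345]], dtype=torch.int64)
attention_mask = torch.ones_like(input_ids)
logits = model(input_ids=input_ids, attention_mask=attention_mask).logits
print(logits.shape)  # torch.Size([1, 8, 1024])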
tests/test_checkpoint_io/test_gemini_checkpoint_io.py:

@@ -21,14 +21,10 @@ from colossalai.testing import (
 from tests.kit.model_zoo import model_zoo

 MODEL_PLACEMENT_CONFIGS = [
-    {"placement_policy": "static", "shard_param_frac": 0.0},  # zero2
-    {"placement_policy": "static", "shard_param_frac": 1.0},  # zero3
-    {"placement_policy": "static", "shard_param_frac": 0.5},  # zero3-half
+    {"placement_policy": "static", "shard_param_frac": 0.5},
 ]

 OPTIM_PLACEMENT_CONFIGS = [
-    {"placement_policy": "static", "shard_param_frac": 0.0, "offload_optim_frac": 0.0},  # zero2
-    {"placement_policy": "static", "shard_param_frac": 0.0, "offload_optim_frac": 1.0},  # zero2-offload
     {"placement_policy": "static", "shard_param_frac": 0.0, "offload_optim_frac": 0.5},  # zero2-offload-half
 ]
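Each retained dict is splatted straight into the Gemini plugin, so keeping only the 0.5 mid-point still covers a partially sharded setup with one run instead of three. A hedged sketch of how such a config is consumed (kwargs follow GeminiPlugin's signature in recent ColossalAI releases; this is not code from the commit):

from colossalai.booster import Booster
from colossalai.booster.plugin import GeminiPlugin

placement_config = {"placement_policy": "static", "shard_param_frac": 0.5}  # the kept model config
plugin = GeminiPlugin(**placement_config, precision="fp16", initial_scale=2**5)
booster = Booster(plugin=plugin)
# model, optimizer, *_ = booster.boost(model, optimizer)  # as the checkpoint IO test does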
tests/test_zero/test_gemini/test_grad_accum.py:

@@ -15,9 +15,7 @@ from colossalai.zero.gemini.chunk import search_chunk_configuration
 from tests.kit.model_zoo import model_zoo, run_fwd

 PLACEMENT_CONFIGS = [
-    {"placement_policy": "static", "shard_param_frac": 0.0},  # zero2
-    {"placement_policy": "static", "shard_param_frac": 1.0},  # zero3
-    {"placement_policy": "static", "shard_param_frac": 0.5},  # zero3-half
+    {"placement_policy": "static", "shard_param_frac": 0.75},
     {"placement_policy": "auto"},
 ]

@@ -109,7 +107,7 @@ def exam_gemini_grad_acc(
     torch_model = DDP(torch_model, device_ids=[rank])

     set_seed(rank)
-    accum_iter = 4
+    accum_iter = 2
     train_dataloader = DummyDataloader(data_gen_fn)
     for i, data in enumerate(train_dataloader):
         delay_unscale = False if (i + 1) % accum_iter == 0 else True
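Halving accum_iter halves the number of forward/backward passes needed before each checked optimizer step. The pattern the test exercises, written as a plain-PyTorch analogue (illustration only, not the Gemini code path):

import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
accum_iter = 2  # the value the test now uses

for i in range(4):
    x = torch.randn(8, 4)
    loss = model(x).pow(2).mean() / accum_iter  # scale so accumulated grads match one large batch
    loss.backward()
    if (i + 1) % accum_iter == 0:  # same condition as the delay_unscale flag above
        optimizer.step()
        optimizer.zero_grad()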
tests/test_zero/test_gemini/test_optim.py:

@@ -15,17 +15,7 @@ from colossalai.zero.gemini.chunk import search_chunk_configuration
 from tests.kit.model_zoo import model_zoo, run_fwd_bwd

 PLACEMENT_CONFIGS = [
-    {"placement_policy": "static", "shard_param_frac": 0.0, "offload_optim_frac": 0.0},  # zero2
-    {"placement_policy": "static", "shard_param_frac": 0.0, "offload_optim_frac": 1.0},  # zero2-offload
-    {"placement_policy": "static", "shard_param_frac": 0.0, "offload_optim_frac": 0.5},  # zero2-offload-half
-    {"placement_policy": "static", "shard_param_frac": 1.0},  # zero3
-    {"placement_policy": "static", "shard_param_frac": 0.5},  # zero3-half
-    {
-        "placement_policy": "static",
-        "shard_param_frac": 1.0,
-        "offload_optim_frac": 1.0,
-        "offload_param_frac": 1.0,
-    },  # zero3-offload-all
+    {"placement_policy": "static", "shard_param_frac": 0.3, "offload_param_frac": 0.3, "offload_optim_frac": 0.3},
     {"placement_policy": "auto"},
 ]

@@ -73,7 +63,7 @@ def check_param(model: GeminiDDP, torch_model: torch.nn.Module, dtype: torch.dtype):
 @parameterize("model_name", TEST_MODELS)
 @parameterize("mixed_precision", [torch.half, torch.bfloat16])
 @parameterize("master_weights", [True, False])
-@parameterize("enable_async_reduce", [False, True])
+@parameterize("enable_async_reduce", [True])
 def exam_model_step(
     placement_config, model_name: str, mixed_precision: torch.dtype, master_weights: bool, enable_async_reduce=True
 ):

@@ -136,7 +126,7 @@ def exam_model_step(
     check_param(model, torch_model, mixed_precision)


-@parameterize("placement_config", [PLACEMENT_CONFIGS[3]])
+@parameterize("placement_config", [{"placement_policy": "static", "shard_param_frac": 1.0}])
 @parameterize("model_name", EXAMPLE_MODELS)
 @parameterize("mixed_precision", [torch.half])
 def exam_tiny_example(placement_config, model_name: str, mixed_precision: torch.dtype):

@@ -197,7 +187,7 @@ def run_dist(rank, world_size, port):


 @pytest.mark.dist
-@pytest.mark.parametrize("world_size", [1, 4])
+@pytest.mark.parametrize("world_size", [4])
 @rerun_if_address_is_in_use()
 def test_optim(world_size):
     spawn(run_dist, world_size)
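Replacing PLACEMENT_CONFIGS[3] with an explicit dict keeps exam_tiny_example on the full-sharding setting even though the shortened list no longer has a fourth entry. The distributed entry point itself only drops the single-process run; a minimal stand-in for what the spawn call does (hedged sketch using plain torch.multiprocessing rather than colossalai.testing.spawn):

import torch.multiprocessing as mp

def run_dist(rank: int, world_size: int):
    # the real test would call colossalai.launch(...) here and then run the exam_* functions
    print(f"rank {rank} of {world_size}")

if __name__ == "__main__":
    world_size = 4  # the only world size still exercised after this commit
    mp.spawn(run_dist, args=(world_size,), nprocs=world_size)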
tests/test_zero/test_gemini/test_search.py:

@@ -1,18 +1,31 @@
 import pytest
 import torch
+import transformers

 import colossalai
 from colossalai.accelerator import get_accelerator
 from colossalai.testing import rerun_if_address_is_in_use, spawn
 from colossalai.zero.gemini.chunk import init_chunk_manager, search_chunk_configuration
-from tests.kit.model_zoo import model_zoo
+
+CONFIG = transformers.GPT2Config(
+    n_layer=2,
+    n_head=4,
+    n_embd=128,
+    vocab_size=50258,
+    attn_pdrop=0,
+    embd_pdrop=0,
+    resid_pdrop=0,
+    summary_first_dropout=0,
+    hidden_dropout=0,
+    problem_type="single_label_classification",
+    pad_token_id=50256,
+    tie_word_embeddings=True,
+)
+
+model_builder = lambda: transformers.GPT2LMHeadModel(CONFIG)


 def exam_search_chunk_size():
-    model_builder, data_gen_fn, output_transform_fn, *_ = next(
-        iter(model_zoo.get_sub_registry("transformers_gpt_lm").values())
-    )
-
     # make sure torch_model and model has the same parameter values
     model = model_builder()
     config_dict, *_ = search_chunk_configuration(

@@ -27,10 +40,6 @@ def exam_search_chunk_size():
 def exam_chunk_manager():
     world_size = torch.distributed.get_world_size()

-    model_builder, data_gen_fn, output_transform_fn, *_ = next(
-        iter(model_zoo.get_sub_registry("transformers_gpt_lm").values())
-    )
-
     sharded_ddp_model = model_builder()
     chunk_manager = init_chunk_manager(
         sharded_ddp_model,
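Pinning the GPT-2 config inside the test decouples the chunk-search assertions from the model zoo: the expected chunk sizes are a function of the parameter shapes, so any later change to the transformers_gpt_lm registry entry would silently move the expected numbers. A quick illustration of that dependence (standalone sketch, not part of the commit):

import transformers

CONFIG = transformers.GPT2Config(n_layer=2, n_head=4, n_embd=128, vocab_size=50258, pad_token_id=50256)
model = transformers.GPT2LMHeadModel(CONFIG)

# the chunk search operates on these parameter shapes; keeping CONFIG fixed keeps the answer fixed
num_params = sum(p.numel() for p in model.parameters())
print(num_params)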
tests/test_zero/test_gemini/test_zeroddp_state_dict.py:

@@ -10,9 +10,7 @@ from colossalai.zero.gemini.chunk import search_chunk_configuration
 from tests.kit.model_zoo import model_zoo

 PLACEMENT_CONFIGS = [
-    {"placement_policy": "static", "shard_param_frac": 0.0},  # zero2
-    {"placement_policy": "static", "shard_param_frac": 1.0},  # zero3
-    {"placement_policy": "static", "shard_param_frac": 0.5},  # zero3-half
+    {"placement_policy": "static", "shard_param_frac": 0.75},
     {"placement_policy": "auto"},
 ]

@@ -26,8 +24,8 @@ def ignore_the_first_parameter(model: torch.nn.Module):

 @parameterize("placement_config", PLACEMENT_CONFIGS)
 @parameterize("keep_gathered", [True, False])
-@parameterize("model_name", ["transformers_gpt_lm", "transformers_bert_for_sequence_classification"])
-@parameterize("master_weights", [False, True])
+@parameterize("model_name", ["transformers_gpt_lm"])
+@parameterize("master_weights", [True, False])
 def exam_state_dict(placement_config, keep_gathered, model_name: str, master_weights: bool):
     set_seed(431)
     model_builder, data_gen_fn, output_transform_fn, *_ = next(iter(model_zoo.get_sub_registry(model_name).values()))

@@ -81,7 +79,7 @@ def run_dist(rank, world_size, port):


 @pytest.mark.dist
-@pytest.mark.parametrize("world_size", [1, 4])
+@pytest.mark.parametrize("world_size", [4])
 @rerun_if_address_is_in_use()
 def test_zero_ddp(world_size):
     spawn(run_dist, world_size)
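Dropping the BERT variant and the single-process world size trims the matrix without changing what the test asserts: a key-by-key state-dict comparison after the Gemini save/load round trip. In plain PyTorch terms (sketch only, not the Gemini implementation):

import torch

reference = torch.nn.Linear(4, 4)
restored = torch.nn.Linear(4, 4)
restored.load_state_dict(reference.state_dict())  # stands in for the Gemini save/load round trip

for key, value in reference.state_dict().items():
    torch.testing.assert_close(value, restored.state_dict()[key])  # same check idea as exam_state_dict
print("state dicts match")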