
[devops] add large-scale distributed test marker (#4452)

* [test] remove cpu marker

* [test] remove gpu marker

* [test] update pytest markers

* [ci] update unit test ci
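
After this change only two markers remain (see the pytest.ini diff below): `dist` for tests that need a multi-GPU or multi-machine environment (at least 4 GPUs), and the new `largedist` for tests that need at least 8 GPUs; tests without a marker are simply picked up by the regular PR job. A minimal sketch of how the remaining markers are meant to be applied; the test names and bodies are illustrative only, not taken from this commit:

import pytest

# Assumes the marker registration added to pytest.ini by this commit.


@pytest.mark.dist          # multi-GPU / multi-machine environment, at least 4 GPUs
def test_runs_on_4_gpus():    # hypothetical test name, for illustration only
    ...


@pytest.mark.largedist     # multi-GPU / multi-machine environment, at least 8 GPUs
def test_runs_on_8_gpus():    # hypothetical test name, for illustration only
    ...
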
Hongxin Liu committed 26e29d58f0 via GitHub, 1 year ago (branch: pull/4465/head)
  1. .github/workflows/build_on_pr.yml (2 changes)
  2. .github/workflows/compatiblity_test_on_dispatch.yml (2 changes)
  3. .github/workflows/compatiblity_test_on_pr.yml (2 changes)
  4. .github/workflows/compatiblity_test_on_schedule.yml (2 changes)
  5. applications/Chat/tests/test_dataset.py (79 changes)
  6. applications/Chat/tests/test_models.py (105 changes)
  7. pytest.ini (6 changes)
  8. tests/test_config/test_load_config.py (1 change)
  9. tests/test_context/test_hybrid_parallel.py (1 change)
  10. tests/test_data/test_cifar10_dataset.py (3 changes)
  11. tests/test_data/test_data_parallel_sampler.py (1 change)
  12. tests/test_data/test_deterministic_dataloader.py (1 change)
  13. tests/test_utils/test_activation_checkpointing.py (1 change)

.github/workflows/build_on_pr.yml (2 changes)

@@ -208,7 +208,7 @@ jobs:
       - name: Execute Unit Testing
         run: |
-          CURL_CA_BUNDLE="" PYTHONPATH=$PWD pytest --testmon --testmon-cov=. --durations=10 tests/
+          CURL_CA_BUNDLE="" PYTHONPATH=$PWD pytest -m "not largedist" --testmon --testmon-cov=. --durations=10 tests/
         env:
           DATA: /data/scratch/cifar-10
           NCCL_SHM_DISABLE: 1

.github/workflows/compatiblity_test_on_dispatch.yml (2 changes)

@@ -44,7 +44,7 @@ jobs:
     name: Test for PyTorch Compatibility
     needs: matrix_preparation
     if: github.repository == 'hpcaitech/ColossalAI'
-    runs-on: [self-hosted, gpu]
+    runs-on: [self-hosted, 8-gpu]
     strategy:
       fail-fast: false
       matrix: ${{fromJson(needs.matrix_preparation.outputs.matrix)}}

.github/workflows/compatiblity_test_on_pr.yml (2 changes)

@@ -35,7 +35,7 @@ jobs:
     name: Test for PyTorch Compatibility
     needs: matrix_preparation
     if: github.repository == 'hpcaitech/ColossalAI'
-    runs-on: [self-hosted, gpu]
+    runs-on: [self-hosted, 8-gpu]
     strategy:
       fail-fast: false
       matrix: ${{fromJson(needs.matrix_preparation.outputs.matrix)}}

.github/workflows/compatiblity_test_on_schedule.yml (2 changes)

@@ -32,7 +32,7 @@ jobs:
     name: Test for PyTorch Compatibility
     needs: matrix_preparation
     if: github.repository == 'hpcaitech/ColossalAI'
-    runs-on: [self-hosted, gpu]
+    runs-on: [self-hosted, 8-gpu]
     strategy:
       fail-fast: false
       matrix: ${{fromJson(needs.matrix_preparation.outputs.matrix)}}

applications/Chat/tests/test_dataset.py (79 changes)

@@ -14,29 +14,43 @@ from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
 SFT_DATASET = [
     {
-        "instruction": "Provide a list of the top 10 most popular mobile games in Asia",
-        "input": "",
-        "output": "The top 10 most popular mobile games in Asia are:\n1) PUBG Mobile\n2) Pokemon Go\n3) Candy Crush Saga\n4) Free Fire\n5) Clash of Clans\n6) Mario Kart Tour\n7) Arena of Valor\n8) Fantasy Westward Journey\n9) Subway Surfers\n10) ARK Survival Evolved",
-        "id": 0
+        "instruction":
+            "Provide a list of the top 10 most popular mobile games in Asia",
+        "input":
+            "",
+        "output":
+            "The top 10 most popular mobile games in Asia are:\n1) PUBG Mobile\n2) Pokemon Go\n3) Candy Crush Saga\n4) Free Fire\n5) Clash of Clans\n6) Mario Kart Tour\n7) Arena of Valor\n8) Fantasy Westward Journey\n9) Subway Surfers\n10) ARK Survival Evolved",
+        "id":
+            0
     },
     {
-        "instruction": "Please provide an action plan for reducing carbon footprint on a corporate level",
-        "input": "",
-        "output": "An action plan for reducing carbon footprint on a corporate level could include switching to renewable energy sources (e.g., solar or wind powered), investing in energy efficient infrastructure, implementing green production practices (e.g., reduce water and paper usage, dispose of materials responsibly, recycle/reuse materials), reducing or eliminating travel, encouraging employees to be conscious of and reduce their own energy usage, and incentivizing employees to participate in sustainable work practices.",
-        "id": 1
+        "instruction":
+            "Please provide an action plan for reducing carbon footprint on a corporate level",
+        "input":
+            "",
+        "output":
+            "An action plan for reducing carbon footprint on a corporate level could include switching to renewable energy sources (e.g., solar or wind powered), investing in energy efficient infrastructure, implementing green production practices (e.g., reduce water and paper usage, dispose of materials responsibly, recycle/reuse materials), reducing or eliminating travel, encouraging employees to be conscious of and reduce their own energy usage, and incentivizing employees to participate in sustainable work practices.",
+        "id":
+            1
     },
     {
-        "instruction": "Write a persuasive email to your boss explaining why you should have a pay raise",
-        "input": "",
-        "output": "Dear [Boss], \n\nI hope this message finds you well. I am writing to request a pay raise.\n\nAs you know, I have been a dedicated and hardworking member of the team since I started working here [insert number] of months/years ago. My enthusiasm and passion for my job has remained consistent over the years, and I have always given 100% to my role. \n\nI understand that the current financial situation is challenging, however, I would sincerely appreciate you taking the time to consider my request. I believe that my dedication to the job and the value that I bring to the organization warrants a raise. I work diligently and am confident that I can continue to be an asset to the company. \n\nI hope my request is taken into account and I thank you in advance for your understanding. I look forward to our conversation. \n\nSincerely,\n[Your Name]",
-        "id": 2
+        "instruction":
+            "Write a persuasive email to your boss explaining why you should have a pay raise",
+        "input":
+            "",
+        "output":
+            "Dear [Boss], \n\nI hope this message finds you well. I am writing to request a pay raise.\n\nAs you know, I have been a dedicated and hardworking member of the team since I started working here [insert number] of months/years ago. My enthusiasm and passion for my job has remained consistent over the years, and I have always given 100% to my role. \n\nI understand that the current financial situation is challenging, however, I would sincerely appreciate you taking the time to consider my request. I believe that my dedication to the job and the value that I bring to the organization warrants a raise. I work diligently and am confident that I can continue to be an asset to the company. \n\nI hope my request is taken into account and I thank you in advance for your understanding. I look forward to our conversation. \n\nSincerely,\n[Your Name]",
+        "id":
+            2
     },
 ]
 PROMPT_DATASET = [
     {
-        "instruction": "Edit this paragraph to make it more concise: \"Yesterday, I went to the store and bought some things. Then, I came home and put them away. After that, I went for a walk and met some friends.\"",
-        "id": 0
+        "instruction":
+            "Edit this paragraph to make it more concise: \"Yesterday, I went to the store and bought some things. Then, I came home and put them away. After that, I went for a walk and met some friends.\"",
+        "id":
+            0
     },
     {
         "instruction": "Write a descriptive paragraph about a memorable vacation you went on",
@@ -71,9 +85,7 @@ def make_tokenizer(model: str):
     return tokenizer


-def check_content(input_ids_stripped: torch.Tensor,
-                  tokenizer: PreTrainedTokenizer,
-                  model: str):
+def check_content(input_ids_stripped: torch.Tensor, tokenizer: PreTrainedTokenizer, model: str):
     if model == "opt":
         # NOTE: Contrary to GPT2, OPT adds the EOS token </s> to the beginning of every prompt.
         assert input_ids_stripped[0] == tokenizer.eos_token_id
@@ -90,13 +102,10 @@ def check_content(input_ids_stripped: torch.Tensor,
         assert input_ids_stripped != tokenizer.mask_token_id


-@pytest.mark.cpu
 @pytest.mark.parametrize("model", ["gpt2", "bloom", "opt", "llama"])
 @pytest.mark.parametrize("max_length", [32, 1024])
 @pytest.mark.parametrize("max_datasets_size", [2])
-def test_prompt_dataset(model: str,
-                        max_datasets_size: int,
-                        max_length: int):
+def test_prompt_dataset(model: str, max_datasets_size: int, max_length: int):
     with tempfile.TemporaryDirectory() as tmp_dir:
         dataset_name = "prompt_dataset.json"
         with open(os.path.join(tmp_dir, dataset_name), "w") as f:
@@ -119,19 +128,12 @@ def test_prompt_dataset(model: str,
         check_content(input_ids.masked_select(attention_mask), tokenizer, model)


-@pytest.mark.cpu
 @pytest.mark.parametrize("model", ["gpt2", "bloom", "opt", "llama"])
-@pytest.mark.parametrize(["dataset_path", "subset"], [
-    ("Anthropic/hh-rlhf", "harmless-base"),
-    ("Dahoas/rm-static", None)
-])
+@pytest.mark.parametrize(["dataset_path", "subset"], [("Anthropic/hh-rlhf", "harmless-base"),
+                                                      ("Dahoas/rm-static", None)])
 @pytest.mark.parametrize("max_datasets_size", [32])
 @pytest.mark.parametrize("max_length", [32, 1024])
-def test_reward_dataset(model: str,
-                        dataset_path: str,
-                        subset: Optional[str],
-                        max_datasets_size: int,
-                        max_length: int):
+def test_reward_dataset(model: str, dataset_path: str, subset: Optional[str], max_datasets_size: int, max_length: int):
     data = load_dataset(dataset_path, data_dir=subset)
     assert max_datasets_size <= len(data["train"]) \
         and max_datasets_size <= len(data["test"])
@@ -188,15 +190,11 @@ def test_reward_dataset(model: str,
     assert torch.all(r_mask)


-@pytest.mark.cpu
 @pytest.mark.parametrize("model", ["gpt2", "bloom", "opt", "llama"])
 @pytest.mark.parametrize("dataset_path", ["yizhongw/self_instruct", None])
 @pytest.mark.parametrize("max_dataset_size", [2])
 @pytest.mark.parametrize("max_length", [32, 1024])
-def test_sft_dataset(model: str,
-                     dataset_path: Optional[str],
-                     max_dataset_size: int,
-                     max_length: int):
+def test_sft_dataset(model: str, dataset_path: Optional[str], max_dataset_size: int, max_length: int):
     tokenizer = make_tokenizer(model)
     if dataset_path == "yizhongw/self_instruct":
         data = load_dataset(dataset_path, "super_natural_instructions")
@@ -232,10 +230,7 @@ def test_sft_dataset(model: str,


 if __name__ == "__main__":
-    test_sft_dataset(model="bloom",
-                     dataset_path="yizhongw/self_instruct",
-                     max_dataset_size=2,
-                     max_length=256)
+    test_sft_dataset(model="bloom", dataset_path="yizhongw/self_instruct", max_dataset_size=2, max_length=256)

     test_reward_dataset(model="gpt2",
                         dataset_path="Anthropic/hh-rlhf",
@@ -243,6 +238,4 @@ if __name__ == "__main__":
                         max_datasets_size=8,
                         max_length=256)

-    test_prompt_dataset(model="opt",
-                        max_datasets_size=2,
-                        max_length=128)
+    test_prompt_dataset(model="opt", max_datasets_size=2, max_length=128)

applications/Chat/tests/test_models.py (105 changes)

@@ -15,16 +15,17 @@ from coati.models.opt import OPTRM, OPTActor, OPTCritic
 from coati.models.utils import calc_action_log_probs, compute_reward, masked_mean


-@pytest.mark.gpu
 @pytest.mark.parametrize("batch_size", [4])
 @pytest.mark.parametrize("seq_len", [32])
-@pytest.mark.parametrize("actor_maker", [
-    lambda: BLOOMActor(),
-    lambda: GPTActor(),
+@pytest.mark.parametrize(
+    "actor_maker",
+    [
+        lambda: BLOOMActor(),
+        lambda: GPTActor(),
     # HACK: skip llama due to long execution time
     # lambda: LlamaActor(),
     lambda: OPTActor()
 ])
 @pytest.mark.parametrize("generate_kwargs", [{
     "max_length": 64,
     "use_cache": True,
@@ -32,23 +33,15 @@ from coati.models.utils import calc_action_log_probs, compute_reward, masked_mean
     "temperature": 1.0,
     "top_k": 50,
 }])
-def test_generation(actor_maker: Callable[[], Actor],
-                    batch_size: int,
-                    seq_len: int,
-                    generate_kwargs: Dict[str, Any]
-                    ):
+def test_generation(actor_maker: Callable[[], Actor], batch_size: int, seq_len: int, generate_kwargs: Dict[str, Any]):
     actor = actor_maker()
     input_ids = torch.randint(0, 100, (batch_size, seq_len)).cuda()
     sequences = generate(actor.cuda(), input_ids, **generate_kwargs)
     assert sequences.shape == (batch_size, generate_kwargs["max_length"])


-@pytest.mark.cpu
 def test_utils():
-    fn_input = {
-        "tensor": torch.ones((10, )),
-        "mask": torch.randint(0, 2, (10, ))
-    }
+    fn_input = {"tensor": torch.ones((10,)), "mask": torch.randint(0, 2, (10,))}
     fn_output = masked_mean(dim=0, **fn_input)
     assert fn_output.dim() == 0
     assert torch.allclose(fn_output, torch.tensor(1.0))
@@ -56,14 +49,14 @@ def test_utils():
     batch_size = 4
     num_labels = 10
     fn_input = {
-        "r": torch.ones((batch_size, )),
+        "r": torch.ones((batch_size,)),
         "kl_coef": 1.0,
         "log_probs": torch.randn((batch_size, num_labels)),
         "log_probs_base": torch.randn((batch_size, num_labels)),
         "action_mask": torch.randint(0, 2, (batch_size, num_labels))
     }
     fn_output = compute_reward(**fn_input)
-    assert fn_output.shape == (batch_size, )
+    assert fn_output.shape == (batch_size,)

     batch_size = 4
     seq_len = 32
@@ -80,17 +73,11 @@ def test_utils():
     assert fn_output.shape == (batch_size, num_actions)


-@pytest.mark.cpu
 @pytest.mark.parametrize("lora_rank", [4])
 @pytest.mark.parametrize("num_dim", [32])
 @pytest.mark.parametrize("num_layers", [4])
-def test_lora(lora_rank: int,
-              num_dim: int,
-              num_layers: int):
-    model = nn.ModuleList(
-        [nn.Linear(num_dim, num_dim)
-         for _ in range(num_layers)]
-    )
+def test_lora(lora_rank: int, num_dim: int, num_layers: int):
+    model = nn.ModuleList([nn.Linear(num_dim, num_dim) for _ in range(num_layers)])
     lora_model = convert_to_lora_module(model, lora_rank)
     assert isinstance(lora_model, nn.ModuleList)
     for i in range(num_layers):
@@ -103,8 +90,7 @@ def test_lora(lora_rank: int,
         assert isinstance(lora_model[i], LoraLinear)
         assert torch.allclose(old_model[i].weight, lora_model[i].weight)
         assert torch.allclose(old_model[i].bias, lora_model[i].bias)
-        assert torch.allclose(old_model[i].lora_B @ old_model[i].lora_A,
-                              lora_model[i].lora_B @ lora_model[i].lora_A)
+        assert torch.allclose(old_model[i].lora_B @ old_model[i].lora_A, lora_model[i].lora_B @ lora_model[i].lora_A)
     optimizer = torch.optim.Adam(lora_model.parameters())
     x = torch.randn(8, num_dim)
     for i in range(num_layers):
@@ -120,20 +106,19 @@ def test_lora(lora_rank: int,
                               lora_model[i].lora_B @ lora_model[i].lora_A)


-@pytest.mark.cpu
 @pytest.mark.parametrize("batch_size", [8])
 @pytest.mark.parametrize("seq_len", [128])
-@pytest.mark.parametrize("models_maker", [
-    lambda: (BLOOMActor(), BLOOMCritic(), BLOOMRM()),
-    lambda: (GPTActor(), GPTCritic(), GPTRM()),
+@pytest.mark.parametrize(
+    "models_maker",
+    [
+        lambda: (BLOOMActor(), BLOOMCritic(), BLOOMRM()),
+        lambda: (GPTActor(), GPTCritic(), GPTRM()),
     # HACK: skip llama due to long execution time
     # lambda: (LlamaActor(), LlamaCritic(), LlamaRM()),
     lambda: (OPTActor(), OPTCritic(), OPTRM()),
 ])
 @torch.no_grad()
-def test_models(models_maker: Callable[[], Tuple[Actor, Critic, RewardModel]],
-                batch_size: int,
-                seq_len: int):
+def test_models(models_maker: Callable[[], Tuple[Actor, Critic, RewardModel]], batch_size: int, seq_len: int):
     actor_input = {
         "input_ids": torch.randint(0, 100, (batch_size, seq_len)),
@@ -162,17 +147,14 @@ def test_models(models_maker: Callable[[], Tuple[Actor, Critic, RewardModel]],
     rm_output = rm(**rm_input)

     assert actor_output.logits.shape[:2] == (batch_size, seq_len)
-    assert critic_output.shape == (batch_size, )
-    assert rm_output.shape == (batch_size, )
+    assert critic_output.shape == (batch_size,)
+    assert rm_output.shape == (batch_size,)


-@pytest.mark.cpu
 @pytest.mark.parametrize("batch_size", [16])
 @pytest.mark.parametrize("seq_len", [128])
 @pytest.mark.parametrize("num_labels", [100])
-def test_loss(batch_size: int,
-              seq_len: int,
-              num_labels: int):
+def test_loss(batch_size: int, seq_len: int, num_labels: int):
     loss = GPTLMLoss()
     loss_input = {
         "logits": torch.randn(batch_size, seq_len, num_labels),
@@ -182,54 +164,43 @@ def test_loss(batch_size: int,

     loss = PolicyLoss()
     loss_input = {
-        "log_probs": torch.randn(batch_size, ),
-        "old_log_probs": torch.randn(batch_size, ),
-        "advantages": torch.randn(batch_size, )
+        "log_probs": torch.randn(batch_size,),
+        "old_log_probs": torch.randn(batch_size,),
+        "advantages": torch.randn(batch_size,)
     }
     loss_output = loss(**loss_input)

     loss = ValueLoss()
     loss_input = {
-        "values": torch.randn(batch_size, ),
-        "old_values": torch.randn(batch_size, ),
-        "reward": torch.randn(batch_size, )
+        "values": torch.randn(batch_size,),
+        "old_values": torch.randn(batch_size,),
+        "reward": torch.randn(batch_size,)
     }
     loss_output = loss(**loss_input)

     loss = LogSigLoss()
     loss_input = {
-        "chosen_reward": torch.randn(batch_size, ),
-        "reject_reward": torch.randn(batch_size, ),
+        "chosen_reward": torch.randn(batch_size,),
+        "reject_reward": torch.randn(batch_size,),
     }
     loss_output = loss(**loss_input)

     loss = LogExpLoss()
     loss_input = {
-        "chosen_reward": torch.randn(batch_size, ),
-        "reject_reward": torch.randn(batch_size, ),
+        "chosen_reward": torch.randn(batch_size,),
+        "reject_reward": torch.randn(batch_size,),
    }
     loss_output = loss(**loss_input)


 if __name__ == "__main__":
-    generate_kwargs = dict(max_length=40,
-                           use_cache=True,
-                           do_sample=True,
-                           temperature=1.0,
-                           top_k=50)
-    test_generation(lambda: LlamaActor(),
-                    batch_size=4,
-                    seq_len=32,
-                    generate_kwargs=generate_kwargs)
+    generate_kwargs = dict(max_length=40, use_cache=True, do_sample=True, temperature=1.0, top_k=50)
+    test_generation(lambda: LlamaActor(), batch_size=4, seq_len=32, generate_kwargs=generate_kwargs)

     test_utils()

     test_lora(lora_rank=2, num_dim=8, num_layers=2)

-    test_models(models_maker=lambda: (BLOOMActor(),
-                                      BLOOMCritic(),
-                                      BLOOMRM()),
-                batch_size=8,
-                seq_len=128)
+    test_models(models_maker=lambda: (BLOOMActor(), BLOOMCritic(), BLOOMRM()), batch_size=8, seq_len=128)

     test_loss(batch_size=8, seq_len=128, num_labels=100)

pytest.ini (6 changes)

@@ -1,7 +1,5 @@
 [pytest]
 markers =
-    cpu: tests which can run on CPU
-    gpu: tests which requires a single GPU
-    dist: tests which are run in a multi-GPU or multi-machine environment
-    experiment: tests for experimental features
+    dist: tests which are run in a multi-GPU or multi-machine environment (at least 4 GPUs)
+    largedist: tests which are run in a multi-GPU or multi-machine environment (at least 8 GPUs)
 addopts = --ignore=tests/test_analyzer --ignore=tests/test_auto_parallel --ignore=tests/test_autochunk --ignore=tests/test_moe
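
The build_on_pr.yml change above deselects the 8-GPU tests with a marker expression against these definitions. A rough local equivalent, run programmatically so the selection logic is visible; the tests/ path, the -m expression, and --durations are taken from the workflow, while the wrapper script itself is illustrative:

import sys

import pytest

# Run everything except tests marked as needing at least 8 GPUs,
# mirroring the CI step `pytest -m "not largedist" ... tests/`.
sys.exit(pytest.main(["-m", "not largedist", "--durations=10", "tests/"]))
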

tests/test_config/test_load_config.py (1 change)

@@ -8,7 +8,6 @@ import pytest
 from colossalai.context.config import Config


-@pytest.mark.cpu
 def test_load_config():
     filename = Path(__file__).parent.joinpath('sample_config.py')
     config = Config.from_file(filename)

tests/test_context/test_hybrid_parallel.py (1 change)

@@ -143,7 +143,6 @@ def run_dist(rank, world_size, port, backend, port_list, host):
     reset_seeds()


-@pytest.mark.cpu
 @rerun_if_address_is_in_use()
 def test_context():
     """

tests/test_data/test_cifar10_dataset.py (3 changes)

@@ -5,11 +5,10 @@ import os
 from pathlib import Path

 import pytest
-from torchvision import transforms, datasets
 from torch.utils.data import DataLoader
+from torchvision import datasets, transforms


-@pytest.mark.cpu
 def test_cifar10_dataset():
     # build transform
     transform_pipeline = [transforms.ToTensor()]

tests/test_data/test_data_parallel_sampler.py (1 change)

@@ -53,7 +53,6 @@ def run_data_sampler(rank, world_size, port):
     torch.cuda.empty_cache()


-@pytest.mark.cpu
 @rerun_if_address_is_in_use()
 def test_data_sampler():
     spawn(run_data_sampler, 4)

tests/test_data/test_deterministic_dataloader.py (1 change)

@@ -64,7 +64,6 @@ def run_data_sampler(rank, world_size, port):
     torch.cuda.empty_cache()


-@pytest.mark.cpu
 @rerun_if_address_is_in_use()
 def test_data_sampler():
     spawn(run_data_sampler, 4)

tests/test_utils/test_activation_checkpointing.py (1 change)

@@ -40,7 +40,6 @@ def forward_inplace(x, weight):
     return out


-@pytest.mark.gpu
 @clear_cache_before_run()
 @parameterize("use_reentrant", [True, False])
 @parameterize("cpu_offload", [True, False])
