|
|
@ -14,16 +14,9 @@ from colossalai.logging import disable_existing_loggers
|
|
|
|
from colossalai.pipeline.schedule.v_schedule import PipelineGraph, ScheduledNode
|
|
|
|
from colossalai.pipeline.schedule.v_schedule import PipelineGraph, ScheduledNode
|
|
|
|
from colossalai.pipeline.schedule.zero_bubble_pp import ZeroBubbleVPipeScheduler
|
|
|
|
from colossalai.pipeline.schedule.zero_bubble_pp import ZeroBubbleVPipeScheduler
|
|
|
|
from colossalai.pipeline.stage_manager import PipelineStageManager
|
|
|
|
from colossalai.pipeline.stage_manager import PipelineStageManager
|
|
|
|
from colossalai.shardformer.layer.utils import Randomizer
|
|
|
|
|
|
|
|
from colossalai.tensor.d_tensor.api import clear_layout_converter
|
|
|
|
from colossalai.tensor.d_tensor.api import clear_layout_converter
|
|
|
|
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
|
|
|
|
from colossalai.testing import parameterize, rerun_if_address_is_in_use, spawn
|
|
|
|
from tests.kit.model_zoo import model_zoo
|
|
|
|
from tests.kit.model_zoo import model_zoo
|
|
|
|
from tests.test_shardformer.test_model._utils import (
|
|
|
|
|
|
|
|
build_model_from_hybrid_plugin,
|
|
|
|
|
|
|
|
check_weight,
|
|
|
|
|
|
|
|
run_forward_backward_with_hybrid_plugin,
|
|
|
|
|
|
|
|
unwrap_model,
|
|
|
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class MlpModel(nn.Module):
|
|
|
|
class MlpModel(nn.Module):
|
|
|
@ -437,7 +430,7 @@ def run_fwd_bwd_iter_input(test_config):
|
|
|
|
local_chunk.append(sub_model)
|
|
|
|
local_chunk.append(sub_model)
|
|
|
|
else:
|
|
|
|
else:
|
|
|
|
# layer 3 & 4 to chunk 3 on rank3
|
|
|
|
# layer 3 & 4 to chunk 3 on rank3
|
|
|
|
local_chunk = torch.nn.Sequential().to(rank)
|
|
|
|
local_chunk = torch.nn.ModuleList().to(rank)
|
|
|
|
for idx, sub_model in enumerate(model.layers):
|
|
|
|
for idx, sub_model in enumerate(model.layers):
|
|
|
|
if idx == 3 or idx == 4:
|
|
|
|
if idx == 3 or idx == 4:
|
|
|
|
local_chunk.append(sub_model)
|
|
|
|
local_chunk.append(sub_model)
|
|
|
@ -594,7 +587,7 @@ def run_fwd_bwd_vschedule_with_optim(test_config):
|
|
|
|
local_chunk.append(sub_model)
|
|
|
|
local_chunk.append(sub_model)
|
|
|
|
else:
|
|
|
|
else:
|
|
|
|
# layer 3 & 4 to chunk 3 on rank3
|
|
|
|
# layer 3 & 4 to chunk 3 on rank3
|
|
|
|
local_chunk = torch.nn.Sequential().to(rank)
|
|
|
|
local_chunk = torch.nn.ModuleList().to(rank)
|
|
|
|
for idx, sub_model in enumerate(model.layers):
|
|
|
|
for idx, sub_model in enumerate(model.layers):
|
|
|
|
if idx == 3 or idx == 4:
|
|
|
|
if idx == 3 or idx == 4:
|
|
|
|
local_chunk.append(sub_model)
|
|
|
|
local_chunk.append(sub_model)
|
|
|
@ -718,44 +711,46 @@ def run_with_moehybridplugin(test_config):
|
|
|
|
clear_layout_converter()
|
|
|
|
clear_layout_converter()
|
|
|
|
torch.set_default_dtype(torch.bfloat16)
|
|
|
|
torch.set_default_dtype(torch.bfloat16)
|
|
|
|
for name, (model_fn, data_gen_fn, output_transform_fn, loss_fn, _) in sub_model_zoo.items():
|
|
|
|
for name, (model_fn, data_gen_fn, output_transform_fn, loss_fn, _) in sub_model_zoo.items():
|
|
|
|
if name in model_list:
|
|
|
|
data_gen_fn()
|
|
|
|
(
|
|
|
|
# print(f"data {data}")
|
|
|
|
org_model,
|
|
|
|
# if name in model_list:
|
|
|
|
org_optimizer,
|
|
|
|
# (
|
|
|
|
sharded_model,
|
|
|
|
# org_model,
|
|
|
|
sharded_optimizer,
|
|
|
|
# org_optimizer,
|
|
|
|
criterion,
|
|
|
|
# sharded_model,
|
|
|
|
booster,
|
|
|
|
# sharded_optimizer,
|
|
|
|
) = build_model_from_hybrid_plugin(model_fn, loss_fn, test_config, torch.optim.SGD, torch.optim.SGD)
|
|
|
|
# criterion,
|
|
|
|
|
|
|
|
# booster,
|
|
|
|
org_loss, org_output, sharded_loss, sharded_output = run_forward_backward_with_hybrid_plugin(
|
|
|
|
# ) = build_model_from_hybrid_plugin(model_fn, loss_fn, test_config, torch.optim.SGD, torch.optim.SGD)
|
|
|
|
org_model, sharded_model, sharded_optimizer, data_gen_fn, output_transform_fn, criterion, booster
|
|
|
|
|
|
|
|
)
|
|
|
|
# org_loss, org_output, sharded_loss, sharded_output = run_forward_backward_with_hybrid_plugin(
|
|
|
|
|
|
|
|
# org_model, sharded_model, sharded_optimizer, data_gen_fn, output_transform_fn, criterion, booster
|
|
|
|
stage_manager = booster.plugin.stage_manager
|
|
|
|
# )
|
|
|
|
tp_group = booster.plugin.tp_group
|
|
|
|
|
|
|
|
|
|
|
|
# stage_manager = booster.plugin.stage_manager
|
|
|
|
bert = unwrap_model(org_model, "BertModel", "bert")
|
|
|
|
# tp_group = booster.plugin.tp_group
|
|
|
|
sharded_bert = unwrap_model(sharded_model, "BertModel", "bert")
|
|
|
|
|
|
|
|
weight_layer_for_check = ["encoder.layer[0].output.dense", "encoder.layer[1].output.dense"]
|
|
|
|
# bert = unwrap_model(org_model, "BertModel", "bert")
|
|
|
|
|
|
|
|
# sharded_bert = unwrap_model(sharded_model, "BertModel", "bert")
|
|
|
|
org_optimizer.step()
|
|
|
|
# weight_layer_for_check = ["encoder.layer[0].output.dense", "encoder.layer[1].output.dense"]
|
|
|
|
sharded_optimizer.step()
|
|
|
|
|
|
|
|
|
|
|
|
# org_optimizer.step()
|
|
|
|
# check weights
|
|
|
|
# sharded_optimizer.step()
|
|
|
|
if test_config["precision"] == "bf16":
|
|
|
|
|
|
|
|
atol, rtol = 5e-4, 5e-4
|
|
|
|
# # check weights
|
|
|
|
else:
|
|
|
|
# if test_config["precision"] == "bf16":
|
|
|
|
atol, rtol = 5e-4, 5e-4
|
|
|
|
# atol, rtol = 5e-4, 5e-4
|
|
|
|
if stage_manager is None or stage_manager.is_first_stage(ignore_chunk=True):
|
|
|
|
# else:
|
|
|
|
check_weight(bert, sharded_bert, weight_layer_for_check, tp_group, atol=atol, rtol=rtol, dim=1)
|
|
|
|
# atol, rtol = 5e-4, 5e-4
|
|
|
|
# check optim states
|
|
|
|
# if stage_manager is None or stage_manager.is_first_stage(ignore_chunk=True):
|
|
|
|
# check_dist_optim_state(org_optimizer, sharded_optimizer.optim)
|
|
|
|
# check_weight(bert, sharded_bert, weight_layer_for_check, tp_group, atol=atol, rtol=rtol, dim=1)
|
|
|
|
|
|
|
|
# # check optim states
|
|
|
|
clear_layout_converter()
|
|
|
|
# # check_dist_optim_state(org_optimizer, sharded_optimizer.optim)
|
|
|
|
Randomizer.reset_index()
|
|
|
|
|
|
|
|
torch.cuda.empty_cache()
|
|
|
|
# clear_layout_converter()
|
|
|
|
print(f"Bert Model Zoo Test Passed")
|
|
|
|
# Randomizer.reset_index()
|
|
|
|
|
|
|
|
# torch.cuda.empty_cache()
|
|
|
|
|
|
|
|
# print(f"Bert Model Zoo Test Passed")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# TODO:6) support booster & Hybrid base 4)
|
|
|
|
# TODO:6) support booster & Hybrid base 4)
|
|
|
@ -766,8 +761,9 @@ def run_with_moehybridplugin(test_config):
|
|
|
|
def run_dist(rank, world_size, port):
|
|
|
|
def run_dist(rank, world_size, port):
|
|
|
|
disable_existing_loggers()
|
|
|
|
disable_existing_loggers()
|
|
|
|
colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
|
|
|
|
colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl")
|
|
|
|
run_fwd_bwd_iter_input()
|
|
|
|
# run_fwd_bwd_iter_input()
|
|
|
|
run_fwd_bwd_vschedule_with_optim()
|
|
|
|
run_fwd_bwd_vschedule_with_optim()
|
|
|
|
|
|
|
|
# run_with_moehybridplugin()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.mark.dist
|
|
|
|
@pytest.mark.dist
|
|
|
|