ColossalAI/tests/test_gemini/test_mem_tracer_paramOP.py
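
"""A smoke test for the Gemini memory tracer's ParamWrapper: wrap several small
test models, run two forward/backward iterations on CUDA, and print how many
non-model-data samples the parameter op hook recorded."""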

import numpy as np
import torch

from colossalai.gemini.memory_tracer.param_tracer_wrapper import ParamWrapper
from colossalai.utils.model.colo_init_context import ColoInitContext
from tests.components_to_test.registry import non_distributed_component_funcs


def run_fwd_bwd(model, data, label, criterion, enable_autocast=False):
    """Run a single forward/backward pass, optionally under AMP autocast."""
    with torch.cuda.amp.autocast(enabled=enable_autocast):
        if criterion:
            y = model(data)
            loss = criterion(y, label)
        else:
            # Some test models compute their loss internally from (data, label).
            loss = model(data, label)
        loss = loss.float()
    # Backward is driven through the wrapper (not loss.backward()) so the
    # tracing hooks see the backward pass as well.
    model.backward(loss)


def run_param_wrapper_testing():
    test_models = ['repeated_computed_layers', 'simple_net', 'no_leaf_module', 'bert']

    for model_name in test_models:
        get_components_func = non_distributed_component_funcs.get_callable(model_name)
        model_builder, train_dataloader, _, _, criterion = get_components_func()

        # Build the model on CPU under ColoInitContext, then hand it to the
        # memory-tracing wrapper.
        with ColoInitContext(device=torch.device('cpu')):
            model = model_builder(checkpoint=False)
        model = ParamWrapper(model)

        # Two iterations are enough to exercise the tracer.
        for i, (data, label) in enumerate(train_dataloader):
            if i > 1:
                break
            data = data.cuda()
            label = label.cuda()
            run_fwd_bwd(model, data, label, criterion, False)

        # Samples recorded by the param op hook are in bytes; convert to MiB.
        cuda_non_model_data_list = np.array(model.param_op_hook._non_model_data_list) / 1024 ** 2
        print("cuda_non_model_data_list", len(cuda_non_model_data_list))
        # print(model.param_op_hook._non_model_data_list)

        del model
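

# A minimal sketch (hypothetical helper, not part of the original test) showing
# how the recorded samples could be summarized into a peak footprint. It assumes
# `param_op_hook._non_model_data_list` holds byte counts, as the MiB conversion
# above suggests.
def peak_non_model_data_mb(model):
    samples = np.array(model.param_op_hook._non_model_data_list) / 1024 ** 2
    return float(samples.max()) if samples.size else 0.0
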
if __name__ == '__main__':
run_param_wrapper_testing()