ColossalAI/tests/test_autochunk/test_autochunk_codegen.py

from functools import partial

import pytest
import torch
import torch.fx
import torch.multiprocessing as mp

import colossalai
from colossalai.core import global_context as gpc
from colossalai.fx import ColoTracer
from colossalai.fx._compatibility import is_compatible_with_meta
from colossalai.fx.codegen.activation_checkpoint_codegen import CODEGEN_AVAILABLE
from colossalai.fx.graph_module import ColoGraphModule
from colossalai.fx.passes.meta_info_prop import MetaInfoProp
from colossalai.utils import free_port
from tests.test_autochunk.evoformer.evoformer import evoformer_base

if CODEGEN_AVAILABLE and is_compatible_with_meta():
    from colossalai.autochunk.autochunk_codegen import AutoChunkCodeGen
    from colossalai.fx.profiler import MetaTensor


def _test_fwd(model: torch.nn.Module, gm: ColoGraphModule, node, pair):
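    # Run the eager model and the chunked GraphModule on the same inputs; both
    # outputs are expected to match within a small tolerance (atol=1e-4).
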
    # for memory test
    # torch.cuda.reset_peak_memory_stats()
    # now_mem = torch.cuda.memory_allocated() / 1024**2
    # with torch.no_grad():
    #     node1 = node.clone()
    #     pair1 = pair.clone()
    #     gm(node1, pair1)
    # new_now_mem = torch.cuda.memory_allocated() / 1024**2
    # new_max_mem = torch.cuda.max_memory_allocated() / 1024**2
    # print(
    #     "autochunk now mem:%.2f max mem:%.2f"
    #     % (new_now_mem - now_mem, new_max_mem - now_mem)
    # )

    # test forward
    with torch.no_grad():
        non_fx_out = model(node, pair)
        fx_out = gm(node, pair)

    assert torch.allclose(non_fx_out[0], fx_out[0], atol=1e-4), (
        "fx_out doesn't comply with original output, diff is %.2e"
        % torch.mean(torch.abs(non_fx_out[0] - fx_out[0])))
    assert torch.allclose(non_fx_out[1], fx_out[1], atol=1e-4), (
        "fx_out doesn't comply with original output, diff is %.2e"
        % torch.mean(torch.abs(non_fx_out[1] - fx_out[1])))


def _test_autochunk_codegen(rank, msa_len, pair_len, max_memory):
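    # Single-process worker: build the Evoformer model used by these tests, trace it,
    # apply AutoChunk codegen, then compare the chunked module against the eager model.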
    # launch colossalai
    colossalai.launch(
        config={},
        rank=rank,
        world_size=1,
        host="localhost",
        port=free_port(),
        backend="nccl",
    )

    # build model and input
    model = evoformer_base().cuda()
    node = torch.randn(1, msa_len, pair_len, 256).cuda()
    pair = torch.randn(1, pair_len, pair_len, 128).cuda()
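    # The two inputs mirror the Evoformer block's inputs: node is roughly the MSA
    # representation (batch, msa_len, pair_len, 256) and pair the pair representation
    # (batch, pair_len, pair_len, 128).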

    # trace the module and replace codegen
    graph = ColoTracer().trace(
        model,
        meta_args={
            "node": node.to(torch.device("meta")),
            "pair": pair.to(torch.device("meta")),
        },
    )
    gm_prop = torch.fx.symbolic_trace(model)    # must use symbolic_trace
    interp = MetaInfoProp(gm_prop)
    interp.propagate(MetaTensor(node, fake_device="cuda:0"), MetaTensor(pair, fake_device="cuda:0"))

    # run MetaInfoProp a second time so the traced graph module also carries meta info
    # (not strictly necessary)
    gm = torch.fx.GraphModule(model, graph)
    interp = MetaInfoProp(gm)
    interp.propagate(MetaTensor(node, fake_device="cuda:0"), MetaTensor(pair, fake_device="cuda:0"))
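    # AutoChunkCodeGen below uses the meta info propagated above to estimate activation
    # memory and to pick graph regions that can be executed chunk by chunk under the
    # max_memory budget (presumably in MB here; None lets the codegen pick a budget
    # itself). The emitted forward should then contain chunked loops, which the
    # assertion below checks by looking for "chunk_size" in the generated source.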
    codegen = AutoChunkCodeGen(gm_prop, max_memory=max_memory)
    graph.set_codegen(codegen)
    gm = ColoGraphModule(model, graph)
    gm.recompile()

    # assert we have inserted chunk
    code = graph.python_code("self").src
    assert "chunk_size" in code
    # print(code)

    _test_fwd(model, gm, node, pair)
    gpc.destroy()


@pytest.mark.skipif(not (CODEGEN_AVAILABLE and is_compatible_with_meta()), reason='torch version is lower than 1.12.0')
@pytest.mark.parametrize("max_memory", [None, 20, 25, 30])
@pytest.mark.parametrize("msa_len", [32])
@pytest.mark.parametrize("pair_len", [64])
def test_autochunk_codegen(msa_len, pair_len, max_memory):
    run_func = partial(
        _test_autochunk_codegen,
        msa_len=msa_len,
        pair_len=pair_len,
        max_memory=max_memory,
    )
    mp.spawn(run_func, nprocs=1)
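

# To run the full parametrized test (assuming a CUDA device and torch >= 1.12):
#   pytest tests/test_autochunk/test_autochunk_codegen.py
# Executing this file directly runs a single configuration without pytest.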
if __name__ == "__main__":
    _test_autochunk_codegen(0, 32, 64, 25)