"""Benchmark script: compares the base Evoformer, OpenFold's chunked
EvoformerBlock, and ColossalAI's AutoChunk-compiled Evoformer on runtime
and peak CUDA memory."""
import time

import torch
import torch.fx

from colossalai.autochunk.autochunk_codegen import AutoChunkCodeGen
from colossalai.fx import ColoTracer
from colossalai.fx.graph_module import ColoGraphModule
from colossalai.fx.passes.meta_info_prop import MetaInfoProp
from colossalai.fx.profiler import MetaTensor
from tests.test_autochunk.evoformer.evoformer import evoformer_base
from tests.test_autochunk.openfold.evoformer import EvoformerBlock


def _benchmark_evoformer(model: torch.nn.Module, node, pair, title, chunk_size=None):
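    """Run `model(node, pair)` a few times for warmup, then time `loop` forward
    passes under `torch.no_grad()` and print the average latency and the peak
    CUDA memory delta, labeled with `title`. If `chunk_size` is set, it is
    passed to the model as a third positional argument (used by the OpenFold
    block)."""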
    torch.cuda.reset_peak_memory_stats()
    now_mem = torch.cuda.memory_allocated() / 1024**2

    loop = 3
    with torch.no_grad():
        # warmup passes (not timed)
        for _ in range(loop // 2 + 1):
            if chunk_size:
                model(node, pair, chunk_size)
            else:
                model(node, pair)
        torch.cuda.synchronize()
        time1 = time.time()
        # timed passes
        for _ in range(loop):
            if chunk_size:
                model(node, pair, chunk_size)
            else:
                model(node, pair)
        torch.cuda.synchronize()
        time2 = time.time()

    new_max_mem = torch.cuda.max_memory_allocated() / 1024**2
    print(
        "%s: time %.4fs, mem %dMB"
        % (title, (time2 - time1) / loop, new_max_mem - now_mem)
    )


def _build_autochunk(model, max_memory, node, pair):
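    """Trace `model` with ColoTracer, propagate meta info via MetaInfoProp,
    then attach AutoChunkCodeGen so the recompiled forward chunks intermediate
    activations to stay within `max_memory` MB (None means min-memory mode)."""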
    # trace the module and replace codegen
    graph = ColoTracer().trace(
        model,
        meta_args={
            "node": node.to(torch.device("meta")),
            "pair": pair.to(torch.device("meta")),
        },
    )

    # run meta-info propagation on a symbolically traced copy for the codegen
    gm_prop = torch.fx.symbolic_trace(model)  # must use symbolic_trace
    interp = MetaInfoProp(gm_prop)
    interp.propagate(
        MetaTensor(node, fake_device="cuda:0"), MetaTensor(pair, fake_device="cuda:0")
    )

    # now run it twice to get meta info in graph module, not necessary
    gm = torch.fx.GraphModule(model, graph)
    interp = MetaInfoProp(gm)
    interp.propagate(
        MetaTensor(node, fake_device="cuda:0"), MetaTensor(pair, fake_device="cuda:0")
    )

    # swap in AutoChunk's codegen and recompile
    codegen = AutoChunkCodeGen(gm_prop, max_memory, print_mem=False)
    graph.set_codegen(codegen)
    gm = ColoGraphModule(model, graph)
    gm.recompile()

    # uncomment to inspect the generated forward code
    # code = graph.python_code("self").src
    # print(code)
    return gm


def _build_openfold():
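    """Build an OpenFold EvoformerBlock on CUDA as the hand-chunked baseline."""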
    model = EvoformerBlock(
        c_m=256,
        c_z=128,
        c_hidden_msa_att=32,
        c_hidden_opm=32,
        c_hidden_mul=128,
        c_hidden_pair_att=32,
        no_heads_msa=8,
        no_heads_pair=4,
        transition_n=4,
        msa_dropout=0.15,
        pair_dropout=0.15,
        inf=1e4,
        eps=1e-4,
        is_multimer=False,
    ).cuda()
    return model


def benchmark_evoformer():
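    """Benchmark the base Evoformer, the OpenFold block with a fixed chunk
    size, and the AutoChunk-compiled model on the same random inputs."""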
    # init data and model
    msa_len = 256
    pair_len = 256
    node = torch.randn(1, msa_len, pair_len, 256).cuda()
    pair = torch.randn(1, pair_len, pair_len, 128).cuda()
    model = evoformer_base().cuda()

    # build autochunk model
    max_memory = 1000  # MB, fit-memory mode
    # max_memory = None  # min-memory mode
    autochunk = _build_autochunk(evoformer_base().cuda(), max_memory, node, pair)

    # build openfold
    chunk_size = 64
    openfold = _build_openfold()

    # benchmark
    _benchmark_evoformer(model, node, pair, "base")
    _benchmark_evoformer(openfold, node, pair, "openfold", chunk_size=chunk_size)
    _benchmark_evoformer(autochunk, node, pair, "autochunk")


if __name__ == "__main__":
    benchmark_evoformer()