[Tensor] add hybrid device demo and fix bugs (#1059)

Ziyue Jiang 3 years ago committed by GitHub
parent b167258b6a
commit df9dcbbff6

@@ -51,11 +51,17 @@ class ColoDDP(torch.nn.Module):
         free_storage(empty_grad)
         if self.dp_world_size > 1:
             grad = grad / self.dp_world_size
-            self.comm_stream.wait_stream(torch.cuda.current_stream())
-            with torch.cuda.stream(self.comm_stream):
-                dist.all_reduce(grad, group=gpc.get_group(ParallelMode.DATA))
-                ColoDDP._save_grad(p, grad)
-            grad.record_stream(self.comm_stream)
+            if grad.device.type != "cpu":
+                self.comm_stream.wait_stream(torch.cuda.current_stream())
+                with torch.cuda.stream(self.comm_stream):
+                    group = gpc.get_group(ParallelMode.DATA)
+                    dist.all_reduce(grad, group=group)
+                    ColoDDP._save_grad(p, grad)
+                grad.record_stream(self.comm_stream)
+            else:
+                group = gpc.get_cpu_group(ParallelMode.DATA)
+                dist.all_reduce(grad, group=group)
+                ColoDDP._save_grad(p, grad)
         else:
             ColoDDP._save_grad(p, grad)
         return empty_grad
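
The new branch exists because NCCL groups only reduce CUDA tensors: a gradient that lives on the CPU has to travel through a gloo-backed group instead, and the CUDA side-stream bookkeeping only applies on the GPU path. A minimal sketch of the same dispatch outside ColoDDP (the gpu_group, cpu_group, and comm_stream arguments are stand-ins for what gpc hands back):

import torch
import torch.distributed as dist

def allreduce_grad(grad, gpu_group, cpu_group, comm_stream):
    # pick the process group by the gradient's device
    if grad.device.type != 'cpu':
        # run the reduction on a side stream so it can overlap with compute
        comm_stream.wait_stream(torch.cuda.current_stream())
        with torch.cuda.stream(comm_stream):
            dist.all_reduce(grad, group=gpu_group)
        # tell the caching allocator the side stream still reads this memory
        grad.record_stream(comm_stream)
    else:
        # CPU tensors must go through a gloo group; NCCL would reject them
        dist.all_reduce(grad, group=cpu_group)
    return grad

The record_stream call matters: without it, the allocator could free and reuse the gradient's storage while the communication stream is still reading it.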

@@ -12,12 +12,16 @@ def register_colo_module(module_type: type, colo_module: ColoModule):
 
 def is_colo_module(module: torch.nn.Module):
     global _COLOSSAL_MODULES
-    return type(module) in _COLOSSAL_MODULES
+    for module_type in _COLOSSAL_MODULES.keys():
+        if isinstance(module, module_type):
+            return True
+    return False
 
 def get_colo_module(module: torch.nn.Module):
     global _COLOSSAL_MODULES
     if is_colo_module(module):
-        colo_module = _COLOSSAL_MODULES[type(module)]
-        return colo_module
+        for module_type, colo_module in _COLOSSAL_MODULES.items():
+            if isinstance(module, module_type):
+                return colo_module
     else:
         return None
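
The old exact-type lookup (type(module) in _COLOSSAL_MODULES) silently misses subclasses of a registered module class; scanning the registry with an isinstance check catches them. A toy registry illustrating the difference (the names here are hypothetical, not part of the library):

import torch

_REGISTRY = {torch.nn.Linear: 'colo-linear'}    # toy stand-in for _COLOSSAL_MODULES

class MyLinear(torch.nn.Linear):                # user subclass of a registered type
    pass

m = MyLinear(4, 8)
print(type(m) in _REGISTRY)                      # False: exact-type lookup misses it
print(any(isinstance(m, t) for t in _REGISTRY))  # True: subclass-aware lookup finds it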

@@ -92,4 +92,5 @@ class ColoInitContext(InsertPostInitMethodToModuleSubClasses):
                 setattr(submodule, param_name, colo_param)
                 colo_param.shared_param_modules.append(submodule)
+        module.to(self._device)
         ColoModulize(module)
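
With module.to(self._device) added to the post-init hook, a model built under the context ends up on the context's device without a manual move, which is what the demo below relies on. A usage sketch, assuming a CUDA build:

import torch
from colossalai.utils import ColoInitContext, get_current_device

with ColoInitContext(device=get_current_device()):
    model = torch.nn.Linear(4, 8)

# parameters now live on the context device instead of defaulting to cpu
print(model.weight.device)    # e.g. cuda:0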

@@ -101,4 +101,4 @@ def test_gpt(world_size, use_ddp):
 
 if __name__ == '__main__':
-    test_gpt(4)
+    test_gpt(4, False)

@@ -0,0 +1,75 @@
+from colossalai.utils import free_port, ColoInitContext, get_current_device
+from colossalai.testing import rerun_if_address_is_in_use
+from colossalai.tensor import TensorSpec, ComputePattern, ParallelAction, init_colo_module
+from functools import partial
+from colossalai.core import global_context as gpc
+from colossalai.context import ParallelMode
+from colossalai.nn.parallel import ColoDDP
+import colossalai
+import torch
+import torch.multiprocessing as mp
+import pytest
+
+
+class Net(torch.nn.Module):
+
+    def __init__(self):
+        super(Net, self).__init__()
+        self.embed = torch.nn.Embedding(20, 4)
+        self.proj = torch.nn.Linear(4, 8)
+
+    def forward(self, x):
+        # run the embedding on cpu, then move the activation back to the input device
+        current_dev = x.device
+        x = x.to('cpu')
+        x = self.embed(x)
+        x = x.to(current_dev)
+        x = self.proj(x)
+        return x
+
+
+def run_hybrid_device(use_ddp):
+    with ColoInitContext(device=get_current_device()):
+        model = Net()
+
+    real_model = model
+    if use_ddp:
+        model = ColoDDP(model)
+        real_model = model.module
+
+    print(f'embedding weight size: {real_model.embed.weight.size()} | device: {real_model.embed.weight.device}')
+    #print(f'linear weight size: {real_model.proj.weight.size()} | device: {real_model.proj.weight.device}')
+    parallel_action = ParallelAction(ComputePattern.TP1D)
+    init_colo_module(model, parallel_action, recursive=True, mode='col')
+
+    # use a cpu gloo group to handle the embedding
+    real_model.embed.to('cpu')
+    gloo_group_tp = gpc.get_cpu_group(ParallelMode.PARALLEL_1D)
+    real_model.embed.weight.spec.dist_spec.process_group = gloo_group_tp
+
+    print(f'embedding weight size: {real_model.embed.weight.size()} | new device: {real_model.embed.weight.device}')
+    #print(f'linear weight size: {real_model.proj.weight.size()} | new device: {real_model.proj.weight.device}')
+    data = torch.randint(low=0, high=20, size=(16,), device=get_current_device())
+    out = model(data)
+    out.sum().backward()
+
+
+def run_dist(rank, world_size, port, use_ddp):
+    if use_ddp and world_size == 1:
+        return
+    tp_world_size = world_size // 2 if use_ddp else world_size
+    config = dict(parallel=dict(tensor=dict(mode="1d", size=tp_world_size),))
+    colossalai.launch(config=config, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
+    run_hybrid_device(use_ddp)
+
+
+# simulates a hybrid layout: embedding (CPU, DP + TP) feeding a linear layer (GPU, DP + TP)
+@pytest.mark.dist
+@pytest.mark.parametrize('world_size', [1, 4])
+@pytest.mark.parametrize('use_ddp', [False, True])
+@rerun_if_address_is_in_use()
+def _test_hybrid_device(world_size, use_ddp):
+    run_func = partial(run_dist, world_size=world_size, port=free_port(), use_ddp=use_ddp)
+    mp.spawn(run_func, nprocs=world_size)
+
+
+if __name__ == '__main__':
+    _test_hybrid_device(1, False)
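
Setting the Colossal-AI machinery aside, the core pattern in Net.forward is a per-layer device split with explicit activation moves. A plain-PyTorch sketch of the same layout, with the embedding pinned to CPU and the projection on GPU (HybridNet is an illustrative name, not from the patch):

import torch

class HybridNet(torch.nn.Module):
    def __init__(self, gpu):
        super().__init__()
        self.embed = torch.nn.Embedding(20, 4)        # large lookup table stays on cpu
        self.proj = torch.nn.Linear(4, 8).to(gpu)     # dense math runs on gpu
        self.gpu = gpu

    def forward(self, x):
        x = self.embed(x.to('cpu'))        # index lookup on cpu
        return self.proj(x.to(self.gpu))   # projection on gpu

if torch.cuda.is_available():
    net = HybridNet(torch.device('cuda'))
    out = net(torch.randint(0, 20, (16,), device='cuda'))
    out.sum().backward()    # each gradient lands on its parameter's own device

Autograd replays the .to() moves in reverse during backward, which is why the CPU embedding and the GPU linear layer can each receive gradients on their own device, and why ColoDDP above needs both a gloo group and an NCCL group.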