from functools import partial

import torch.multiprocessing as mp
import torch.nn as nn

from colossalai.booster.accelerator import Accelerator
from colossalai.testing import parameterize, rerun_if_address_is_in_use

@parameterize('device', ['cpu', 'cuda'])
def run_accelerator(device):
    """Check that Accelerator.configure_model moves a model to `device`."""
    accelerator = Accelerator(device)
    linear = nn.Linear(8, 8)
    linear = accelerator.configure_model(linear)
    # After configuration, every parameter should live on the requested device.
    assert next(linear.parameters()).device.type == device
    # Drop references so a CUDA-resident model is released promptly.
    del linear, accelerator
def run_dist(rank):
    """Worker entry point for mp.spawn.

    `rank` is supplied by the spawner but unused here — the accelerator
    check needs no inter-process communication.
    """
    run_accelerator()
@rerun_if_address_is_in_use()
def test_accelerator():
    """Run the accelerator check in a spawned worker process.

    A single process suffices: the test exercises device placement only,
    not collective communication.
    """
    world_size = 1
    # The original wrapped run_dist in partial() without binding any
    # arguments — a no-op wrapper; pass the function directly.
    mp.spawn(run_dist, nprocs=world_size)