mirror of https://github.com/hpcaitech/ColossalAI
[booster] refactor all dp fashion plugins (#3684)
* [booster] add dp plugin base
* [booster] inherit dp plugin base
* [booster] refactor unit tests
parent b49020c1b1
commit d0915f54f4
colossalai/booster/plugin/dp_plugin_base.py
@@ -0,0 +1,72 @@
import random

import numpy as np
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler

from .plugin_base import Plugin


class DPPluginBase(Plugin):
    """This is a base class for all DP plugins. It sets up world size and rank, and provides data loader creation.
    """

    def __init__(self) -> None:
        super().__init__()
        assert dist.is_initialized(
        ), 'torch.distributed is not initialized, please use colossalai.launch to create the distributed environment'
        self.rank = dist.get_rank()
        self.world_size = dist.get_world_size()

    def prepare_train_dataloader(self,
                                 dataset,
                                 batch_size,
                                 shuffle=False,
                                 seed=1024,
                                 drop_last=False,
                                 pin_memory=False,
                                 num_workers=0,
                                 **kwargs):
        r"""
        Prepare a dataloader for distributed training. The dataloader will be wrapped by
        `torch.utils.data.DataLoader` and `torch.utils.data.DistributedSampler`.

        Note:
            1. Evaluation datasets should not be passed to this function.

        Args:
            dataset (`torch.utils.data.Dataset`): The dataset to be loaded.
            batch_size (int): The number of samples in each batch.
            shuffle (bool, optional): Whether to shuffle the dataset. Defaults to False.
            seed (int, optional): Random worker seed for sampling. Defaults to 1024.
            drop_last (bool, optional): Set to True to drop the last incomplete batch if the dataset size
                is not divisible by the batch size. If False and the size of the dataset is not divisible by
                the batch size, the last batch will be smaller. Defaults to False.
            pin_memory (bool, optional): Whether to load data into pinned (page-locked) CPU memory. Defaults to False.
            num_workers (int, optional): Number of worker processes for this dataloader. Defaults to 0.
            kwargs (dict): Optional parameters for ``torch.utils.data.DataLoader``; more details can be found in
                `DataLoader <https://pytorch.org/docs/stable/_modules/torch/utils/data/dataloader.html#DataLoader>`_.

        Returns:
            :class:`torch.utils.data.DataLoader`: A DataLoader used for training or testing.
        """
        _kwargs = kwargs.copy()
        # Each rank only iterates over its own shard of the dataset
        sampler = DistributedSampler(dataset, num_replicas=self.world_size, rank=self.rank, shuffle=shuffle)

        # Deterministic dataloader: every worker is seeded with the user-provided seed
        def seed_worker(worker_id):
            worker_seed = seed
            np.random.seed(worker_seed)
            torch.manual_seed(worker_seed)
            random.seed(worker_seed)

        return DataLoader(dataset,
                          batch_size=batch_size,
                          sampler=sampler,
                          worker_init_fn=seed_worker,
                          drop_last=drop_last,
                          pin_memory=pin_memory,
                          num_workers=num_workers,
                          **_kwargs)
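For context with the commit message above ("inherit dp plugin base"), the sketch below illustrates what a concrete DP-style plugin gains from this base class: rank/world-size discovery in __init__ and the inherited prepare_train_dataloader helper. The class name ExampleDPPlugin and the helper function are illustrative assumptions, not code from this commit; the remaining abstract Plugin methods (the same ones stubbed out in the test file that follows) are elided with `...` for brevity.

# Illustrative sketch only -- not part of this commit.
import torch
from torch.utils.data import TensorDataset

from colossalai.booster.plugin.dp_plugin_base import DPPluginBase


class ExampleDPPlugin(DPPluginBase):
    """A hypothetical DP plugin; DPPluginBase.__init__ fills in self.rank and self.world_size."""

    # configure / control_* / get_checkpoint_io / support_no_sync / supported_* would be
    # implemented here, as the test wrapper below does.
    ...


def build_train_dataloader(plugin: DPPluginBase):
    # The inherited helper wraps the dataset in a DistributedSampler, so each rank
    # iterates only over its own shard, with worker seeding fixed by `seed`.
    dataset = TensorDataset(torch.arange(0, 100))
    return plugin.prepare_train_dataloader(dataset, batch_size=8, shuffle=True, seed=42)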
@@ -0,0 +1,85 @@
from typing import Callable, List, Tuple, Union

import torch
import torch.distributed as dist
import torch.nn as nn
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler as LRScheduler
from torch.utils.data import DataLoader, TensorDataset

import colossalai
from colossalai.booster.plugin.dp_plugin_base import DPPluginBase
from colossalai.checkpoint_io import CheckpointIO
from colossalai.interface import OptimizerWrapper
from colossalai.testing import rerun_if_address_is_in_use, spawn


class DPPluginWrapper(DPPluginBase):
    """This is a wrapper class for testing DP plugin initialization and dataloader creation.
    """

    def configure(
        self,
        model: nn.Module,
        optimizer: Optimizer,
        criterion: Callable = None,
        dataloader: DataLoader = None,
        lr_scheduler: LRScheduler = None,
    ) -> Tuple[Union[nn.Module, OptimizerWrapper, LRScheduler, DataLoader]]:
        pass

    def control_checkpoint_io(self) -> bool:
        pass

    def control_device(self) -> bool:
        pass

    def control_precision(self) -> bool:
        pass

    def get_checkpoint_io(self) -> CheckpointIO:
        pass

    def support_no_sync(self) -> bool:
        pass

    def supported_devices(self) -> List[str]:
        pass

    def supported_precisions(self) -> List[str]:
        pass


def check_dataloader_sharding():
    plugin = DPPluginWrapper()

    # create a custom dataset with values 0 to 9
    dataset = TensorDataset(torch.arange(0, 10))
    train_dataloader = plugin.prepare_train_dataloader(dataset, batch_size=2)

    # get the first batch of data
    batch = next(iter(train_dataloader))[0].cuda()
    is_rank_0 = dist.get_rank() == 0

    if is_rank_0:
        batch_to_compare = batch.clone()
    else:
        batch_to_compare = batch
    # broadcast the batch from rank 1 so that rank 0 can compare it against its own batch
    dist.broadcast(batch_to_compare, src=1)

    # compare on rank 0
    if is_rank_0:
        assert not torch.equal(batch,
                               batch_to_compare), 'Same number was found across ranks but expected it to be different'


def run_dist(rank, world_size, port):
    # init dist env
    colossalai.launch(config=dict(), rank=rank, world_size=world_size, port=port, host='localhost')
    check_dataloader_sharding()


@rerun_if_address_is_in_use()
def test_dp_plugin_dataloader():
    # spawn 2 processes, each running run_dist with its own rank
    spawn(run_dist, 2)
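Why the assertion in check_dataloader_sharding is expected to hold: with 10 samples, 2 ranks, and the default shuffle=False, the DistributedSampler built by prepare_train_dataloader assigns indices round-robin, so the first batch differs across ranks. A minimal single-process sketch of this behaviour (an illustration, not part of this commit) is:

# Single-process illustration of the sharding checked by the test above.
import torch
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.distributed import DistributedSampler

dataset = TensorDataset(torch.arange(0, 10))
for rank in range(2):
    # Passing num_replicas/rank explicitly avoids needing an initialized process group here.
    sampler = DistributedSampler(dataset, num_replicas=2, rank=rank, shuffle=False)
    loader = DataLoader(dataset, batch_size=2, sampler=sampler)
    print(rank, next(iter(loader))[0])    # rank 0 -> tensor([0, 2]); rank 1 -> tensor([1, 3])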