import random
from typing import Callable, Optional

import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from coati.replay_buffer import ReplayBuffer
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from transformers.tokenization_utils_base import PreTrainedTokenizerBase

from colossalai.booster.plugin import TorchDDPPlugin
from colossalai.booster.plugin.torch_ddp_plugin import TorchDDPModel

from .naive import NaiveStrategy
from .sampler import DistributedSampler


class DDPStrategy(NaiveStrategy):
    """
    Strategy for distributed training using torch.distributed (DistributedDataParallel).

    Args:
        seed (int): Random seed applied to every process. Defaults to 42.
        plugin_initializer (Callable): Callable that creates the booster plugin;
            it must produce a TorchDDPPlugin. Defaults to TorchDDPPlugin.
    """

    def __init__(self,
                 seed: int = 42,
                 plugin_initializer: Callable = TorchDDPPlugin
                 ) -> None:
        # Store the seed before the base class finishes initialization;
        # setup_distributed() re-seeds every process with it.
        self.seed = seed
        super().__init__(plugin_initializer)

    def _post_init(self) -> None:
        # Sanity-check that the plugin created by the initializer is a TorchDDPPlugin.
        assert isinstance(self.plugin, TorchDDPPlugin), \
            f'{type(self).__name__}\'s plugin is not initialized properly.'

    def setup_distributed(self) -> None:
        # Make sure torch.distributed is initialized, then seed all RNGs for reproducibility.
        self._try_init_dist(force=True)
        self.set_seed(self.seed)

    def set_seed(self, seed: int) -> None:
        # Seed the Python, NumPy and PyTorch RNGs with the same value.
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)

    def backward(self, loss: torch.Tensor, model: nn.Module, optimizer: Optimizer, **kwargs) -> None:
        # Delegate the backward pass to the booster.
        self.booster.backward(loss, optimizer)

    def setup_dataloader(self, replay_buffer: ReplayBuffer, pin_memory: bool = False) -> DataLoader:
        # Let the plugin construct a distributed-aware dataloader over the replay buffer.
        return self.plugin.prepare_dataloader(replay_buffer,
                                              batch_size=replay_buffer.sample_batch_size,
                                              shuffle=True,
                                              drop_last=True,
                                              pin_memory=pin_memory,
                                              collate_fn=replay_buffer.collate_fn)

    def setup_sampler(self, dataset) -> DistributedSampler:
        # FIXME(cwher): this is only invoked in train_on_ray; not tested since adapting to the Booster API.
        return DistributedSampler(dataset, dist.get_world_size(), dist.get_rank())

    def unwrap_model(self, model: nn.Module) -> nn.Module:
        # The booster wraps models in TorchDDPModel; return the underlying module.
        assert isinstance(model, TorchDDPModel), "model is not wrapped by TorchDDPModel."
        return model.unwrap()

    def save_pretrained(self,
                        model: nn.Module,
                        path: str,
                        only_rank0: bool = True,
                        tokenizer: Optional[PreTrainedTokenizerBase] = None) -> None:
        # With only_rank0, only the rank-0 process writes the checkpoint.
        if only_rank0 and dist.get_rank() != 0:
            return
        super().save_pretrained(model, path, only_rank0, tokenizer)
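

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not executed): how a training script might
# drive this strategy. `model`, `optimizer`, `replay_buffer`, `tokenizer` and
# `compute_loss` are placeholders assumed to be prepared elsewhere (e.g. via
# the base Strategy); only the DDPStrategy methods shown are defined here.
#
#   strategy = DDPStrategy(seed=42)
#   strategy.setup_distributed()  # may already be triggered by the constructor
#   dataloader = strategy.setup_dataloader(replay_buffer, pin_memory=True)
#   for batch in dataloader:
#       loss = compute_loss(model, batch)
#       strategy.backward(loss, model, optimizer)
#   strategy.save_pretrained(model, path="./checkpoint", tokenizer=tokenizer)
# ---------------------------------------------------------------------------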