import functools
from time import time
from typing import List, Optional, Tuple

import torch

from colossalai.gemini.chunk import Chunk, ChunkManager
from colossalai.gemini.memory_tracer import MemStats

from .memory_tracer import ChunkMemStatsCollector
from .placement_policy import PlacementPolicyFactory


class GeminiManager:
    """
    Stateful Tensor Manager, inspired by PatrickStar.

    PatrickStar: Parallel Training of Pre-trained Models via Chunk-based Memory Management
    https://arxiv.org/abs/2108.05818

    Args:
        placement_policy (str): Which device to place *held* tensors. It can be 'cpu', 'cuda' or 'auto'.
            If it's 'cpu', parameters, gradients and optimizer states are offloaded to CPU, so CUDA memory usage is minimal.
            If it's 'cuda', they are never offloaded, so CUDA memory usage is maximal.
            If it's 'auto', they are moved dynamically based on CPU and CUDA memory usage, so the heterogeneous memory space is utilized evenly.
            Note that the 'auto' policy only works well when no other process uses CUDA during training.
        chunk_manager (ChunkManager): A ``ChunkManager`` instance.
        memstats (MemStats, optional): Memory statistics collected by a runtime memory tracer.
            If ``None``, GeminiManager collects them itself during the warmup iteration.
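
    Example:
        A minimal construction sketch; ``chunk_manager`` is assumed to be a ``ChunkManager``
        that already holds the model's chunks, built elsewhere in the training setup:

        >>> manager = GeminiManager('auto', chunk_manager)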
    """

    def __init__(self, placement_policy: str, chunk_manager: ChunkManager, memstats: Optional[MemStats] = None) -> None:

        assert placement_policy in PlacementPolicyFactory.get_policy_names()
        self.policy_name = placement_policy
        policy_cls = PlacementPolicyFactory.create(placement_policy)
        self._chunk_manager = chunk_manager

        # memory statistics may be supplied by a runtime memory tracer;
        # otherwise they are collected by this manager during the warmup iteration
        self._premade_memstats_ = memstats is not None
        self._memstats = memstats
        self._mem_stats_collector = ChunkMemStatsCollector(chunk_manager,
                                                           self._memstats) if policy_cls.need_mem_stats else None
        self._placement_policy = policy_cls(chunk_manager, self._mem_stats_collector)

        # order in which chunk groups are accessed, recorded during the warmup iteration
        self._compute_list: List[Tuple[Chunk, ...]] = []
        self._compute_idx: int = -1

        # per-iteration statistics: data movement volumes and timing
        self._h2d_volume = 0
        self._d2h_volume = 0
        self._layout_time = 0
        self._evict_time = 0
        self._warmup = True
        self._comp_cuda_demand_time = 0

    def memstats(self):
        """Get the memory statistics collected during training.

        The stats are either supplied by a runtime memory tracer or collected by the
        GeminiManager itself. In the latter case, they cannot be accessed before the
        warmup iteration has finished.
        """
        if self._premade_memstats_:
            return self._memstats
        else:
            assert not self._warmup, "Memory statistics are only available after the warmup iteration has finished."
            return self._mem_stats_collector._memstats

    def pre_iter(self, *args):
        # start collecting memory stats at the beginning of the warmup iteration
        if self._mem_stats_collector and self._warmup:
            self._mem_stats_collector.start_collection()

    def post_iter(self):
        """This function must be called when each iteration finishes."""
        if self._mem_stats_collector and self._warmup:
            self._mem_stats_collector.finish_collection()
        self._warmup = False

        # reset per-iteration counters
        self._compute_idx = -1
        self._h2d_volume = 0
        self._d2h_volume = 0
        self._layout_time = 0
        self._evict_time = 0
        self._comp_cuda_demand_time = 0

    def adjust_layout(self, chunks: Tuple[Chunk, ...]) -> None:
        """Adjust the layout of stateful tensors according to the information provided
        by mem_stats_collector, which should belong to a sharded model.
        """
        # find stateful tensors in state COMPUTE
        start = time()
        self._record_chunks_order(chunks)
        cuda_demand, hold_cuda_tensor_list = self._get_layout_info(self._compute_idx, self._warmup, chunks)
        self._layout_time += time() - start

        # evict held chunks from CUDA to make room for the chunks about to be computed
        vol, evict_time = self._placement_policy.evict_tensors(can_evict_chunks=hold_cuda_tensor_list,
                                                               cuda_demand=cuda_demand,
                                                               warmup=self._warmup,
                                                               compute_list=self._compute_list,
                                                               compute_idx=self._compute_idx)

        self._d2h_volume += vol
        self._evict_time += evict_time
        # move COMPUTE tensors to CUDA
        self._h2d_volume += cuda_demand

    @functools.lru_cache(maxsize=None)
    def _get_layout_info(self, compute_idx: int, warmup: bool, chunks: Tuple[Chunk, ...]):
        start = time()
        cuda_demand = 0
        for chunk in chunks:
            if chunk.device_type == 'cuda':
                if chunk.is_gathered:
                    pass
                else:
                    # the chunk is sharded on CUDA; gathering it needs the remaining memory
                    cuda_demand += chunk.chunk_mem - chunk.shard_mem
            elif chunk.device_type == 'cpu':
                # the whole chunk has to be moved to CUDA
                cuda_demand += chunk.chunk_mem
            else:
                raise RuntimeError
        self._comp_cuda_demand_time += time() - start

        can_evict_chunks = self._chunk_manager.get_cuda_movable_chunks()
        return cuda_demand, can_evict_chunks

    def _record_chunks_order(self, chunks: Tuple[Chunk, ...]) -> None:
        self._compute_idx += 1
        if self._warmup and self._placement_policy.need_mem_stats:
            self._compute_list.append(chunks)

    @property
    def default_device(self):
        return self._placement_policy.get_default_device()

    def sample_overall_data(self):
        if self._mem_stats_collector:
            self._mem_stats_collector.sample_overall_data()

    def record_model_data_volume(self):
        if self._mem_stats_collector:
            self._mem_stats_collector.record_model_data_volume()

    @property
    def chunk_manager(self):
        return self._chunk_manager

    @property
    def cuda_margin_mem(self) -> Optional[float]:
        if self._mem_stats_collector:
            return self._mem_stats_collector.cuda_margin_mem
        return None

    @property
    def is_cuda_margin_mem_avail(self) -> bool:
        return self._placement_policy.need_mem_stats

    @staticmethod
    def get_default_device(policy_name: str) -> torch.device:
        return PlacementPolicyFactory.get_default_device(policy_name)
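

# The sketch below is illustrative only: it shows roughly how a training step is expected to
# drive this manager. The wrapper that actually issues these calls lives outside this module,
# and ``chunk_groups`` is a hypothetical sequence of the chunk tuples touched by each operator.
#
#   manager.pre_iter()
#   for chunks in chunk_groups:              # chunks needed by the next operator
#       manager.adjust_layout(chunks)        # make room on CUDA before the chunks are accessed
#       ...                                  # access the chunks and run the operator
#       manager.sample_overall_data()        # optional memory sampling during warmup
#   manager.post_iter()                      # finish warmup collection, reset per-iteration counters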