from typing import Optional, Tuple

import torch

from colossalai.context.singleton_meta import SingletonMeta
from colossalai.logging import DistributedLogger


def colo_model_optimizer_usage(optim) -> Tuple[int, int]:
    """Trace the optimizer memory usage.

    Args:
        optim (ShardedOptimV2): an instance of ShardedOptimV2

    Returns:
        Tuple[int, int]: cuda and cpu memory usage in bytes
    """
    if optim is None:
        return 0, 0
    assert hasattr(optim, 'get_memory_usage'), f"{type(optim)} has no attr get_memory_usage()"
    return optim.get_memory_usage()

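
# Usage sketch: `optim` is duck-typed, so any object exposing a
# `get_memory_usage()` method works; `sharded_optim` below stands in for a
# ShardedOptimV2 instance.
#
#     cuda_bytes, cpu_bytes = colo_model_optimizer_usage(sharded_optim)
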
def colo_model_mem_usage(model: torch.nn.Module) -> Tuple[int, int]:
    """
    Trace the model memory usage.

    Args:
        model (torch.nn.Module): a torch model

    Returns:
        Tuple[int, int]: cuda memory usage in bytes, cpu memory usage in bytes
    """
    if model is None:
        return 0, 0

    def _get_tensor_mem_use(t: Optional[torch.Tensor]):
        # A tensor occupies numel * element_size bytes on whichever device holds it.
        if t is None:
            return 0, 0
        assert isinstance(t, torch.Tensor)
        _cpu_mem_usage, _cuda_mem_usage = 0, 0
        if t.device.type == 'cpu':
            _cpu_mem_usage += t.numel() * t.element_size()
        elif t.device.type == 'cuda':
            _cuda_mem_usage += t.numel() * t.element_size()
        return _cuda_mem_usage, _cpu_mem_usage

    cuda_mem_usage = 0
    cpu_mem_usage = 0
    for param in model.parameters():
        if hasattr(param, 'colo_attr'):
            # Sharded parameters report their own usage.
            t_cuda, t_cpu = param.colo_attr.get_memory_usage()
            cuda_mem_usage += t_cuda
            cpu_mem_usage += t_cpu
        else:
            # Plain parameters: count the data and gradient tensors directly.
            t_cuda, t_cpu = _get_tensor_mem_use(param.data)
            cuda_mem_usage += t_cuda
            cpu_mem_usage += t_cpu
            t_cuda, t_cpu = _get_tensor_mem_use(param.grad)
            cuda_mem_usage += t_cuda
            cpu_mem_usage += t_cpu

    return cuda_mem_usage, cpu_mem_usage

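
# Usage sketch, assuming a plain CPU model: each parameter is counted as
# numel * element_size, and gradients add the same amount once they exist.
#
#     model = torch.nn.Linear(128, 128)
#     cuda_bytes, cpu_bytes = colo_model_mem_usage(model)
#     assert cpu_bytes == sum(p.numel() * p.element_size() for p in model.parameters())
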
class ModelDataTracer(metaclass=SingletonMeta):
    """
    A singleton that traces model data usage during runtime.
    You have to register a model on the singleton first.
    """

    def __init__(self) -> None:
        self._logger = DistributedLogger("ModelDataTracer")
        self._model = None
        self._optimizer = None

    def _get_mem_usage(self) -> Tuple[int, int]:
        """
        Get the memory usage of the registered model and optimizer.

        Returns:
            Tuple[int, int]: cuda, cpu mem usage
        """
        cuda_use_opt, cpu_use_opt = colo_model_optimizer_usage(self._optimizer)
        cuda_use_model, cpu_use_model = colo_model_mem_usage(self._model)
        return cuda_use_opt + cuda_use_model, cpu_use_opt + cpu_use_model

    def register_model(self, model) -> None:
        if self._model is not None:
            self._logger.warning("ModelDataTracer has already registered a model")
        self._model = model

    def register_optimizer(self, optimizer) -> None:
        if self._optimizer is not None:
            self._logger.warning("ModelDataTracer has already registered an optimizer")
        self._optimizer = optimizer

    @property
    def cpu_usage(self):
        _, cpu_usage = self._get_mem_usage()
        return cpu_usage

    @property
    def cuda_usage(self):
        cuda_usage, _ = self._get_mem_usage()
        return cuda_usage

    @property
    def both_mem_usage(self):
        return self._get_mem_usage()

GLOBAL_MODEL_DATA_TRACER = ModelDataTracer()
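
if __name__ == "__main__":
    # Minimal smoke-test sketch: register a toy model with the global
    # singleton and read its footprint back through the tracer properties.
    _toy = torch.nn.Linear(4, 4)
    GLOBAL_MODEL_DATA_TRACER.register_model(_toy)
    print(f"cuda: {GLOBAL_MODEL_DATA_TRACER.cuda_usage} B, cpu: {GLOBAL_MODEL_DATA_TRACER.cpu_usage} B")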