ColossalAI/colossalai/zero/sharded_param/sharded_tensor.py

40 lines
1.1 KiB
Python
Raw Normal View History

import torch
from colossalai.gemini.stateful_tensor import StatefulTensor, TensorState
2022-03-30 05:51:37 +00:00
class ShardedTensor(StatefulTensor):

    def __init__(self, tensor: torch.Tensor, state: TensorState = TensorState.HOLD) -> None:
        r"""
        A tensor sharded in multiple processes. Constructed from an existing
        torch.Tensor instance.

        Args:
            tensor: source tensor; must not require grad (asserted below).
            state: initial lifecycle state, defaults to ``TensorState.HOLD``.
        """
        assert tensor.requires_grad is False
        super().__init__(tensor, state)

        # Record the shape, numel and dtype of the initial tensor so they are
        # still available after sharding (the payload may later be replaced by
        # a shard with a different shape/numel).
        self._origin_shape = tensor.shape
        self._origin_numel = tensor.numel()
        self._origin_dtype = tensor.dtype

        self._is_sharded = False

    @property
    def dtype(self) -> torch.dtype:
        """dtype of the payload; must match the dtype recorded at init."""
        # Sharding may change shape/numel but never dtype — guard against drift.
        assert self._payload.dtype == self._origin_dtype
        return self._payload.dtype

    @property
    def origin_numel(self) -> int:
        """Number of elements of the original (unsharded) tensor."""
        return self._origin_numel

    @property
    def origin_shape(self) -> torch.Size:
        """Shape of the original (unsharded) tensor.

        Note: the return annotation was ``int`` before; ``tensor.shape`` is a
        ``torch.Size``, so the annotation is corrected here.
        """
        return self._origin_shape

    @property
    def is_sharded(self) -> bool:
        """Whether the payload currently holds a shard rather than the full tensor."""
        return self._is_sharded

    @is_sharded.setter
    def is_sharded(self, flag: bool) -> None:
        self._is_sharded = flag