from abc import ABC, abstractmethod
from contextlib import contextmanager
from typing import Any, List, Tuple

import torch
from torch.utils._pytree import TreeSpec, tree_flatten, tree_unflatten


class ColoParamOpHook(ABC):
    """
    Hook which is triggered by each operation when operands contain ColoParameter.

    To customize it, you must inherit this abstract class and implement
    ``pre_forward``, ``post_forward``, ``pre_backward`` and ``post_backward``.
    Each of these four methods takes a list of ColoParameters as its input argument.
    A minimal illustrative subclass is sketched below this class definition.
    """

    @abstractmethod
    def pre_forward(self, params: List[torch.Tensor]) -> None:
        pass

    @abstractmethod
    def post_forward(self, params: List[torch.Tensor]) -> None:
        pass

    @abstractmethod
    def pre_backward(self, params: List[torch.Tensor]) -> None:
        pass

    @abstractmethod
    def post_backward(self, params: List[torch.Tensor]) -> None:
        pass
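

# The class below is a minimal illustrative sketch, not part of the original module:
# it assumes a made-up use case of counting how many parameters the intercepted ops
# touch. It only demonstrates that a concrete hook must implement all four abstract
# methods, even when some of them are no-ops. A hook like this would typically be
# activated with ``ColoParamOpHookManager.use_hooks(_ExampleCountingHook())`` (see the
# manager class below).
class _ExampleCountingHook(ColoParamOpHook):
    def __init__(self) -> None:
        super().__init__()
        self.params_seen_forward = 0
        self.params_seen_backward = 0

    def pre_forward(self, params: List[torch.Tensor]) -> None:
        # called right before an op that consumes the given parameters runs
        self.params_seen_forward += len(params)

    def post_forward(self, params: List[torch.Tensor]) -> None:
        pass

    def pre_backward(self, params: List[torch.Tensor]) -> None:
        # called just before gradients flow back into that op
        self.params_seen_backward += len(params)

    def post_backward(self, params: List[torch.Tensor]) -> None:
        pass

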
class ColoParamOpHookManager:
    """
    Manage your param op hooks. It only has static methods.

    The only static method you should call is ``use_hooks(*hooks)``.
    A sketch of how ``pre_op``/``post_op`` wrap a parameter op follows this class.
    """

    hooks: Tuple[ColoParamOpHook, ...] = tuple()

    @staticmethod
    @contextmanager
    def use_hooks(*hooks: ColoParamOpHook):
        """Change the param op hooks you use. Nested calling is allowed.

        Example:
            >>> with ColoParamOpHookManager.use_hooks(*hooks):
            >>>     do_something()
            >>>     with ColoParamOpHookManager.use_hooks():
            >>>         # clear hooks
            >>>         do_something()
        """
        try:
            old_param_op_hooks = ColoParamOpHookManager.hooks
            ColoParamOpHookManager.hooks = hooks
            yield
        finally:
            ColoParamOpHookManager.hooks = old_param_op_hooks

    @staticmethod
    def _trigger_pre_forward(params: List[torch.Tensor]) -> None:
        for hook in ColoParamOpHookManager.hooks:
            hook.pre_forward(params)

    @staticmethod
    def _trigger_post_forward(params: List[torch.Tensor]) -> None:
        for hook in ColoParamOpHookManager.hooks:
            hook.post_forward(params)

    @staticmethod
    def _trigger_pre_backward(params: List[torch.Tensor]) -> None:
        for hook in ColoParamOpHookManager.hooks:
            hook.pre_backward(params)

    @staticmethod
    def _trigger_post_backward(params: List[torch.Tensor]) -> None:
        for hook in ColoParamOpHookManager.hooks:
            hook.post_backward(params)

    @staticmethod
    def pre_op(params: List[torch.Tensor], *args: Any) -> list:
        ColoParamOpHookManager._trigger_pre_forward(params)
        # An autograd Function can only recognize torch.Tensor, so the inputs have to be
        # flattened. If any input of a Function requires grad, every output is treated as
        # requiring grad and gets a grad_fn, even when the corresponding input does not
        # require grad. Therefore only the tensors that require grad are extracted into a
        # flat list, passed through the Function, and then merged back with the other args.
        # (A small self-check of this flatten/merge round trip is sketched at the end of
        # this file.)
        grad_args, other_args, grad_flags, spec = _flatten_grad_args(args)
        new_grad_args = PreFwdPostBwd.apply(params, *grad_args)
        return _merge_args(new_grad_args, other_args, grad_flags, spec)

    @staticmethod
    def post_op(params: List[torch.Tensor], arg: Any) -> Any:
        ColoParamOpHookManager._trigger_post_forward(params)
        return PostFwdPreBwd.apply(params, arg)

    @staticmethod
    def has_hook() -> bool:
        return len(ColoParamOpHookManager.hooks) > 0
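

# The function below is an illustrative sketch, not part of the original module; the
# name ``_example_wrapped_linear`` and the choice of ``torch.nn.functional.linear`` are
# assumptions made for the example. In ColossalAI the wrapping normally happens inside
# the parameter's ``__torch_function__``, but the flow is the same: ``pre_op`` fires
# ``pre_forward`` on every active hook and routes the grad-carrying inputs through
# ``PreFwdPostBwd`` (so ``post_backward`` fires once their gradients arrive), while
# ``post_op`` fires ``post_forward`` and routes the output through ``PostFwdPreBwd``
# (so ``pre_backward`` fires just before gradients re-enter the op).
def _example_wrapped_linear(x: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
    if not ColoParamOpHookManager.has_hook():
        return torch.nn.functional.linear(x, weight)
    # the parameter is passed along with the other args so that at least one flattened
    # arg requires grad (``_flatten_grad_args`` asserts this)
    x, weight = ColoParamOpHookManager.pre_op([weight], x, weight)
    out = torch.nn.functional.linear(x, weight)
    return ColoParamOpHookManager.post_op([weight], out)

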
class PreFwdPostBwd(torch.autograd.Function):
    @staticmethod
    def forward(ctx, params, *args):
        ctx.params = params
        return args

    @staticmethod
    def backward(ctx, *grads):
        ColoParamOpHookManager._trigger_post_backward(ctx.params)
        return (None,) + grads


class PostFwdPreBwd(torch.autograd.Function):
    @staticmethod
    def forward(ctx, params, args):
        ctx.params = params
        return args

    @staticmethod
    def backward(ctx, *grads):
        ColoParamOpHookManager._trigger_pre_backward(ctx.params)
        return (None,) + grads


def _is_grad_tensor(obj) -> bool:
    if torch.is_tensor(obj):
        if obj.grad_fn is not None or obj.requires_grad:
            return True
    return False


def _flatten_grad_args(args) -> Tuple[list, list, List[bool], TreeSpec]:
    flat_args, spec = tree_flatten(args)
    grad_args = []
    other_args = []
    grad_flags = []
    for arg in flat_args:
        flag = _is_grad_tensor(arg)
        grad_flags.append(flag)
        if flag:
            grad_args.append(arg)
        else:
            other_args.append(arg)
    assert len(grad_args) > 0
    return grad_args, other_args, grad_flags, spec


def _merge_args(grad_args, other_args, grad_flags, spec):
    grad_iter = iter(grad_args)
    other_iter = iter(other_args)
    flat_args = [next(grad_iter) if flag else next(other_iter) for flag in grad_flags]
    return tree_unflatten(flat_args, spec)
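

# The block below is a small self-check sketch of the flatten/merge pair used by
# ``pre_op``, added for illustration; the tensors are made-up example inputs. Tensors
# that carry grad history are pulled into a flat list so they can pass through the
# autograd Function, everything else is kept aside, and ``_merge_args`` restores the
# original pytree structure afterwards.
if __name__ == "__main__":
    p = torch.randn(2, 2, requires_grad=True)  # stands in for a parameter
    x = torch.randn(2, 2)  # plain activation-like tensor, no grad history
    grad_args, other_args, grad_flags, spec = _flatten_grad_args((x, {"w": p}))
    assert grad_args[0] is p and other_args[0] is x
    assert grad_flags == [False, True]
    merged = _merge_args(grad_args, other_args, grad_flags, spec)
    assert merged[0] is x and merged[1]["w"] is p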