ColossalAI/colossalai/tensor/_ops/linear.py

import torch
from colossalai.tensor.op_wrapper import colo_op_impl
from colossalai.tensor.colo_tensor import ColoTensor
from colossalai.context import ParallelMode
from colossalai.nn.layer.parallel_1d._utils import split_forward_gather_backward, reduce_input
from colossalai.nn.layer.utils import divide
from colossalai.core import global_context as gpc
from packaging import version
from colossalai.tensor import ComputePattern


@colo_op_impl(torch.nn.functional.linear)
def colo_linear(types, args, kwargs, pg):
"""Handles ``__torch_function__`` dispatch for ``torch.nn.functional.linear``.
This method computes a linear.
"""
input_tensor = args[0]
weight = args[1]
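    # The location of ``bias`` in the dispatch arguments differs across torch
    # versions: newer torch passes it positionally, older torch as a keyword.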
if version.parse(torch.__version__) > version.parse("1.11.0"):
if len(args) == 3:
bias = args[2]
else:
bias = None
else:
bias = kwargs.get('bias', None)

    if isinstance(bias, ColoTensor):
        assert bias.shard_spec.num_action == 0, \
            "we currently only support a bias that is duplicated among processes in the linear operator"
        bias = bias.torch_tensor()
# Add communication logic before and after linear call.
if isinstance(weight, ColoTensor):
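        # Replicated weight (no shard action): unwrap any ColoTensors and fall
        # back to the stock linear, re-wrapping the result.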
        if weight.shard_spec is None or weight.shard_spec.num_action == 0:
if isinstance(input_tensor, ColoTensor):
input_tensor = input_tensor.torch_tensor()
            weight = weight.torch_tensor()
return ColoTensor.init_from_torch_tensor(torch.nn.functional.linear(input_tensor, weight, bias))
elif weight.shard_spec.num_action == 1:
if ComputePattern.TP1DRow in weight.shard_spec.compute_patterns:
# Input:S[1] x Weight:S[0] = Output:P
# All-Reduce(Output) + bias = res
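                # With the weight sharded along its input-feature dim, each rank
                # multiplies its input slice x_i against its weight slice W_i,
                # producing a partial sum of the full output; the all-reduce below
                # adds the partials, and the (replicated) bias is applied once.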
                assert divide(input_tensor.shape[-1], gpc.tensor_parallel_size) == weight.size(-1), \
                    'Invalid shapes in 1Drow forward: input={}, weight={}. Expected last dim of input {}.'.format(
                        input_tensor.shape, weight.size(), weight.size(-1) * gpc.tensor_parallel_size)
# Input:S[1]
if isinstance(input_tensor, ColoTensor):
input_tensor = input_tensor.torch_tensor()
parallel_action = weight.shard_spec.get_action_by_compute_pattern(ComputePattern.TP1DRow)
input_per_partition = split_forward_gather_backward(input_tensor, parallel_action.parallel_mode, dim=-1)
# Output:P
weight_ = weight.torch_tensor()
partial_output = torch.nn.functional.linear(input_per_partition, weight_)
# Reduce(Output)
output = reduce_input(partial_output, ParallelMode.PARALLEL_1D)
# Bias
if bias is not None:
output = output + bias
return ColoTensor.init_from_torch_tensor(output)
else:
raise NotImplementedError
else:
raise NotImplementedError
else:
return torch.nn.functional.linear(input_tensor, weight, bias)
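

# A minimal usage sketch, not part of the original file: it assumes that
# ColoTensor's ``__torch_function__`` routes ``F.linear`` calls to
# ``colo_linear`` and that a freshly wrapped tensor carries no shard spec, so
# the replicated branch above runs and the result matches the stock
# ``torch.nn.functional.linear``.
if __name__ == '__main__':
    x = torch.randn(4, 8)
    w = ColoTensor.init_from_torch_tensor(torch.randn(16, 8))
    out = torch.nn.functional.linear(x, w)  # dispatched to colo_linear
    print(out.torch_tensor().shape)  # expected: torch.Size([4, 16])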