import torch

from colossalai.tensor.op_wrapper import colo_op_impl
from colossalai.tensor import ComputePattern, ComputeSpec, ColoTensor
from colossalai.tensor import ColoTensorSpec, ShardSpec, ReplicaSpec
from ._utils import GeneralTensor, Number, convert_to_colo_tensor
from ._utils import reduce_input, reduce_grad


def colo_addmm_1Drow(input_tensor: ColoTensor, mat1: ColoTensor, mat2: ColoTensor, beta: Number,
                     alpha: Number) -> ColoTensor:
    # mat1:S[1] x mat2:S[0] = Output:P
    # beta * input + alpha * All-Reduce(Output) = res
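    # Worked example (an illustrative assumption of tensor-parallel world size 2):
    # mat1 of shape (M, K) is sharded along dim 1 into an (M, K/2) slice per rank, and
    # mat2 of shape (K, N) along dim 0 into a (K/2, N) slice per rank. torch.mm then
    # produces an (M, N) partial sum on each rank, and the all-reduce below adds the
    # partials together to recover the full (M, N) product before the beta/alpha
    # combination with the input.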
    mat1 = mat1.redistribute(ShardSpec([-1], [mat2.get_tp_world_size()]), mat2.get_process_group())

    # Output:P
    partial_output = torch.mm(mat1, mat2)
    # Reduce(Output)
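    # reduce_input all-reduces the per-rank partial products over mat2's process group,
    # so every rank ends up holding the complete result of mat1 @ mat2.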
    output = reduce_input(partial_output, mat2.get_process_group())
    # input
    assert not input_tensor.has_compute_spec(), 'Invalid input spec for 1Drow addmm op'
    output = beta * input_tensor + alpha * output
    output = ColoTensor.from_torch_tensor(output, spec=ColoTensorSpec(input_tensor.get_process_group()))
    return output


def colo_addmm_1Dcol(input_tensor: ColoTensor, mat1: ColoTensor, mat2: ColoTensor, beta: Number,
                     alpha: Number) -> ColoTensor:
    # mat1:B x mat2:S[1] + input:S[1] = Output:S[1]
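    # Column-parallel pattern: mat2 is sharded along its last dim, so each rank computes
    # a column slice of the output and no forward communication is needed. mat1 is first
    # made replicated; reduce_grad below is an identity in the forward pass but
    # all-reduces mat1's gradient in the backward pass, since every rank contributes a
    # partial gradient to the replicated mat1.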
    compute_spec = mat2.compute_spec
    mat1 = mat1.redistribute(ReplicaSpec())
    mat1 = reduce_grad(mat1, mat1.get_process_group())

    output_parallel = torch.addmm(input_tensor, mat1, mat2, beta=beta, alpha=alpha)
    output_spec = ColoTensorSpec(input_tensor.get_process_group(), ShardSpec([-1], [mat2.get_tp_world_size()]),
                                 ComputeSpec(ComputePattern.TP1D))
    output = ColoTensor.from_torch_tensor(output_parallel, spec=output_spec)

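    # The layer's ComputeSpec decides whether to hand back the column-sharded output
    # as-is or to gather it: to_replicate() converts the S[-1]-sharded result back into
    # a fully replicated tensor.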
    if compute_spec.output_replicate:
        return output.to_replicate()
    else:
        return output


def colo_addmm_1d(mode: str, input_tensor: ColoTensor, mat1: ColoTensor, mat2: ColoTensor, beta: Number,
                  alpha: Number) -> ColoTensor:
    assert mode in ('row', 'col')
    funcs = {'row': colo_addmm_1Drow, 'col': colo_addmm_1Dcol}
    return funcs[mode](input_tensor, mat1, mat2, beta, alpha)


@colo_op_impl(torch.addmm)
def colo_addmm(input_tensor: GeneralTensor,
               mat1: ColoTensor,
               mat2: ColoTensor,
               beta: Number = 1,
               alpha: Number = 1,
               **kwargs) -> ColoTensor:
    """Handles ``__torch_function__`` dispatch for ``torch.addmm``.

    Computes ``beta * input + alpha * (mat1 @ mat2)``, inserting the communication
    required when ``mat2`` (and possibly ``input``) is sharded for 1D tensor parallelism.
    """
    # At least one of the tensors should be a ColoTensor
    assert isinstance(mat2, ColoTensor)
    input_tensor = convert_to_colo_tensor(input_tensor, mat2.get_process_group())
    mat1 = convert_to_colo_tensor(mat1, mat2.get_process_group())

    # Add communication logic before and after the addmm call.
    ret_tensor = None
    if not mat2.has_compute_spec():    # no model parallelism applied
        assert mat2.is_replicate(), 'Invalid mat2 spec for native addmm op'
        assert input_tensor.is_replicate(), 'Invalid input spec for native addmm op'
        ret_tensor = ColoTensor.from_torch_tensor(
            tensor=torch.addmm(input_tensor, mat1, mat2, beta=beta, alpha=alpha, **kwargs),
            spec=ColoTensorSpec(mat2.get_process_group()))
    elif mat2.has_compute_pattern(ComputePattern.TP1D):    # 1D tensor parallelism applied
        if mat2.is_shard_1drow() and input_tensor.is_replicate():
            mode = 'row'
        elif mat2.is_shard_1dcol() and (input_tensor.is_shard_1dcol() or input_tensor.is_shard_1drow()):
            mode = 'col'
        else:
            raise NotImplementedError
        ret_tensor = colo_addmm_1d(mode, input_tensor, mat1, mat2, beta, alpha)
    else:
        raise NotImplementedError

    return ret_tensor
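

# Rough usage sketch (illustrative only; the distributed setup and the ProcessGroup
# construction below are assumptions, not provided by this module). With mat2
# column-sharded over a tensor-parallel process group, a plain torch.addmm call on
# ColoTensors is intercepted via ``__torch_function__`` and routed to ``colo_addmm``
# above, which selects the 'col' branch and returns a ColoTensor:
#
#   pg = ProcessGroup(tp_degree=2)                       # hypothetical 2-way TP group
#   spec = ColoTensorSpec(pg, ShardSpec([-1], [pg.tp_world_size()]),
#                         ComputeSpec(ComputePattern.TP1D))
#   mat2 = ColoTensor.from_torch_tensor(weight, spec)
#   out = torch.addmm(bias, mat1, mat2)                  # dispatched to colo_addmm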