
fix

pull/6071/head
wangbluo 1 month ago
commit 23199e34cc
  1. colossalai/shardformer/layer/attn.py (63 lines changed)
  2. colossalai/shardformer/modeling/gpt2.py (2 lines changed)
  3. colossalai/shardformer/modeling/llama.py (3 lines changed)
  4. tests/test_shardformer/test_layer/test_ring_attn.py (8 lines changed)

colossalai/shardformer/layer/attn.py (63 lines changed)

@@ -7,6 +7,7 @@ import torch.distributed as dist
 import torch.nn.functional as F
 from einops import rearrange
 
+from colossalai.cluster import ProcessGroupMesh
 from colossalai.kernel.kernel_loader import (
     FlashAttentionDaoLoader,
     FlashAttentionForFloatAndCustomMaskLoader,
@@ -431,7 +432,7 @@ class RingAttention(torch.autograd.Function):
     INTER_RING_GROUP_COPY: dist.ProcessGroup = None
 
     @staticmethod
-    def get_double_ring_groups(sp_group, tp_group, inner_ring_size=None):
+    def get_double_ring_groups(sp_group, inner_ring_size=None):
         """
         Get 2D ring groups for the given process group. Generally, to avoid congestion, the inner ring size
         shouldn't be larger than the number of NICs on each node.
@@ -442,7 +443,6 @@ class RingAttention(torch.autograd.Function):
             Tuple[dist.ProcessGroup, dist.ProcessGroup]: Inner-ring process group and inter-ring process group.
         """
         sp_size = dist.get_world_size(sp_group)
-        tp_size = dist.get_world_size(tp_group) if tp_group is not None else 1
         sp_rank = dist.get_rank(sp_group)
 
         assert inner_ring_size is not None
@@ -465,45 +465,22 @@ class RingAttention(torch.autograd.Function):
         inner_ring_group = None
         inter_ring_group = None
 
-        world_size = dist.get_world_size()
-        rank = dist.get_rank()
-        num_ring_size = world_size // num_rings
-
-        if tp_size > 1:
-            # Create inner ring groups
-            ranks = []
-            for i in range(num_rings):
-                start = i * num_ring_size
-                end = (i + 1) * num_ring_size
-                for idx in range(start, end):
-                    inner_rank = [idx + k * tp_size for k in range(inner_ring_size) if idx + k * tp_size < end]
-                    if len(inner_rank) == inner_ring_size and inner_rank not in ranks:
-                        ranks.append(inner_rank)
-                        group = dist.new_group(inner_rank)
-                        if rank in inner_rank:
-                            inner_ring_group = group
-            # Create inter ring groups
-            for i in range(num_ring_size):
-                inter_rank = [i + j * num_ring_size for j in range(num_rings)]
-                group = dist.new_group(inter_rank)
-                if rank in inter_rank:
-                    inter_ring_group = group
-        else:
-            # Create inner ring groups
-            for i in range(inner_ring_size):
-                ranks = list(range(i * inner_ring_size, (i + 1) * inner_ring_size))
-                group = dist.new_group(ranks)
-                if sp_rank in ranks:
-                    inner_ring_group = group
-            # Create inter ring groups
-            for i in range(num_rings):
-                ranks = list(range(i, sp_size, num_rings))
-                group = dist.new_group(ranks)
-                if sp_rank in ranks:
-                    inter_ring_group = group
+        inter_axis, inner_axis = 0, 1
+        pg_mesh = ProcessGroupMesh(num_rings, inner_ring_size)
+
+        # Create inner ring groups
+        for i in range(inner_ring_size):
+            ranks = list(range(i * inner_ring_size, (i + 1) * inner_ring_size))
+            group = pg_mesh.get_group_along_axis(inner_axis)
+            if sp_rank in ranks:
+                inner_ring_group = group
+
+        # Create inter ring groups
+        for i in range(num_rings):
+            ranks = list(range(i, sp_size, num_rings))
+            group = pg_mesh.get_group_along_axis(inter_axis)
+            if sp_rank in ranks:
+                inter_ring_group = group
 
         return inner_ring_group, inter_ring_group
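
Note: the rewritten branch above derives both ring groups from a single ProcessGroupMesh of shape (num_rings, inner_ring_size), with inner rings along mesh axis 1 and inter rings along mesh axis 0. The following is a minimal standalone sketch of the rank layout this implies, assuming row-major rank ordering and the illustrative values sp_size = 8, inner_ring_size = 4 (neither value comes from this patch):

# Illustrative sketch only, not library code. Assumes sequence-parallel ranks
# are laid out row-major on a (num_rings, inner_ring_size) mesh.
sp_size, inner_ring_size = 8, 4          # assumed example values
num_rings = sp_size // inner_ring_size   # 2 rings of 4 ranks each

# Row-major mesh of ranks: rank = row * inner_ring_size + col
mesh = [[row * inner_ring_size + col for col in range(inner_ring_size)] for row in range(num_rings)]

inner_rings = mesh                               # rows:    [[0, 1, 2, 3], [4, 5, 6, 7]]
inter_rings = [list(col) for col in zip(*mesh)]  # columns: [[0, 4], [1, 5], [2, 6], [3, 7]]

print("inner ring groups:", inner_rings)
print("inter ring groups:", inter_rings)
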
@@ -522,7 +499,6 @@ class RingAttention(torch.autograd.Function):
         deterministic=False,
         return_softmax=False,
         inner_ring_size=None,
-        tp_group=None,
         **kwargs,
     ):
         """
@@ -570,9 +546,7 @@ class RingAttention(torch.autograd.Function):
 
         if inner_ring_size != None:
             RingAttention.SP_GROUP = sp_group
-            inner_ring_group, inter_ring_group = RingAttention.get_double_ring_groups(
-                sp_group, tp_group, inner_ring_size
-            )
+            inner_ring_group, inter_ring_group = RingAttention.get_double_ring_groups(sp_group, inner_ring_size)
             RingAttention.INNER_RING_GROUP = inner_ring_group
             RingAttention.INTER_RING_GROUP = inter_ring_group
         else:
@@ -619,7 +593,6 @@ class RingAttention(torch.autograd.Function):
             attention_mask_type == AttnMaskType.PADDED_CAUSAL,
             inner_ring_group,
             inter_ring_group,
-            tp_group,
         )
 
         if attention_mask_type == AttnMaskType.PADDED_CAUSAL:

colossalai/shardformer/modeling/gpt2.py (2 lines changed)

@@ -858,7 +858,6 @@ def get_gpt2_flash_attention_forward(shard_config: Optional[ShardConfig] = None)
 
         sp_mode = shard_config.sequence_parallelism_mode
         sp_group = shard_config.sequence_parallel_process_group
-        tp_group = shard_config.tensor_parallel_process_group
 
         if sp_mode == "ring_attn":
             attn_output = RingAttention.attention(
@@ -870,7 +869,6 @@ def get_gpt2_flash_attention_forward(shard_config: Optional[ShardConfig] = None)
                 dropout_p=dropout_p,
                 scale=scale,
                 inner_ring_size=shard_config.inner_ring_size,
-                tp_group=tp_group,
             )
         else:
             attn_output = ColoAttention.attention(query, key, value, **attention_mask, dropout_p=dropout_p, scale=scale)

colossalai/shardformer/modeling/llama.py (3 lines changed)

@@ -563,8 +563,6 @@ def get_llama_flash_attention_forward(shard_config: ShardConfig, sp_mode=None, s
         key_states = repeat_kv(key_states, self.num_key_value_groups)
         value_states = repeat_kv(value_states, self.num_key_value_groups)
 
-        tp_group = shard_config.tensor_parallel_process_group
-
         if sp_mode == "ring_attn":
             attn_output = RingAttention.attention(
                 query_states,
@@ -573,7 +571,6 @@ def get_llama_flash_attention_forward(shard_config: ShardConfig, sp_mode=None, s
                 sp_group,
                 **attention_mask,
                 inner_ring_size=shard_config.inner_ring_size,
-                tp_group=tp_group,
             )
 
         elif shard_config.enable_flash_attention:

tests/test_shardformer/test_layer/test_ring_attn.py (8 lines changed)

@@ -5,7 +5,6 @@ from flash_attn import flash_attn_qkvpacked_func, flash_attn_varlen_qkvpacked_fu
 from torch.testing import assert_close
 
 import colossalai
-from colossalai.cluster import ProcessGroupMesh
 from colossalai.shardformer.layer import AttnMaskType
 from colossalai.shardformer.layer.attn import AttnMaskType, RingAttention
 from colossalai.shardformer.layer.utils import split_batch_zigzag, split_varlen_zigzag
@@ -21,10 +20,8 @@ from colossalai.utils import get_current_device
 def check_ring_attn(seq_len, bs, nheads, d, dtype, sp_size, tp_size=1):
     torch.cuda.manual_seed(2)
     device = get_current_device()
-    sp_axis, tp_axis = 0, 1
-    pg_mesh = ProcessGroupMesh(sp_size, tp_size)
-    tp_group = pg_mesh.get_group_along_axis(tp_axis)
-    sp_group = pg_mesh.get_group_along_axis(sp_axis)
+    sp_group = dist.group.WORLD
+    sp_size = dist.get_world_size()
     # Some outliers may seem large, but our errors are still lower than
     # than Megatron-LM context parallel's
     # (https://github.com/NVIDIA/TransformerEngine/blob/33a3d02f81c56e6f7b542c09bfa86657078d57fb/tests/pytorch/fused_attn/run_fused_attn_with_cp.py#L215)
@@ -47,7 +44,6 @@ def check_ring_attn(seq_len, bs, nheads, d, dtype, sp_size, tp_size=1):
         AttnMaskType.CAUSAL,
         return_softmax=True,
         inner_ring_size=max(2, sp_size // 2),
-        tp_group=tp_group,
     )
     ring_out = ring_out.transpose(1, 2)
     out, lse, _ = flash_attn_qkvpacked_func(
