mirror of https://github.com/hpcaitech/ColossalAI

fix

parent 6be9862aaf
commit fd92789af2
@@ -488,7 +488,7 @@ class RingAttention(torch.autograd.Function):
        q,  # (B, H, Sq, D)
        k,
        v,
        sp_group,
        sp_axis,
        attention_mask_type,
        cu_seqlens=None,
        max_seqlen=None,
@@ -499,7 +499,6 @@ class RingAttention(torch.autograd.Function):
        return_softmax=False,
        inner_ring_size=None,
        pg_mesh=None,
        sp_axis=None,
        **kwargs,
    ):
        """
@@ -546,7 +545,7 @@ class RingAttention(torch.autograd.Function):
        assert pg_mesh is not None, f"Error: The pg mesh is None! please check the process group initialization."

        clone_pg = lambda pg: dist.new_group(dist.get_process_group_ranks(pg))

        sp_group = pg_mesh.get_group_along_axis(sp_axis)
        if inner_ring_size != None:
            RingAttention.SP_GROUP = sp_group
            inner_ring_group, inter_ring_group = RingAttention.get_double_ring_groups(sp_axis, pg_mesh, inner_ring_size)
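
The hunks above pass an sp_axis and a pg_mesh and derive the sequence-parallel group with pg_mesh.get_group_along_axis(sp_axis). A minimal stand-alone sketch of that pattern follows; the mesh layout, backend, and sizes are illustrative assumptions, not values from this commit:

# Sketch only: derive the sequence-parallel group from a ProcessGroupMesh axis,
# mirroring sp_group = pg_mesh.get_group_along_axis(sp_axis) above.
import torch.distributed as dist
from colossalai.cluster import ProcessGroupMesh

dist.init_process_group(backend="nccl")       # assumes a torchrun-style launch
sp_size = dist.get_world_size()               # illustrative: all ranks form one SP ring
pg_mesh = ProcessGroupMesh(1, 1, sp_size, 1)  # assumed (dp, pp, sp, tp) axis order
sp_axis = 2                                    # index of the SP axis in the mesh above

sp_group = pg_mesh.get_group_along_axis(sp_axis)
print(dist.get_rank(), dist.get_process_group_ranks(sp_group))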
@@ -857,19 +857,18 @@ def get_gpt2_flash_attention_forward(shard_config: Optional[ShardConfig] = None)
        dropout_p = self.attn_dropout.p if self.training else 0.0

        sp_mode = shard_config.sequence_parallelism_mode
        sp_group = shard_config.sequence_parallel_process_group
        shard_config.sequence_parallel_process_group
        if sp_mode == "ring_attn":
            attn_output = RingAttention.attention(
                query,
                key,
                value,
                sp_group,
                sp_axis=shard_config.sp_axis,
                **attention_mask,
                dropout_p=dropout_p,
                scale=scale,
                inner_ring_size=shard_config.inner_ring_size,
                pg_mesh=shard_config.pg_mesh,
                sp_axis=shard_config.sp_axis,
            )
        else:
            attn_output = ColoAttention.attention(query, key, value, **attention_mask, dropout_p=dropout_p, scale=scale)
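
One plausible reading of the dispatch above after this fix, with sp_axis and pg_mesh taken from the ShardConfig and the duplicated sp_axis keyword dropped; the helper below is hypothetical and only illustrates the call shape:

# Hypothetical helper for illustration; the real code inlines this logic in the
# patched GPT-2 forward.
from typing import Optional

import torch

from colossalai.shardformer.layer.attn import ColoAttention, RingAttention
from colossalai.shardformer.shard import ShardConfig


def dispatch_attention(query, key, value, attention_mask: dict, dropout_p: float,
                       scale: Optional[float], shard_config: ShardConfig) -> torch.Tensor:
    if shard_config.sequence_parallelism_mode == "ring_attn":
        return RingAttention.attention(
            query,
            key,
            value,
            sp_axis=shard_config.sp_axis,
            **attention_mask,
            dropout_p=dropout_p,
            scale=scale,
            inner_ring_size=shard_config.inner_ring_size,
            pg_mesh=shard_config.pg_mesh,
        )
    return ColoAttention.attention(query, key, value, **attention_mask, dropout_p=dropout_p, scale=scale)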
@@ -568,11 +568,10 @@ def get_llama_flash_attention_forward(shard_config: ShardConfig, sp_mode=None, s
                query_states,
                key_states,
                value_states,
                sp_group,
                sp_axis=shard_config.sp_axis,
                **attention_mask,
                inner_ring_size=shard_config.inner_ring_size,
                pg_mesh=shard_config.pg_mesh,
                sp_axis=shard_config.sp_axis,
            )

        elif shard_config.enable_flash_attention:
@@ -44,12 +44,11 @@ def check_ring_attn(seq_len, bs, nheads, d, dtype, inner_ring_size):
        q,
        k,
        v,
        sp_group,
        sp_axis,
        AttnMaskType.CAUSAL,
        return_softmax=True,
        inner_ring_size=inner_ring_size,
        pg_mesh=pg_mesh,
        sp_axis=sp_axis,
    )
    ring_out = ring_out.transpose(1, 2)
    out, lse, _ = flash_attn_qkvpacked_func(
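
The reference side of this test takes both the output and the log-sum-exp from flash-attn. A stand-alone sketch of that call follows; the shapes, dtype, and causal=True are illustrative assumptions, not the test's actual values:

# Sketch only: flash-attn reference call in the style of
# out, lse, _ = flash_attn_qkvpacked_func(...) above.
import torch
from flash_attn import flash_attn_qkvpacked_func

bs, seqlen, nheads, d = 2, 4096, 8, 64
qkv = torch.randn(bs, seqlen, 3, nheads, d, dtype=torch.bfloat16, device="cuda", requires_grad=True)

# return_attn_probs=True also returns the log-sum-exp used for the comparison.
out, lse, _ = flash_attn_qkvpacked_func(qkv, dropout_p=0.0, causal=True, return_attn_probs=True)
print(out.shape, lse.shape)  # (bs, seqlen, nheads, d) and (bs, nheads, seqlen)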
@@ -88,6 +87,7 @@ def check_packed_seq(seqlen, bs, nheads, d, dtype):
    device = get_current_device()
    sp_group = dist.group.WORLD
    sp_size = dist.get_world_size()
    sp_axis = 2
    atol = rtol = 7e-3
    torch.cuda.manual_seed(2)
    # Prepare varlen attention mask
@@ -128,11 +128,11 @@ def check_packed_seq(seqlen, bs, nheads, d, dtype):
        q_ring,
        k_ring,
        v_ring,
        sp_group,
        sp_axis,
        **mask_info,
        pad_output=False,
        return_softmax=True,
        pg_mesh=dist.group.WORLD,
        pg_mesh=ProcessGroupMesh(1, 1, sp_size, 1),
        # deterministic=True
    )
    ring_out = ring_out.transpose(1, 2).reshape(-1, nheads, d)