mirror of https://github.com/hpcaitech/ColossalAI
commit 6fb1322db1 ("fix"), parent 65c8297710
@@ -4,7 +4,6 @@ from typing import Callable, Dict, Optional, Tuple
import torch
import torch.distributed
import torch.distributed as dist
from torch.distributed import ProcessGroup
import torch.nn.functional as F
from einops import rearrange
@@ -443,8 +442,8 @@ class RingAttention(torch.autograd.Function):
            Tuple[dist.ProcessGroup, dist.ProcessGroup]: Inner-ring process group and inter-ring process group.
        """
        sp_size = dist.get_world_size(sp_group)
        sp_rank = dist.get_rank(sp_group)
        tp_size = dist.get_world_size(tp_group)
        dist.get_rank(sp_group)
        dist.get_world_size(tp_group)

        if inner_ring_size is None:
            if torch.cuda.device_count() >= dist.get_world_size():
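Sketch for context: when inner_ring_size is not given, the branch above falls back to a size derived from the hardware layout. A minimal illustration of such a default, assuming the per-node GPU count is the intended cap (the helper name and the exact fallback rule are assumptions, not the commit's code):

import torch
import torch.distributed as dist

def default_inner_ring_size(sp_size: int) -> int:
    # Assumed fallback mirroring the visible check: if the whole job fits on
    # one node, treat the full sequence-parallel group as a single ring.
    if torch.cuda.device_count() >= dist.get_world_size():
        return sp_size
    # Otherwise cap the inner ring at the per-node GPU count so ring P2P
    # traffic stays on fast intra-node links (assumption).
    return min(sp_size, torch.cuda.device_count())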
@@ -476,6 +475,7 @@ class RingAttention(torch.autograd.Function):
        world_size = dist.get_world_size()
        rank = dist.get_rank()
        groups = int(world_size / sp_size)

        # Create inner ring groups
        for group_id in range(groups):
            for i in range(inner_ring_size):
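To make the loop arithmetic concrete, here is a small sketch of how global ranks could be partitioned into inner rings, assuming consecutive ranks form one sequence-parallel group; the actual rank layout in the commit may differ:

def inner_ring_ranks(world_size: int, sp_size: int, inner_ring_size: int):
    # Returns one list of global ranks per inner ring.
    rings = []
    num_sp_groups = world_size // sp_size
    rings_per_sp_group = sp_size // inner_ring_size
    for group_id in range(num_sp_groups):
        for i in range(rings_per_sp_group):
            start = group_id * sp_size + i * inner_ring_size
            rings.append(list(range(start, start + inner_ring_size)))
    return rings

# Example: 8 ranks, sp_size=8, inner_ring_size=4 -> [[0, 1, 2, 3], [4, 5, 6, 7]]
print(inner_ring_ranks(8, 8, 4))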
@@ -557,7 +557,9 @@ class RingAttention(torch.autograd.Function):

        if RingAttention.SP_GROUP is not sp_group:
            RingAttention.SP_GROUP = sp_group
            inner_ring_group, inter_ring_group = RingAttention.get_double_ring_groups(sp_group, tp_group, inner_ring_size)
            inner_ring_group, inter_ring_group = RingAttention.get_double_ring_groups(
                sp_group, tp_group, inner_ring_size
            )
            RingAttention.INNER_RING_GROUP = inner_ring_group
            RingAttention.INTER_RING_GROUP = inter_ring_group
        else:
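A minimal call-site sketch for get_double_ring_groups with the tensor-parallel group threaded through, assuming torch.distributed is already initialised (e.g. via torchrun) on 8 ranks; the import path and the group layout are assumptions for illustration:

import torch.distributed as dist
from colossalai.shardformer.layer.attn import RingAttention  # import path assumed

# Illustrative 8-rank layout: all ranks form one sequence-parallel group,
# and ranks {i, i + 4} form a tensor-parallel pair.  Every rank must call
# dist.new_group() the same number of times with identical arguments.
sp_group = dist.new_group(list(range(8)))
tp_group = None
for i in range(4):
    group = dist.new_group([i, i + 4])
    if dist.get_rank() in (i, i + 4):
        tp_group = group

inner_ring_group, inter_ring_group = RingAttention.get_double_ring_groups(
    sp_group,
    tp_group,
    inner_ring_size=4,  # illustrative: 4 ranks per inner ring
)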
@@ -859,6 +859,7 @@ def get_gpt2_flash_attention_forward(shard_config: Optional[ShardConfig] = None)
        sp_mode = shard_config.sequence_parallelism_mode
        sp_group = shard_config.sequence_parallel_process_group
        tp_group = shard_config.tensor_parallel_process_group

        if sp_mode == "ring_attn":
            attn_output = RingAttention.attention(
                query,
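The hunk is truncated after query; as a rough sketch, the ring-attention branch presumably forwards the tensors together with the process groups read above. Everything past query below (argument names and order) is an assumption, not the signature in this commit:

        if sp_mode == "ring_attn":
            attn_output = RingAttention.attention(
                query,
                key,       # assumed name of the key tensor in this scope
                value,     # assumed name of the value tensor in this scope
                sp_group,
                tp_group,  # assumed to be forwarded here, since it is read just above
            )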
|
@ -564,6 +564,7 @@ def get_llama_flash_attention_forward(shard_config: ShardConfig, sp_mode=None, s
|
|||
value_states = repeat_kv(value_states, self.num_key_value_groups)
|
||||
|
||||
tp_group = shard_config.tensor_parallel_process_group
|
||||
|
||||
if sp_mode == "ring_attn":
|
||||
attn_output = RingAttention.attention(
|
||||
query_states,
|
||||
|
|
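For context on the first line of this hunk: repeat_kv expands grouped key/value heads to match the number of query heads before the attention call. A self-contained sketch of the equivalent operation (this mirrors the upstream Llama helper; the shapes in the example are illustrative):

import torch

def repeat_kv_sketch(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    # (batch, num_kv_heads, seq_len, head_dim) -> (batch, num_kv_heads * n_rep, seq_len, head_dim)
    batch, num_kv_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_kv_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_kv_heads * n_rep, slen, head_dim)

# Example: 4 KV heads repeated 8x to match 32 query heads.
kv = torch.randn(2, 4, 16, 64)
assert repeat_kv_sketch(kv, 8).shape == (2, 32, 16, 64)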