From bc7eeade33e33e3a7c2df26fedab707f3a62d6fe Mon Sep 17 00:00:00 2001
From: wangbluo <2538539015@qq.com>
Date: Tue, 15 Oct 2024 13:28:33 +0800
Subject: [PATCH] fix

---
 colossalai/shardformer/modeling/gpt2.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/colossalai/shardformer/modeling/gpt2.py b/colossalai/shardformer/modeling/gpt2.py
index 90ffba8cf..d550484da 100644
--- a/colossalai/shardformer/modeling/gpt2.py
+++ b/colossalai/shardformer/modeling/gpt2.py
@@ -857,7 +857,6 @@ def get_gpt2_flash_attention_forward(shard_config: Optional[ShardConfig] = None)
         dropout_p = self.attn_dropout.p if self.training else 0.0
 
         sp_mode = shard_config.sequence_parallelism_mode
-        shard_config.sequence_parallel_process_group
         if sp_mode == "ring_attn":
             attn_output = RingAttention.attention(
                 query,