From 35778efff3e7644e86e64a5d82ba3e12b558a5f7 Mon Sep 17 00:00:00 2001 From: Qu Wenwen Date: Mon, 18 Dec 2023 14:02:33 +0800 Subject: [PATCH] update MoE config to fit training on 8 GPUs --- configs/7B_MoE4_sft.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configs/7B_MoE4_sft.py b/configs/7B_MoE4_sft.py index cc94cdc..0672422 100644 --- a/configs/7B_MoE4_sft.py +++ b/configs/7B_MoE4_sft.py @@ -141,7 +141,7 @@ model = dict( layer_norm_epsilon=1e-5, use_flash_attn=True, num_chunks=1, # if num_chunks > 1, interleaved pipeline scheduler is used. - num_experts=8, + num_experts=4, moe_use_residual=False, moe_gate_k=2, )