From f7c5485ed60aabd1e5d4c1a350701d68422ce2f8 Mon Sep 17 00:00:00 2001
From: hxwang
Date: Thu, 25 Jul 2024 09:07:49 +0000
Subject: [PATCH] [chore] docstring

---
 colossalai/booster/plugin/moe_hybrid_parallel_plugin.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/colossalai/booster/plugin/moe_hybrid_parallel_plugin.py b/colossalai/booster/plugin/moe_hybrid_parallel_plugin.py
index 4c3aece9d..beac2d037 100644
--- a/colossalai/booster/plugin/moe_hybrid_parallel_plugin.py
+++ b/colossalai/booster/plugin/moe_hybrid_parallel_plugin.py
@@ -115,8 +115,10 @@ class MoeHybridParallelPlugin(HybridParallelPlugin):
     """
     Modified from colossalai.booster.plugin.hybrid_parallel_plugin.HybridParallelPlugin
     Extra Args:
-        ep_size (int): The size of tensor parallelism. Tensor parallelism will not be used when tp_size is set to 1.
-        force_overlap_comm (bool): For LowLevelZeroOptimizer, it might causes program hang when some experts are routed and overlap_communication is True during training. This flag is used to force overlap_communication=True.
+        ep_size (int): The size of expert parallelism.
+        force_overlap_comm (bool):
+            For LowLevelZeroOptimizer, the program might hang when some experts are not routed and overlap_communication is True during training.
+            This flag is used to force overlap_communication=True. Make sure every expert is routed when you use this.
     """
 
     def __init__(
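
For context, a minimal usage sketch of the flag documented above. Only ep_size and force_overlap_comm come from this patch; the other keyword arguments follow the usual HybridParallelPlugin parameters and their values here are illustrative assumptions, not part of the change.

    import colossalai
    from colossalai.booster import Booster
    from colossalai.booster.plugin.moe_hybrid_parallel_plugin import MoeHybridParallelPlugin

    # Assumes the distributed environment has already been set up,
    # e.g. via colossalai.launch_from_torch() under torchrun.
    plugin = MoeHybridParallelPlugin(
        tp_size=1,                # no tensor parallelism (illustrative)
        pp_size=1,                # no pipeline parallelism (illustrative)
        ep_size=2,                # expert parallel group size (documented in this patch)
        zero_stage=1,             # LowLevelZeroOptimizer path (illustrative)
        force_overlap_comm=True,  # force overlap_communication=True; only safe if every expert is routed
    )
    booster = Booster(plugin=plugin)
    # model, optimizer, criterion, dataloader are assumed to be defined elsewhere:
    # model, optimizer, criterion, dataloader, _ = booster.boost(model, optimizer, criterion, dataloader)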