mirror of https://github.com/InternLM/InternLM
add default setting for expert parallel size

parent 0e2eb90d22 · commit 86bcda5ca9
@@ -143,7 +143,7 @@ class ParallelContext(metaclass=SingletonMeta):
         self.pipeline_parallel_size = 1
         self.tensor_parallel_size = 1
         self.zero1_parallel_size = -1
-        self.expert_parallel_size = -1
+        self.expert_parallel_size = 1
         self.num_processes_on_current_node = -1
         self.virtual_pipeline_parallel_size = None
         self.virtual_pipeline_parallel_rank = None
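The first hunk changes the in-class default for expert_parallel_size from the -1 sentinel to 1, so expert parallelism is simply disabled unless configured. The sketch below is a hypothetical illustration in plain Python, not InternLM code, of why 1 is the safer default: an unresolved -1 poisons any group-size computation that consumes it, while 1 degrades gracefully to full replication. The helper name expert_data_parallel_size and the grouping rule (world ranks factor into expert-parallel groups times expert-data-parallel replicas) are assumptions for illustration.

def expert_data_parallel_size(world_size: int, expert_parallel_size: int) -> int:
    """Return how many replicas of each expert group exist across the world."""
    if expert_parallel_size == -1:
        # The old default: a sentinel that must be resolved before use.
        raise ValueError("expert_parallel_size was never resolved from -1")
    if world_size % expert_parallel_size != 0:
        raise ValueError("world_size must be divisible by expert_parallel_size")
    return world_size // expert_parallel_size

print(expert_data_parallel_size(8, 1))  # 8: experts replicated on every rank
print(expert_data_parallel_size(8, 4))  # 2: 4-way expert parallelism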
@@ -60,6 +60,9 @@ def args_sanity_check():
     if "tensor" not in gpc.config.parallel:
         gpc.config.parallel._add_item("tensor", 1)
 
+    if "expert" not in gpc.config.parallel:
+        gpc.config.parallel._add_item("expert", 1)
+
     # processing the data config in gpc
     data = gpc.config.data
 
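The second hunk extends args_sanity_check() so that a user config omitting the "expert" key still gets a default of 1, mirroring the existing handling of "tensor". A minimal sketch of the pattern follows, assuming only that gpc.config.parallel behaves like a dict exposing an _add_item(key, value) setter; the Config class below is a stand-in for illustration, not InternLM's implementation.

# Stand-in for gpc.config.parallel; assumes only dict-like behavior plus
# an _add_item(key, value) setter, as the hunk above suggests.
class Config(dict):
    def _add_item(self, key, value):
        self[key] = value

def args_sanity_check(parallel):
    # Fill conservative defaults so downstream code can read both keys
    # unconditionally; 1 means "no parallelism along this axis".
    if "tensor" not in parallel:
        parallel._add_item("tensor", 1)
    if "expert" not in parallel:
        parallel._add_item("expert", 1)

cfg = Config(zero1=8)   # a user config that omits "tensor" and "expert"
args_sanity_check(cfg)
print(cfg)              # {'zero1': 8, 'tensor': 1, 'expert': 1}

Filling defaults at sanity-check time keeps user configs minimal and removes per-access guards from the rest of the codebase.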