mirror of https://github.com/InternLM/InternLM
fix the CI incompatibility in config
parent 007e58a4af
commit a8dea6313f
@@ -305,6 +305,9 @@ def args_sanity_check():
         gpc.config.parallel.sequence_parallel is True and gpc.config.model.use_flash_attn is False
     ), "sequence parallel does not support use_flash_attn=False"
 
+    if isinstance(gpc.config.parallel["tensor"], int):
+        gpc.config.parallel["tensor"] = dict(size=gpc.config.parallel["tensor"], mode='origin_tp')
+
     if gpc.config.parallel["tensor"].get("mode", None) is None:
         gpc.config.parallel["tensor"]["mode"] = "origin_tp"
 
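The added lines normalize the `tensor` entry of `gpc.config.parallel` so that older configs keep working: an integer shorthand is expanded into a dict, and a dict without an explicit `mode` falls back to "origin_tp". A minimal, self-contained sketch of that normalization follows; the helper name normalize_tensor_parallel is hypothetical and not part of the commit.

# Sketch of the config normalization performed by the added lines.
# `normalize_tensor_parallel` is an illustrative stand-in, not InternLM API.
def normalize_tensor_parallel(tensor_cfg):
    # Integer shorthand, e.g. tensor=4, becomes dict(size=4, mode="origin_tp").
    if isinstance(tensor_cfg, int):
        tensor_cfg = dict(size=tensor_cfg, mode="origin_tp")
    # A dict without an explicit mode also defaults to "origin_tp".
    if tensor_cfg.get("mode", None) is None:
        tensor_cfg["mode"] = "origin_tp"
    return tensor_cfg

print(normalize_tensor_parallel(4))             # {'size': 4, 'mode': 'origin_tp'}
print(normalize_tensor_parallel(dict(size=8)))  # {'size': 8, 'mode': 'origin_tp'}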