5abe519c4c  yingtongxiong  2023-10-17 16:37:06 +08:00  remove full weight for block 0
5c38cb6409  yingtongxiong  2023-10-17 15:38:24 +08:00  add head overlap
a5c6e457b9  yingtongxiong  2023-10-17 15:17:03 +08:00  Merge branch 'feat/fstp' of https://github.com/yingtongxiong/InternLM into feat/fstp
6408b944c2  yingtongxiong  2023-10-17 15:14:39 +08:00  support fine grained
6682f5d92a  chenxun.p  2023-10-17 15:10:07 +08:00  fix reduce scatter async bug
229cc5c68c  chenxun.p  2023-10-17 11:15:54 +08:00  impl reduce scatter async
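The two chenxun.p commits above name an asynchronous reduce-scatter. A minimal sketch of that pattern, assuming a recent torch.distributed; the helper name is illustrative, not the repository's actual code:

    # Launch the collective with async_op=True, keep computing, and wait on
    # the returned handle before the reduced gradient shard is consumed.
    import torch
    import torch.distributed as dist

    def reduce_scatter_async(full_grad: torch.Tensor, world_size: int):
        """Start a reduce-scatter without blocking; return (shard, handle)."""
        shard = torch.empty(full_grad.shape[0] // world_size, *full_grad.shape[1:],
                            dtype=full_grad.dtype, device=full_grad.device)
        handle = dist.reduce_scatter_tensor(shard, full_grad, async_op=True)
        return shard, handle

    # usage: overlap the collective with independent compute, then synchronize
    # shard, handle = reduce_scatter_async(grad_weight, dist.get_world_size())
    # ... unrelated backward computation runs here ...
    # handle.wait()  # shard is now valid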
d1af0d6aee  huangting4201  2023-10-17 10:13:56 +08:00  feat(model/linear.py): block-grained backward
0d1fa037dd  huangting4201  2023-10-16 20:13:59 +08:00  feat(model/linear.py): set block 0 full weight
82204eea59  yingtongxiong  2023-10-16 16:35:14 +08:00  support hybrid overlap
d0f0c22cac  huangting4201  2023-10-13 11:10:23 +08:00  feat(model/linear.py): change pre backward from wqkv to block
d0b1346993  huangting4201  2023-10-12 19:42:08 +08:00  feat(model/linear.py): support block allgather overlap
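The block-allgather-overlap commits above (d0b1346993, d0f0c22cac) describe prefetching the next transformer block's sharded weights while the current block computes. A minimal sketch of that pattern, assuming torch.distributed and CUDA streams; all names are illustrative:

    import torch
    import torch.distributed as dist

    comm_stream = torch.cuda.Stream()  # side stream for weight all-gathers

    def prefetch_block_weights(shard: torch.Tensor, world_size: int) -> torch.Tensor:
        """All-gather one block's weight shard on a separate CUDA stream."""
        full = torch.empty(shard.shape[0] * world_size, *shard.shape[1:],
                           dtype=shard.dtype, device=shard.device)
        with torch.cuda.stream(comm_stream):
            dist.all_gather_into_tensor(full, shard)
        return full

    # hypothetical forward loop: gather block i+1 while block i computes
    # for i, block in enumerate(blocks):
    #     if i + 1 < len(blocks):
    #         nxt = prefetch_block_weights(blocks[i + 1].weight_shard, world_size)
    #     out = block(out)  # compute on the default stream hides the gather
    #     torch.cuda.current_stream().wait_stream(comm_stream)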
5fd5a8a32b  yingtongxiong  2023-10-11 17:36:41 +08:00  support fine-grained overlap
792b066f15  yingtongxiong  2023-10-11 10:57:12 +08:00  communication overlap
0fac845c36  yingtongxiong  2023-10-10 17:06:13 +08:00  overlap grad_input computation and grad_weight reduce_scatter
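Commit 0fac845c36 names a classic tensor-parallel trick: start the grad_weight reduce-scatter asynchronously, then compute grad_input while the collective is in flight. A sketch under that assumption (sharded weight parameter, recent torch.distributed); illustrative only, not the repository's linear implementation:

    import torch
    import torch.distributed as dist

    class OverlappedLinearGrad(torch.autograd.Function):
        """Illustrative linear whose backward overlaps compute and comm."""

        @staticmethod
        def forward(ctx, x, weight):          # weight: (out_features, in_features)
            ctx.save_for_backward(x, weight)
            return x @ weight.t()

        @staticmethod
        def backward(ctx, grad_out):
            x, weight = ctx.saved_tensors
            grad_weight = grad_out.flatten(0, -2).t() @ x.flatten(0, -2)
            ws = dist.get_world_size()
            shard = torch.empty(grad_weight.shape[0] // ws, grad_weight.shape[1],
                                dtype=grad_weight.dtype, device=grad_weight.device)
            # start the reduce-scatter, then do useful work while it runs
            handle = dist.reduce_scatter_tensor(shard, grad_weight, async_op=True)
            grad_input = grad_out @ weight    # overlaps with the collective
            handle.wait()
            return grad_input, shard          # shard matches a sharded weight param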
dd67ab948d  yingtongxiong  2023-10-09 21:40:02 +08:00  merge develop
1b7935dd98  yingtongxiong  2023-10-09 21:35:52 +08:00  merge upstream develop
b3645b0244  Pryest  2023-10-09 08:29:11 -05:00  fix(model): fix errant inference_forward (#396)
							* Fix errant inference_forward.
* Recover use_dynamic_ntk_rope.
* Fix bugs.
* Fit to flash attention 1.0
* Fit to flash attention 1.0
* Fit to flash attention 1.0.5.
* Fit to flash attention 1.0.5. 
007e58a4af  yingtongxiong  2023-10-09 20:54:26 +08:00  merge upstream develop
f191853bf4  yingtongxiong  2023-10-09 20:39:57 +08:00  fix lint
29df765f65  yingtongxiong  2023-10-09 20:23:32 +08:00  refactor code
a075153adf  zaglc  2023-10-09 18:59:31 +08:00  feat(train): add fsdp training option (#293)
							* feat(fsdp): add training option for fsdp
* fix(fsdp): add mix-precision training
* fix failure in lint-check
* fix format problem
* restore 7B_sft
* fix load ckpt bug
* fix load ckpt bug2
* feat(solver/optimizer): add new file fsdp_optimizer.py
* fix(train.py): fix ci lint error
* fix(fsdp_optimizer.py): wait grad async
* fix bug for loading ckpts when zero1 < dp_size
* fix(context/parallel_context.py): only log warning for fsdp
* change ckpt name
* fix(model/modeling_internlm.py): fix checkpoint=False runtime error
* more wrap
* add support for FSDP with tp
* modify args_sanity_check for fsdp with pipeline and fsdp with moe
* fix(internlm/utils/parallel.py): fix circular import
* fix(internlm/train/training_internlm.py): remove set IS_TENSOR_PARALLEL attr
* fix(internlm/train/training_internlm.py): update wrap class and fix lint error
* fix(internlm/model): reset dropout_selective_checkpoint=True
* feat(configs/7B_sft.py): move fsdp config to parallel zero1
* feat(configs/7B_sft.py): adapt to old version config
---------
Co-authored-by: huangting4201 <1538303371@qq.com> 
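For the fsdp option in #293 (a075153adf), a minimal sketch of wrapping a model with PyTorch FSDP plus a mixed-precision policy; block_cls and the dtypes are placeholders, not InternLM's actual configuration:

    import functools
    import torch
    from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, MixedPrecision
    from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy

    def wrap_model_with_fsdp(model: torch.nn.Module, block_cls: type):
        """Shard `model` across ranks, wrapping at transformer-block granularity."""
        policy = functools.partial(transformer_auto_wrap_policy,
                                   transformer_layer_cls={block_cls})
        mp = MixedPrecision(param_dtype=torch.bfloat16,   # compute in bf16
                            reduce_dtype=torch.float32)   # reduce grads in fp32
        return FSDP(model, auto_wrap_policy=policy, mixed_precision=mp)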
21c1a7fa47  yingtongxiong  2023-10-09 18:01:06 +08:00  support evaluation with fstp
189a313da6  yingtongxiong  2023-10-09 17:26:20 +08:00  support fstp and refactor code
bd4af3a31f  yingtongxiong  2023-10-08 17:21:17 +08:00  modify the all2all
bf475b6940  yingtongxiong  2023-10-08 13:20:29 +08:00  debug
10aa63f0e1  yingtongxiong  2023-10-07 14:03:47 +08:00  support optimized sp
136d55ec30  Wenwen Qu  2023-09-27 15:54:53 +08:00  feat(moe): add moe module (#182)
							* feat(XXX): add moe
* reformat code
* modified:   .pre-commit-config.yaml
	modified:   internlm/model/moe.py
	modified:   internlm/model/modeling_internlm.py
* modified:   internlm/model/modeling_internlm.py
* modified:   internlm/core/context/process_group_initializer.py
	modified:   internlm/core/scheduler/no_pipeline_scheduler.py
	modified:   internlm/solver/optimizer/hybrid_zero_optim.py
* modified:   internlm/model/moe.py
	modified:   internlm/moe/sharded_moe.py
	modified:   internlm/utils/parallel.py
* rollback .pre-commit-config.yaml
* add residual and other moe features
* modify grad clipping due to moe
* add param arguments
* reformat code
* add expert data support and fix bugs
* Update .pre-commit-config.yaml
* modified:   internlm/model/modeling_internlm.py
* add no-interleaved & no-overlapped moe pp support
* support zero_overlap_communication
* avoid moe parameter partition in zero optimizer
* fix the moe_loss_coeff bug
* support interleaved pp
* fix moe bugs in zero optimizer
* fix more moe bugs in zero optimizer
* fix moe bugs in zero optimizer
* add logger for moe_loss
* fix bugs with merge
* fix the pp moe bugs
* fix bug on logger
* update moe training cfg on real-dataset
* refactor code
* refactor code
* fix bugs with compute moe norm
* optimize code with moe norm computing
* fix the bug that missing scale the latent moe loss
* refactor code
* fix moe loss logger for the interleaved pp
* change the scale position for latent moe_loss
* Update 7B_sft.py
* add support for moe checkpoint
* add comments for moe
* reformat code
* fix bugs
* fix bugs
* Update .pre-commit-config.yaml
* remove moe_loss_coeff parameter passing
* fix group_norms computing in hybrid_zero_optim
* use dummy mode to generate random numbers in model construction
* replace flashatten experts by feedforward experts
* fix bugs with _compute_norm_with_moe_group
* merge upstream/develop into feature_add_moe
* merge upstream/develop into feature_add_moe
* change float16 to bfloat16
* fix interface for dense pipeline
* refactor split_moe_group code
* fix precision inconsistency
* refactor code
* Update 7B_sft.py
* refactor code
* refactor code
* refactor code
* refactor code
* refactor code for split group
* refactor code for log
* fix logger for moe
* refactor code for split param group
* fix the moe_loss for ci and val
* refactor
* fix bugs with split group
* fix bugs in save/load moe checkpoint
* add moe module to `__init__.py`
* add compatible code for old version
* update moe config file
* modify moe config file
* fix merge bugs
* update moe config file
* change condition for compatibility
---------
Co-authored-by: zhanglei <ryancheung98@163.com>
Co-authored-by: Ryan (张磊) <leizhang.real@gmail.com> 
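Behind #182 (136d55ec30) sits a gated mixture-of-experts layer. As a rough illustration only, a dense top-2 gate in plain PyTorch; the actual module (per the bullets above) also handles moe_loss scaling, expert parallel groups, and checkpointing:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class Top2MoE(nn.Module):
        """Illustrative top-2 MoE layer: route each token to its two best experts."""

        def __init__(self, hidden: int, num_experts: int):
            super().__init__()
            self.gate = nn.Linear(hidden, num_experts, bias=False)
            self.experts = nn.ModuleList(
                nn.Sequential(nn.Linear(hidden, 4 * hidden), nn.GELU(),
                              nn.Linear(4 * hidden, hidden))
                for _ in range(num_experts))

        def forward(self, x: torch.Tensor) -> torch.Tensor:  # x: (tokens, hidden)
            scores = F.softmax(self.gate(x), dim=-1)          # (tokens, E)
            weights, idx = scores.topk(2, dim=-1)             # top-2 routing
            weights = weights / weights.sum(dim=-1, keepdim=True)
            out = torch.zeros_like(x)
            for k in range(2):                                # combine expert outputs
                for e, expert in enumerate(self.experts):
                    mask = idx[:, k] == e
                    if mask.any():
                        out[mask] += weights[mask, k, None] * expert(x[mask])
            return out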
3b0eff0c8a  huangting4201  2023-09-21 14:46:22 +08:00  fix(model/embedding.py): ci lint check error (#345)
							* fix(ci): fix ci lint error
* fix(ci): fix ci lint error 
8464425a7b  YWMditto  2023-09-20 23:31:47 +08:00  feat(model): add DynamicNTKScalingRotaryEmbedding (#339)
							* add dynamic ntk rope
* update dynamic ntk rope
* fix lint check
* fix lint check
* add more desc
---------
Co-authored-by: YWMditto <862779238@qq.com> 
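DynamicNTKScalingRotaryEmbedding (#339, 8464425a7b) follows the widely used dynamic-NTK recipe: once the running sequence length exceeds the trained context, the RoPE base is rescaled so frequencies interpolate rather than extrapolate. A sketch of the base computation, with illustrative defaults:

    import torch

    def dynamic_ntk_inv_freq(dim: int, seq_len: int, max_pos: int = 2048,
                             base: float = 10000.0, factor: float = 1.0) -> torch.Tensor:
        """RoPE inverse frequencies with the base rescaled for long inputs."""
        if seq_len > max_pos:
            # standard dynamic-NTK rescaling of the rotary base
            base = base * (factor * seq_len / max_pos - (factor - 1)) ** (dim / (dim - 2))
        return 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))

    # inv_freq then feeds the usual cos/sin cache: freqs = torch.outer(t, inv_freq)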
6a5915bf0d  ytxiong  2023-09-19 14:57:43 +08:00  feat(linear): optimize mlp by using jit (#321)
							* fuse silu op
* refactor code
* fix lint
* fix lint 
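The "fuse silu op" bullet in #321 (6a5915bf0d) points at TorchScript fusion of the MLP's gating elementwise ops. A minimal sketch, assuming a SwiGLU-style MLP; the surrounding names are illustrative:

    import torch
    import torch.nn.functional as F

    @torch.jit.script
    def fused_silu_mul(gate: torch.Tensor, up: torch.Tensor) -> torch.Tensor:
        """SwiGLU gating, silu(gate) * up, scripted so the pointwise ops can fuse."""
        return F.silu(gate) * up

    # hypothetical MLP forward using the fused op:
    # h = fused_silu_mul(self.w1(x), self.w3(x))
    # out = self.w2(h)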
0c276d8de2  yingtongxiong  2023-09-08 10:19:54 +08:00  Merge remote-tracking branch 'origin/main' into develop
b6d909d43e  Season  2023-09-06 15:36:03 +08:00  docs(*): add documentation and reST files for readthedocs (#272)
							* add initial reST files for readthedocs
* fix typos
* docs refine and minor fix
* add references for parallel training section
* fix reST format
* fix reST format
* fix reST format
* add comments for trainer API
* add link to step-by-step quickstart guide
* docs(code-docs/source/parallel.rst): add paper link url
* docs(code-docs/source/parallel.rst): add paper link url
* use MyST to render markdown
* docs(code-docs/source/initialize.rst): update model init
* add requirements for myst-parser
* reuse install and usage markdown
* docs(code-docs/source/index.rst): add example and q&a
* docs(doc/code-docs/*): docs refine
* docs(code-docs/source/parallel.rst): update docs for zero config
* docs(code-docs/source/example.rst): fix typos for example.rst
* docs(code-docs/source/example.rst): refine docs
* docs(code-docs/source/example): update example
* docs(code-docs/source/example): delete useless example
* docs(code-docs/source/*): fix image display issue
* docs(code-docs/source/parallel.rst): add docs for communication overlap
* docs(code-docs/source/conf.py): update conf.py
* docs(code-docs/source/example): update example 30B demo
* docs(code-docs/source/parallel.rst): update pipeline parallel
* docs(code-docs/source/parallel.rst): update pipeline parallel
* docs(code-docs/source/parallel.rst): update pipeline parallel
* docs(code-docs/source/parallel.rst): update pipeline parallel
* docs(code-docs/source/parallel.rst): update ZeRO1.5
* docs(code-docs/source/parallel.rst): update ZeRO1.5
* docs(code-docs/source): fix word spelling error
---------
Co-authored-by: huangting4201 <huangting3@sensetime.com> 
9445faf5be  ytxiong  2023-09-05 19:03:02 +08:00  fix(model): set tensor parallel attribute for mlp (#271)
							* set is_tensor_parallel attribute for mlp
* fix lint 
c92aa06bd8  Ryan (张磊)  2023-08-31 17:44:39 +08:00  fix(metric): argument missing in getting loss metrics. (#256)
28635755f5  YWMditto  2023-08-26 17:48:08 +08:00  [fix bug] Fix the error that RotaryEmbedding is converted to a non-fp32 format during training, and add a compatible method for the llama model. (#239)
							Co-authored-by: YWMditto <862779238@qq.com> 
54f85a6e9a  huangting4201  2023-08-24 22:03:04 +08:00  Merge develop to main (#233)
							* feat(utils/writer.py): support tensorboard writer (#63 )
* feat(utils/writer.py): support tensorboard writer
* feat(utils/writer.py): add class comment
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
* [Develop] Pull Main Branch (#121 )
* fix/fix_submodule_err (#61 )
* fix/fix_submodule_err
---------
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
* fix issue templates (#65 )
* fix(tokenizer): refactor tokenizer and update usage in readme (#51 )
* update tokenizer example
* fix(readme, requirements): fix typo at Chinese readme and select a lower version of transformers (#73 )
* fix a typo in readme
* in order to find InternLMTokenizer, select a lower version of Transformers
---------
Co-authored-by: gouhchangjiang <gouhchangjiang@gmail.com>
* [Doc] Add wechat and discord link in readme (#78 )
* Doc:add wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* [Docs]: add Japanese README (#43 )
* Add Japanese README
* Update README-ja-JP.md
replace message
* Update README-ja-JP.md
* add repetition_penalty in GenerationConfig in web_demo.py (#48 )
Co-authored-by: YWMditto <862779238@qq.com>
* use fp16 in instruction (#80 )
* [Enhancement] add more options for issue template (#77)
* [Enhancement] add more options for issue template
* update question icon
* fix link
* Use tempfile for convert2hf.py (#23 )
Fix https://github.com/InternLM/InternLM/issues/50 
* delete torch_dtype of README's example code (#100 )
* set the value of repetition_penalty to 1.0 to avoid random outputs (#99 )
* Update web_demo.py (#97 )
Remove meaningless log.
* [Fix]Fix wrong string cutoff in the script for sft text tokenizing (#106 )
---------
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
Co-authored-by: Kai Chen <chenkaidev@gmail.com>
Co-authored-by: Yang Gao <Gary1546308416AL@gmail.com>
Co-authored-by: Changjiang GOU <gouchangjiang@gmail.com>
Co-authored-by: gouhchangjiang <gouhchangjiang@gmail.com>
Co-authored-by: vansin <msnode@163.com>
Co-authored-by: Ikko Eltociear Ashimine <eltociear@gmail.com>
Co-authored-by: YWMditto <46778265+YWMditto@users.noreply.github.com>
Co-authored-by: YWMditto <862779238@qq.com>
Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com>
Co-authored-by: liukuikun <24622904+Harold-lkk@users.noreply.github.com>
Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com>
Co-authored-by: Shuo Zhang <zhangshuolove@live.com>
Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com>
* feat(core/scheduler): support pipeline parallel (#98 )
* feat(utils/writer.py): support tensorboard writer
* feat(utils/writer.py): add class comment
* feat(core): support pipeline parallel
* fix(core): fix demo running error
* feat(solver/optimizer): add pp zero optimizer
* fix(solver/optimizer): fix word spelling error
* feat(core/scheduler): add new dir scheduler in core/
* fix(core): fix ci lint error
* feat(solver/optimizer): merge pp and nopp optimizer
* doc(usage.md): update usage doc
* feat(core/scheduler): support post func
* feat(core/scheduler): add dtype para in pp sche and update func get_tensor_shape
* feat(core/scheduler): add _load_micro_batch in base scheduler
* feat(core/scheduler): support optimizer overlap communication in pp scheduler
* feat(core/scheduler): delete data process func code
* feat(core/trainer): schedule pre processing for all schedule
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
Co-authored-by: huangting.p <huangting@sensetime.com>
* refactor(rotaryEmbedding): refactor forward (#120 )
* use fp16 in instruction (#80 )
* delete torch_dtype of README's example code (#100 )
* refactor the forward for rotary embedding
---------
Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com>
Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com>
* feat(model/metrics.py): support calculating accuracy and perplexity metrics (#91)
* feat(model/metrics.py): support calculating accuracy and perplexity metrics
* fix(model/metrics.py): fix import error
* feat(train.py): minor update
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
Co-authored-by: huangting.p <huangting@sensetime.com>
* fix(optimizer/util.py) change inf definition
* [Dev] Pull Main (#139 )
* fix/fix_submodule_err (#61 )
* fix/fix_submodule_err
---------
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
* fix issue templates (#65 )
* fix(tokenizer): refactor tokenizer and update usage in readme (#51 )
* update tokenizer example
* fix(readme, requirements): fix typo at Chinese readme and select a lower version of transformers (#73 )
* fix a typo in readme
* in order to find InternLMTokenizer, select a lower version of Transformers
---------
Co-authored-by: gouhchangjiang <gouhchangjiang@gmail.com>
* [Doc] Add wechat and discord link in readme (#78 )
* Doc:add wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* [Docs]: add Japanese README (#43 )
* Add Japanese README
* Update README-ja-JP.md
replace message
* Update README-ja-JP.md
* add repetition_penalty in GenerationConfig in web_demo.py (#48 )
Co-authored-by: YWMditto <862779238@qq.com>
* use fp16 in instruction (#80 )
* [Enchancement] add more options for issue template (#77 )
* [Enchancement] add more options for issue template
* update qustion icon
* fix link
* Use tempfile for convert2hf.py (#23 )
Fix https://github.com/InternLM/InternLM/issues/50 
* delete torch_dtype of README's example code (#100 )
* set the value of repetition_penalty to 1.0 to avoid random outputs (#99 )
* Update web_demo.py (#97 )
Remove meaningless log.
* [Fix]Fix wrong string cutoff in the script for sft text tokenizing (#106 )
* docs(install.md): update dependency package transformers version to >= 4.28.0 (#124 )
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
* docs(LICENSE): add license (#125 )
* add license of colossalai and flash-attn
* fix lint
* modify the name
* fix AutoModel map in convert2hf.py (#116 )
* variables are not printed as expected (#114)
* feat(solver): fix code to adapt to torch2.0 and provide docker images (#128 )
* feat(solver): fix code to adapt to torch2.0
* docs(install.md): publish internlm environment image
* docs(install.md): update dependency packages version
* docs(install.md): update default image
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
* add demo test (#132 )
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
* fix web_demo cache accelerate (#133 )
* fix(hybrid_zero_optim.py): delete math import
* Update embedding.py
---------
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
Co-authored-by: Kai Chen <chenkaidev@gmail.com>
Co-authored-by: Yang Gao <Gary1546308416AL@gmail.com>
Co-authored-by: Changjiang GOU <gouchangjiang@gmail.com>
Co-authored-by: gouhchangjiang <gouhchangjiang@gmail.com>
Co-authored-by: vansin <msnode@163.com>
Co-authored-by: Ikko Eltociear Ashimine <eltociear@gmail.com>
Co-authored-by: YWMditto <46778265+YWMditto@users.noreply.github.com>
Co-authored-by: YWMditto <862779238@qq.com>
Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com>
Co-authored-by: liukuikun <24622904+Harold-lkk@users.noreply.github.com>
Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com>
Co-authored-by: Shuo Zhang <zhangshuolove@live.com>
Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com>
Co-authored-by: huangting4201 <1538303371@qq.com>
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
Co-authored-by: ytxiong <45058324+yingtongxiong@users.noreply.github.com>
Co-authored-by: Zaida Zhou <58739961+zhouzaida@users.noreply.github.com>
Co-authored-by: kkscilife <126147887+kkscilife@users.noreply.github.com>
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
Co-authored-by: hw <45089338+MorningForest@users.noreply.github.com>
* style(solver/optimizer/utils.py): fix lint error (#147 )
Co-authored-by: huangting.p <huangting@sensetime.com>
* feat(*): support not-flash-attn for pp and no-pp (#145 )
* support not flash attention for no-pp
* support pipeline
* modify the config
* refactor the code
* refactor the code
* remove some unnecessary code
* fix(initialize/launch.py): set default value for use_flash_attn (#158 )
* add default for use_flash_attn
* fix lint
* feat(utils/logger.py): support uniscale logger (#152 )
* style(internlm): fix lint error
* feat(utils/logger.py): support uniscale logger
* fix(utils/logger.py): fix import circular error
* feat(train.py): support dashboard metric panel and fix ci train config
* fix(ci_scripts/train/slurm_train.sh): fix ci train error
* fix(ci_scripts/train/torchrun.sh): fix ci train error
* fix(ci_scripts/train): restore ci update
* fix(config.json): delete alert webhook
* feat(train.py): optimize func init logger
* feat(config.json): delete config.json
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
Co-authored-by: huangting.p <huangting@sensetime.com>
* feat(utils/evaluation.py): support evaluate (#154 )
* style(internlm): fix lint error
* feat(utils/logger.py): support uniscale logger
* fix(utils/logger.py): fix import circular error
* feat(train.py): support dashboard metric panel and fix ci train config
* fix(ci_scripts/train/slurm_train.sh): fix ci train error
* fix(ci_scripts/train/torchrun.sh): fix ci train error
* feat(utils/evaluation.py): support evaluate on validation dataset
* fix(utils/evaluation.py): fix demo error
* fix(ci_scripts/train/ci_7B_sft.py): fix ci train error
* feat(initialize/launch.py): set default value for valid_bsz and valid_every
* fix(ci_scripts/train): restore ci update
* docs(configs/7B_sft.py): update comment for config
* fix(config.json): delete config.json
* fix evaluation bug in scheduler when use_flash_attn=False
* feat(scheduler/no_pipeline_scheduler.py): support micro_bsz>1 in no pp
* modify the judgement in pp and no-pp scheduler
* modify the data_process_func in evaluation
* fix bugs when use_flash_attn=False
* rename symbol
* feat(configs/7B_sft.py): change para valid_bsz to valid_micro_num
* feat(scheduler/no_pipeline_scheduler.py): update para set _grad_accum_batch_size
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
Co-authored-by: huangting.p <huangting@sensetime.com>
Co-authored-by: yingtongxiong <974106207@qq.com>
* feat(*): support no apex (#166 )
* support no-apex
* add default for use_apex
* fix lint
* modify the RMSNormTorch
* remove some comments
* remove use_apex parameter
* remove some unnecessary code
* refactor(*): refactor the code with no-apex (#170 )
* support no-apex
* add default for use_apex
* fix lint
* modify the RMSNormTorch
* remove some comments
* remove use_apex parameter
* remove some unnecessary code
* optimize the code including import
* remove the import RMSNorm
* remove warnings
* refactor(scheduler): rewrite pipeline scheduler (#138 )
* refactor(scheduler): rewrite pipeline scheduler
* fix(*): fix pipeline scheduler bugs
* fix(*): fix merge bug
* feat(*): update codes with todo tag
* feat(*): add comments
* feat(internlm/core/scheduler): update recv_prev/next logic
* feat(utils/evaluation.py): update sche metric hook for valid
---------
Co-authored-by: huangting.p <huangting@sensetime.com>
* feat(*): support fp32 training (#155 )
* support float32 training
* fix lint
* add adaptation in model/utils.py
* remove some unnecessary code
* fix lint
* feat(optim): add support for fp32 zero
* Revert "Merge pull request #2  from SolenoidWGT/fp32_zero"
This reverts commit 53fc50b0e5, reversing changes made to 40f24d0a73.
revert commit
* merge develop
* Update utils.py
* support fp32 in zero optimizer
* modify the dtype
---------
Co-authored-by: wangguoteng.p <wangguoteng925@qq.com>
* feat(*): support sequence_parallel (#180 )
* support sequence_parallel for no pipeline
* sequence_parallel does not support no-flash-attn
* support sequence parallel for pipeline
* add memory profiler
* Update 13B.py
* add memory profiler
* fix evaluation bug
* remove some unnecessary code
* remove some unnecessary code
* Update parallel_context.py
* modify the config
* remove memory profiler
* modify the config
* support selective dropout
* feat(monitor): support monitor and alert (#175 )
* feat(monitor): support monitor and alert
* feat(monitor.py): fix demo error
* feat(monitor.py): move cmd monitor args to config file
* feat(hybrid_zero_optim.py): if overflow occurs send alert msg
* feat(monitor.py): remove alert msg filter
* feat(monitor.py): optimize class MonitorTracker
* feat(monitor.py): optimize code
* feat(monitor.py): optimize code
* feat(monitor.py): optimize code
* feat(monitor.py): optimize code
* feat(train.py): update print to log
* style(ci): fix lint error
* fix(utils/evaluation.py): remove useless code
* fix(model/modeling_internlm.py): fix lint error
---------
Co-authored-by: huangting4201 <huangting3@sensetime.com>
* feat(ckpt): add async upload and ckpt snapshot (#161 )
* use fp16 in instruction (#80 )
* delete torch_dtype of README's example code (#100 )
* feat(ckpt): support async ckpt upload and ckpt snapshot
---------
Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com>
Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com>
Co-authored-by: wangguoteng.p <wangguoteng925@qq.com>
* feat(ckpt): add auto ckpt load and signal quit (#189)
Co-authored-by: wangguoteng.p <wangguoteng925@qq.com>
* Revert "feat(ckpt): add auto ckpt load and singal quit (#189 )" (#192 )
This reverts commit a45a91bb84 .
* refactor(solver/optimizer): improve optimizer memory (#193 )
* refactor(solver/optimizer): improve optimizer memory
* feat(data): remove useless dataset type ids map
* Feat/optimizer (#194 )
* feat(optimier.py): reduce memory footprint and avoid _check_overflow call
* feat(optimier.py): reduce memory footprint and avoid _check_overflow call
* feat(optimizer.py): overlap compute norm with allreduce
* update var and function name
* update function compute norm (#197 )
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
* feat(optimizer/hybrid_zero_optim.py): overlap gradients last bucket allreduce and compute norm (#196 )
* support gradients allreduce and compute norm overlap
* fix para set error
* remove timer cal_norm for testing
* feat(optimizer/hybrid_zero_optim.py): support group global norm
* format(lint): fix lint error
* feat(optimizer/store.py): update code based on comment
---------
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
Co-authored-by: huangting4201 <1538303371@qq.com>
* fix(ci): fix ci train error (#199 )
* fix/ci train error (#200 )
* fix(ci): fix ci train error
* fix(ci): fix ci train error
* fix(ci): fix ci train error
* fix(train.py): fix scheduler metric hook skip error (#204 )
* Merge main to develop (#203 )
* fix/fix_submodule_err (#61 )
* fix/fix_submodule_err
---------
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
* fix issue templates (#65 )
* fix(tokenizer): refactor tokenizer and update usage in readme (#51 )
* update tokenizer example
* fix(readme, requirements): fix typo at Chinese readme and select a lower version of transformers (#73 )
* fix a typo in readme
* in order to find InternLMTokenizer, select a lower version of Transformers
---------
Co-authored-by: gouhchangjiang <gouhchangjiang@gmail.com>
* [Doc] Add wechat and discord link in readme (#78 )
* Doc:add wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* [Docs]: add Japanese README (#43 )
* Add Japanese README
* Update README-ja-JP.md
replace message
* Update README-ja-JP.md
* add repetition_penalty in GenerationConfig in web_demo.py (#48 )
Co-authored-by: YWMditto <862779238@qq.com>
* use fp16 in instruction (#80 )
* [Enhancement] add more options for issue template (#77)
* [Enhancement] add more options for issue template
* update question icon
* fix link
* Use tempfile for convert2hf.py (#23 )
Fix https://github.com/InternLM/InternLM/issues/50 
* delete torch_dtype of README's example code (#100 )
* set the value of repetition_penalty to 1.0 to avoid random outputs (#99 )
* Update web_demo.py (#97 )
Remove meaningless log.
* [Fix]Fix wrong string cutoff in the script for sft text tokenizing (#106 )
* docs(install.md): update dependency package transformers version to >= 4.28.0 (#124 )
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
* docs(LICENSE): add license (#125 )
* add license of colossalai and flash-attn
* fix lint
* modify the name
* fix AutoModel map in convert2hf.py (#116 )
* variables are not printed as expected (#114)
* feat(solver): fix code to adapt to torch2.0 and provide docker images (#128 )
* feat(solver): fix code to adapt to torch2.0
* docs(install.md): publish internlm environment image
* docs(install.md): update dependency packages version
* docs(install.md): update default image
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
* add demo test (#132 )
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
* fix web_demo cache accelerate (#133 )
* Doc: add twitter link (#141 )
* Feat add checkpoint fraction (#151 )
* feat(config): add checkpoint_fraction into config
* feat: remove checkpoint_fraction from configs/7B_sft.py
---------
Co-authored-by: wangguoteng.p <wangguoteng925@qq.com>
* [Doc] update deployment guide to keep consistency with lmdeploy (#136 )
* update deployment guide
* fix error
* use llm partition (#159 )
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
* test(ci_scripts): clean test data after test, remove unnecessary global variables, and other optimizations (#165 )
* test: optimization of ci scripts (variables, test data cleaning, etc.).
* chore(workflows): disable ci job on push.
* fix: update partition
* test(ci_scripts): add install requirements automatically, trigger event about lint check and other optimizations (#174)
* add pull_request in lint check
* use default variables in ci_scripts
* fix format
* check and install requirements automatically
* fix format
---------
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
* feat(profiling): add a simple memory profiler (#89 )
* feat(profiling): add simple memory profiler
* feat(profiling): add profiling argument
* feat(CI_workflow): Add PR & Issue auto remove workflow (#184 )
* feat(ci_workflow): Add PR & Issue auto remove workflow
Add a workflow for stale PR & issue auto-removal:
- PRs & issues will be labeled as stale after 7 days of inactivity
- stale PRs & issues will be removed after 7 days
- the workflow runs every day at 1:30 a.m.
* Update stale.yml
* feat(bot): Create .owners.yml for Auto Assign (#176 )
* Create .owners.yml: for issue/pr assign automatically
* Update .owners.yml
* Update .owners.yml
fix typo
* [feat]: add pal reasoning script (#163 )
* [Feat] Add PAL inference script
* Update README.md
* Update tools/README.md
Co-authored-by: BigDong <yudongwang1226@gmail.com>
* Update tools/pal_inference.py
Co-authored-by: BigDong <yudongwang1226@gmail.com>
* Update pal script
* Update README.md
* restore .pre-commit-config.yaml
* Update tools/README.md
Co-authored-by: BigDong <yudongwang1226@gmail.com>
* Update tools/README.md
Co-authored-by: BigDong <yudongwang1226@gmail.com>
* Update pal inference script
* Update README.md
* Update internlm/utils/interface.py
Co-authored-by: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com>
* Update pal script
* Update pal script
* Update script
* Add docstring
* Update format
* Update script
* Update script
* Update script
---------
Co-authored-by: BigDong <yudongwang1226@gmail.com>
Co-authored-by: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com>
* test(ci_scripts): add timeout settings and clean work after the slurm job (#185 )
* restore pr test on develop branch
* add mask
* add post action to cancel slurm job
* remove readonly attribute on job log
* add debug info
* debug job log
* try stdin
* use stdin
* set default value avoid error
* try setting readonly on job log
* performance echo
* remove debug info
* use squeue to check slurm job status
* restore the lost param
* limit retry times
* use exclusive to avoid port already in use
* optimize loop body
* remove partition
* add {} for variables
* set env variable for slurm partition
---------
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
* refactor(tools): move interface.py and import it to web_demo (#195 )
* move interface.py and import it to web_demo
* typo
* fix(ci): fix lint error
* fix(ci): fix lint error
---------
Co-authored-by: Sun Peng <sunpengsdu@gmail.com>
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
Co-authored-by: Kai Chen <chenkaidev@gmail.com>
Co-authored-by: Yang Gao <Gary1546308416AL@gmail.com>
Co-authored-by: Changjiang GOU <gouchangjiang@gmail.com>
Co-authored-by: gouhchangjiang <gouhchangjiang@gmail.com>
Co-authored-by: vansin <msnode@163.com>
Co-authored-by: Ikko Eltociear Ashimine <eltociear@gmail.com>
Co-authored-by: YWMditto <46778265+YWMditto@users.noreply.github.com>
Co-authored-by: YWMditto <862779238@qq.com>
Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com>
Co-authored-by: liukuikun <24622904+Harold-lkk@users.noreply.github.com>
Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com>
Co-authored-by: Shuo Zhang <zhangshuolove@live.com>
Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com>
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
Co-authored-by: ytxiong <45058324+yingtongxiong@users.noreply.github.com>
Co-authored-by: Zaida Zhou <58739961+zhouzaida@users.noreply.github.com>
Co-authored-by: kkscilife <126147887+kkscilife@users.noreply.github.com>
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
Co-authored-by: hw <45089338+MorningForest@users.noreply.github.com>
Co-authored-by: Guoteng <32697156+SolenoidWGT@users.noreply.github.com>
Co-authored-by: wangguoteng.p <wangguoteng925@qq.com>
Co-authored-by: lvhan028 <lvhan_028@163.com>
Co-authored-by: zachtzy <141206206+zachtzy@users.noreply.github.com>
Co-authored-by: cx <759046501@qq.com>
Co-authored-by: Jaylin Lee <61487970+APX103@users.noreply.github.com>
Co-authored-by: del-zhenwu <dele.zhenwu@gmail.com>
Co-authored-by: Shaoyuan Xie <66255889+Daniel-xsy@users.noreply.github.com>
Co-authored-by: BigDong <yudongwang1226@gmail.com>
Co-authored-by: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com>
Co-authored-by: huangting4201 <huangting3@sensetime.com>
* fix(pipeline_scheduler.py): fix tensor shape err and comm block (#210 )
* feat(train.py): support torch profiler (#201 )
* feat(train.py): support torch profiling
* feat(train.py): optimize initialize_llm_profile
* feat(train.py): profiling with tp0 and dp0
* move sequence parallel context manager to evaluation func
* fix lint
* move the process for type_ids to load_new_batch
* fix lint
---------
Co-authored-by: yingtongxiong <974106207@qq.com>
* feat(ckpt): add auto ckpt load and signal quit (#216)
Co-authored-by: wangguoteng.p <wangguoteng925@qq.com>
* feat(memory_profiler): improve memory profiler (#217 )
* Feat/overlap_bcast_forward (#218 )
* feat/support bcast forward overlap
* feat/optimize the bcast call
* feat/optimize the bcast call
* feat/optimize the bcast call
* fix lint
* fix lint
* fix lint
* fix lint
* add torch.cuda.synchronize in save_checkpoint
---------
Co-authored-by: sunpeng <sunpengsdu@gmail.com>
* fix(*): move sequence_parallel to parallel config (#224 )
* move sequence_parallel to parallel config
* set the sequence_parallel default value to False
* fix lint
* fix lint
* fix lint
* Feat/example training internlm (#212 )
* feat(train/training_internlm.py): move common init funcs to internlm/train
* feat(train/training_internlm.py): update some public funcs
* feat(train/training_internlm.py): update some public funcs
* feat(evaluation.py): adapt evaluate to streaming dataset
* feat(train/training_internlm.py): minor update based on comments
* fix(training_internlm.py): set train dataloader persistent_workers true only when num_worker>0
* fix(training_internlm.py): fix demo error
* feat(data/utils.py): add new dataset type code for streaming dataset (#225 )
* test(model): support fp32 with flash_attn (#223 )
* support tf32 with flash
* move autocast to attention
* fix lint
* fix lint
* fix lint
* fix lint
* fix some bugs in model
* modify the convert dtype
* fix(pipeline): modify the sequence_parallel in pipeline (#227 )
* move sequence_parallel to parallel config
* set the sequence_parallel default value to False
* fix lint
* fix lint
* fix lint
* modify the sequence_parallel in pp
* feat(init): add skip args check flag and add zero overlap flag (#222 )
* feat(init): add skip args check flag
* fix(optim): add param overlap enable flag
* fix(ci): fix train error (#228 )
Co-authored-by: huangting4201 <huangting3@sensetime.com>
* fix(writer): fix tensorboard resume bug (#229 )
* fix(train.py): fix overflow grad norm error (#230 )
* feat(ckpt): add train config into ckpt (#231 )
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
Co-authored-by: Sun Peng <sunpengsdu@gmail.com>
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
Co-authored-by: Kai Chen <chenkaidev@gmail.com>
Co-authored-by: Yang Gao <Gary1546308416AL@gmail.com>
Co-authored-by: Changjiang GOU <gouchangjiang@gmail.com>
Co-authored-by: gouhchangjiang <gouhchangjiang@gmail.com>
Co-authored-by: vansin <msnode@163.com>
Co-authored-by: Ikko Eltociear Ashimine <eltociear@gmail.com>
Co-authored-by: YWMditto <46778265+YWMditto@users.noreply.github.com>
Co-authored-by: YWMditto <862779238@qq.com>
Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com>
Co-authored-by: liukuikun <24622904+Harold-lkk@users.noreply.github.com>
Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com>
Co-authored-by: Shuo Zhang <zhangshuolove@live.com>
Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com>
Co-authored-by: huangting.p <huangting@sensetime.com>
Co-authored-by: ytxiong <45058324+yingtongxiong@users.noreply.github.com>
Co-authored-by: Zaida Zhou <58739961+zhouzaida@users.noreply.github.com>
Co-authored-by: kkscilife <126147887+kkscilife@users.noreply.github.com>
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
Co-authored-by: hw <45089338+MorningForest@users.noreply.github.com>
Co-authored-by: yingtongxiong <974106207@qq.com>
Co-authored-by: cx <759046501@qq.com>
Co-authored-by: wangguoteng.p <wangguoteng925@qq.com>
Co-authored-by: huangting4201 <huangting3@sensetime.com>
Co-authored-by: Guoteng <32697156+SolenoidWGT@users.noreply.github.com>
Co-authored-by: lvhan028 <lvhan_028@163.com>
Co-authored-by: zachtzy <141206206+zachtzy@users.noreply.github.com>
Co-authored-by: Jaylin Lee <61487970+APX103@users.noreply.github.com>
Co-authored-by: del-zhenwu <dele.zhenwu@gmail.com>
Co-authored-by: Shaoyuan Xie <66255889+Daniel-xsy@users.noreply.github.com>
Co-authored-by: BigDong <yudongwang1226@gmail.com>
Co-authored-by: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com> 
6b6295aea3  Guoteng  2023-07-31 13:57:01 +08:00  Feat add checkpoint fraction (#151)
							* feat(config): add checkpoint_fraction into config
* feat: remove checkpoint_fraction from configs/7B_sft.py
---------
Co-authored-by: wangguoteng.p <wangguoteng925@qq.com> 
fa7337b37b  Sun Peng  2023-07-06 12:55:23 +08:00  initial commit