#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import argparse
import os
from pathlib import Path
from typing import Dict, Union

import torch

from internlm.core.context import Config
from internlm.core.context import global_context as gpc
from internlm.utils.common import get_master_node
from internlm.utils.logger import get_logger
from internlm.utils.storage_manager import init_storage_manager

logger = get_logger(__file__)


def get_default_parser():
    """Reads the user command line and uses an argument parser to parse the input arguments.

    Input arguments include the configuration file, host, port, world size, local rank, and the
    backend for torch.distributed.

    Returns:
        argparse.ArgumentParser: The parser populated with the default arguments; the user may
            add customized arguments to this parser.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", type=str, help="path to the config file")
    parser.add_argument(
        "--launcher",
        type=str,
        default="slurm",
        choices=["slurm", "torch"],
        help="launcher for launching distributed environment",
    )
|
|
|
|
parser.add_argument("--host", type=str, help="the master address for distributed training")
|
|
|
|
parser.add_argument("--port", type=int, default=8888, help="the master port for distributed training")
|
|
|
|
parser.add_argument("--world_size", type=int, help="world size for distributed training")
|
|
|
|
parser.add_argument("--rank", type=int, help="rank for the default process group")
|
|
|
|
parser.add_argument("--local_rank", type=int, help="local rank on the node")
|
|
|
|
parser.add_argument("--backend", type=str, default="nccl", help="backend for distributed communication")
|
|
|
|
parser.add_argument("--seed", type=int, default=1024)
|
    parser.add_argument("--profiling", default=False, action="store_true", help="enable/disable profiling.")

    return parser

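
# A minimal usage sketch (illustrative, not a prescribed entry point): a training
# script would typically extend the default parser before parsing, e.g.
#
#   parser = get_default_parser()
#   parser.add_argument("--extra_flag", action="store_true")  # hypothetical argument
#   args = parser.parse_args()
#
# and, with the "torch" launcher, be started along the lines of
#   torchrun --nproc_per_node=8 train.py --config configs/7B_sft.py --launcher torch
# (script name, config path, and process count here are illustrative).
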
def args_sanity_check():
    assert gpc.config is not None, "config is not loaded!"

    # the default model type is INTERNLM
    if "model_type" not in gpc.config:
        gpc.config._add_item("model_type", "INTERNLM")

    # process the parallel config in gpc
    if "zero1" not in gpc.config.parallel:
        gpc.config.parallel._add_item("zero1", -1)

    if "pipeline" not in gpc.config.parallel:
        gpc.config.parallel._add_item("pipeline", 1)

    if "tensor" not in gpc.config.parallel:
        gpc.config.parallel._add_item("tensor", 1)
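
    # Taken together, the fallbacks above mean that a config without an explicit
    # parallel section behaves as if it contained the following (a sketch of the
    # defaults, not a recommended setting):
    #
    #   parallel = dict(
    #       zero1=-1,     # <= 0: the ZeRO-1 group spans the whole data-parallel group
    #       pipeline=1,   # no pipeline parallelism
    #       tensor=1,     # no tensor parallelism
    #   )
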
    # process the data config in gpc
    data = gpc.config.data

    assert data.seq_len is not None, "'seq_len' must be given a value"
    assert data.micro_bsz is not None, "'micro_bsz' must be given a value"

    if "packed_length" in data and gpc.is_rank_for_log():
        logger.warning("packed_length would be ignored and will be set to seq_len * micro_bsz.")

    data._add_item("packed_length", data.seq_len * data.micro_bsz)

    if "micro_num" not in data:
        data._add_item("micro_num", 1)

    data._add_item("gradient_accumulation", data.micro_num)
    if gpc.is_rank_for_log():
        logger.info(f"gradient_accumulation size will be set to {data.micro_num}.")

    # batch_size should equal micro_num; do not use it directly
    data._add_item("batch_size", data.micro_num)

    if "min_length" not in data:
        data._add_item("min_length", 0)

    if "train_folder" not in data:
        data._add_item("train_folder", None)

    if "valid_folder" not in data:
        data._add_item("valid_folder", None)
if "valid_micro_num" not in data:
|
|
|
|
data._add_item("valid_micro_num", data.micro_num)
|
|
|
|
|
|
|
|
if "valid_every" not in data:
|
|
|
|
data._add_item("valid_every", 0)
|
|
|
|
|
2023-07-06 04:55:23 +00:00
|
|
|
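
# A minimal sketch of the default-filling pattern used above, assuming Config
# behaves like an attribute dict whose _add_item(key, value) fills in a missing
# key (illustrative only, not the actual internlm.core.context implementation):
#
#     class Config(dict):
#         def _add_item(self, key, value):
#             self[key] = value
#
#         def __getattr__(self, key):
#             return self[key]
#
#     data = Config(micro_num=4)
#     if "valid_micro_num" not in data:
#         data._add_item("valid_micro_num", data.micro_num)
#     assert data.valid_micro_num == 4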
if gpc.is_rank_for_log():
    logger.info("+" * 15 + " Data Info " + "+" * 15)  # pylint: disable=W1201
    logger.info(f"seq_len: {data.seq_len}")
    logger.info(f"micro_num: {data.micro_num}")
    logger.info(f"micro_bsz: {data.micro_bsz}")
    logger.info(f"packed_length: {data.packed_length}")
    logger.info(f"pack_sample_into_one: {data.pack_sample_into_one}")
    logger.info(f"min_length: {data.min_length}")
logger.info(f"valid_micro_num: {data.valid_micro_num}")
|
|
|
|
logger.info(f"valid_every: {data.valid_every}")
|
2023-07-06 04:55:23 +00:00
|
|
|
|
|
|
|

# processing the checkpoint config
Merge develop to main (#233)
* feat(utils/writer.py): support tensorboard writer (#63)
* feat(utils/writer.py): support tensorboard writer
* feat(utils/writer.py): add class comment
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
* [Develop] Pull Main Branch (#121)
* fix/fix_submodule_err (#61)
* fix/fix_submodule_err
---------
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
* fix issue templates (#65)
* fix(tokenizer): refactor tokenizer and update usage in readme (#51)
* update tokenizer example
* fix(readme, requirements): fix typo at Chinese readme and select a lower version of transformers (#73)
* fix a typo in readme
* in order to find InternLMTokenizer, select a lower version of Transformers
---------
Co-authored-by: gouhchangjiang <gouhchangjiang@gmail.com>
* [Doc] Add wechat and discord link in readme (#78)
* Doc:add wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* [Docs]: add Japanese README (#43)
* Add Japanese README
* Update README-ja-JP.md
replace message
* Update README-ja-JP.md
* add repetition_penalty in GenerationConfig in web_demo.py (#48)
Co-authored-by: YWMditto <862779238@qq.com>
* use fp16 in instruction (#80)
* [Enchancement] add more options for issue template (#77)
* [Enchancement] add more options for issue template
* update qustion icon
* fix link
* Use tempfile for convert2hf.py (#23)
Fix https://github.com/InternLM/InternLM/issues/50
* delete torch_dtype of README's example code (#100)
* set the value of repetition_penalty to 1.0 to avoid random outputs (#99)
* Update web_demo.py (#97)
Remove meaningless log.
* [Fix]Fix wrong string cutoff in the script for sft text tokenizing (#106)
---------
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
Co-authored-by: Kai Chen <chenkaidev@gmail.com>
Co-authored-by: Yang Gao <Gary1546308416AL@gmail.com>
Co-authored-by: Changjiang GOU <gouchangjiang@gmail.com>
Co-authored-by: gouhchangjiang <gouhchangjiang@gmail.com>
Co-authored-by: vansin <msnode@163.com>
Co-authored-by: Ikko Eltociear Ashimine <eltociear@gmail.com>
Co-authored-by: YWMditto <46778265+YWMditto@users.noreply.github.com>
Co-authored-by: YWMditto <862779238@qq.com>
Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com>
Co-authored-by: liukuikun <24622904+Harold-lkk@users.noreply.github.com>
Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com>
Co-authored-by: Shuo Zhang <zhangshuolove@live.com>
Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com>
* feat(core/scheduler): support pipeline parallel (#98)
* feat(utils/writer.py): support tensorboard writer
* feat(utils/writer.py): add class comment
* feat(core): support pipeline parallel
* fix(core): fix demo running error
* feat(solver/optimizer): add pp zero optimizer
* fix(solver/optimizer): fix word spelling error
* feat(core/scheduler): add new dir scheduler in core/
* fix(core): fix ci lint error
* feat(solver/optimizer): merge pp and nopp optimizer
* doc(usage.md): update usage doc
* feat(core/scheduler): support post func
* feat(core/scheduler): add dtype para in pp sche and update func get_tensor_shape
* feat(core/scheduler): add _load_micro_batch in base scheduler
* feat(core/scheduler): support optimizer overlap communication in pp scheduler
* feat(core/scheduler): delete data process func code
* feat(core/trainer): schedule pre processing for all schedule
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
Co-authored-by: huangting.p <huangting@sensetime.com>
* refactor(rotaryEmbedding): refactor forward (#120)
* use fp16 in instruction (#80)
* delete torch_dtype of README's example code (#100)
* refactor the forward for rotary embedding
---------
Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com>
Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com>
* feat(model/metrics.py): support calculating accuracy and perplexity m… (#91)
* feat(model/metrics.py): support calculating accuracy and perplexity metrics
* fix(model/metrics.py): fix import error
* feat(train.py): minor update
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
Co-authored-by: huangting.p <huangting@sensetime.com>
* fix(optimizer/util.py) change inf defination
* [Dev] Pull Main (#139)
* fix/fix_submodule_err (#61)
* fix/fix_submodule_err
---------
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
* fix issue templates (#65)
* fix(tokenizer): refactor tokenizer and update usage in readme (#51)
* update tokenizer example
* fix(readme, requirements): fix typo at Chinese readme and select a lower version of transformers (#73)
* fix a typo in readme
* in order to find InternLMTokenizer, select a lower version of Transformers
---------
Co-authored-by: gouhchangjiang <gouhchangjiang@gmail.com>
* [Doc] Add wechat and discord link in readme (#78)
* Doc:add wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* [Docs]: add Japanese README (#43)
* Add Japanese README
* Update README-ja-JP.md
replace message
* Update README-ja-JP.md
* add repetition_penalty in GenerationConfig in web_demo.py (#48)
Co-authored-by: YWMditto <862779238@qq.com>
* use fp16 in instruction (#80)
* [Enchancement] add more options for issue template (#77)
* [Enchancement] add more options for issue template
* update qustion icon
* fix link
* Use tempfile for convert2hf.py (#23)
Fix https://github.com/InternLM/InternLM/issues/50
* delete torch_dtype of README's example code (#100)
* set the value of repetition_penalty to 1.0 to avoid random outputs (#99)
* Update web_demo.py (#97)
Remove meaningless log.
* [Fix]Fix wrong string cutoff in the script for sft text tokenizing (#106)
* docs(install.md): update dependency package transformers version to >= 4.28.0 (#124)
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
* docs(LICENSE): add license (#125)
* add license of colossalai and flash-attn
* fix lint
* modify the name
* fix AutoModel map in convert2hf.py (#116)
* variables are not printly as expect (#114)
* feat(solver): fix code to adapt to torch2.0 and provide docker images (#128)
* feat(solver): fix code to adapt to torch2.0
* docs(install.md): publish internlm environment image
* docs(install.md): update dependency packages version
* docs(install.md): update default image
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
* add demo test (#132)
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
* fix web_demo cache accelerate (#133)
* fix(hybrid_zero_optim.py): delete math import
* Update embedding.py
---------
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
Co-authored-by: Kai Chen <chenkaidev@gmail.com>
Co-authored-by: Yang Gao <Gary1546308416AL@gmail.com>
Co-authored-by: Changjiang GOU <gouchangjiang@gmail.com>
Co-authored-by: gouhchangjiang <gouhchangjiang@gmail.com>
Co-authored-by: vansin <msnode@163.com>
Co-authored-by: Ikko Eltociear Ashimine <eltociear@gmail.com>
Co-authored-by: YWMditto <46778265+YWMditto@users.noreply.github.com>
Co-authored-by: YWMditto <862779238@qq.com>
Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com>
Co-authored-by: liukuikun <24622904+Harold-lkk@users.noreply.github.com>
Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com>
Co-authored-by: Shuo Zhang <zhangshuolove@live.com>
Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com>
Co-authored-by: huangting4201 <1538303371@qq.com>
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
Co-authored-by: ytxiong <45058324+yingtongxiong@users.noreply.github.com>
Co-authored-by: Zaida Zhou <58739961+zhouzaida@users.noreply.github.com>
Co-authored-by: kkscilife <126147887+kkscilife@users.noreply.github.com>
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
Co-authored-by: hw <45089338+MorningForest@users.noreply.github.com>
* style(solver/optimizer/utils.py): fix lint error (#147)
Co-authored-by: huangting.p <huangting@sensetime.com>
* feat(*): support not-flash-attn for pp and no-pp (#145)
* support not flash attention for no-pp
* support pipeline
* modify the config
* refactor the code
* refactor the code
* remove some unnecessary code
* fix(initialize/launch.py): set default value for use_flash_attn (#158)
* add default for use_flash_attn
* fix lint
* feat(utils/logger.py): support uniscale logger (#152)
* style(internlm): fix lint error
* feat(utils/logger.py): support uniscale logger
* fix(utils/logger.py): fix import circular error
* feat(train.py): support dashboard metric panel and fix ci train config
* fix(ci_scripts/train/slurm_train.sh): fix ci train error
* fix(ci_scripts/train/torchrun.sh): fix ci train error
* fix(ci_scripts/train): restore ci update
* fix(config.json): delete alert webhook
* feat(train.py): optimize func init logger
* feat(config.json): delete config.json
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
Co-authored-by: huangting.p <huangting@sensetime.com>
* feat(utils/evaluation.py): support evaluate (#154)
* style(internlm): fix lint error
* feat(utils/logger.py): support uniscale logger
* fix(utils/logger.py): fix import circular error
* feat(train.py): support dashboard metric panel and fix ci train config
* fix(ci_scripts/train/slurm_train.sh): fix ci train error
* fix(ci_scripts/train/torchrun.sh): fix ci train error
* feat(utils/evaluation.py): support evaluate on validation dataset
* fix(utils/evaluation.py): fix demo error
* fix(ci_scripts/train/ci_7B_sft.py): fix ci train error
* feat(initialize/launch.py): set default value for valid_bsz and valid_every
* fix(ci_scripts/train): restore ci update
* docs(configs/7B_sft.py): update comment for config
* fix(config.json): delete config.json
* fix evaluation bug in scheduler when use_flash_attn=False
* feat(scheduler/no_pipeline_scheduler.py): support micro_bsz>1 in no pp
* modify the jugement in pp and no-pp scheduler
* modify the data_process_func in evaluation
* fix bugs when use_flash_attn=False
* rename symbol
* feat(configs/7B_sft.py): change para valid_bsz to valid_micro_num
* feat(scheduler/no_pipeline_scheduler.py): update para set _grad_accum_batch_size
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
Co-authored-by: huangting.p <huangting@sensetime.com>
Co-authored-by: yingtongxiong <974106207@qq.com>
* feat(*): support no apex (#166)
* support no-apex
* add default for use_apex
* fix lint
* modify the RMSNormTorch
* remove some comments
* remove use_apex parameter
* remove some unnecessary code
* refactor(*): refactor the code with no-apex (#170)
* support no-apex
* add default for use_apex
* fix lint
* modify the RMSNormTorch
* remove some comments
* remove use_apex parameter
* remove some unnecessary code
* optimize the code including import
* remove the import RMSNorm
* remove warnings
* refactor(scheduler): rewrite pipeline scheduler (#138)
* refactor(scheduler): rewrite pipeline scheduler
* fix(*): fix pipeline scheduler bugs
* fix(*): fix merge bug
* feat(*): update codes with todo tag
* feat(*): add comments
* feat(internlm/core/scheduler): update recv_prev/next logic
* feat(utils/evaluation.py): update sche metric hook for valid
---------
Co-authored-by: huangting.p <huangting@sensetime.com>
* feat(*): support fp32 training (#155)
* support float32 training
* fix lint
* add adaptation in model/utils.py
* remove some unnecessary code
* fix lint
* feat(optim): add support for fp32 zero
* Revert "Merge pull request #2 from SolenoidWGT/fp32_zero"
This reverts commit 53fc50b0e52f12466e8dc8ec14c5e22b217537c8, reversing
changes made to 40f24d0a73fff5c083e11c18d4a07ad16aaabab3.
revert commit
* merge develop
* Update utils.py
* support fp32 in zero optimizer
* modify the dtype
---------
Co-authored-by: wangguoteng.p <wangguoteng925@qq.com>
* feat(*): support sequence_parallel (#180)
* support sequence_parallel for no pipeline
* sequence_parallel does not support no-flash-attn
* support sequence parallel for pipeline
* add memory profiler
* Update 13B.py
* add memory profiler
* fix evaluation bug
* remove some unnecessary code
* remove some unnecessary code
* Update parallel_context.py
* modify the config
* remove memory profiler
* modify the config
* support selective dropout
* feat(monitor): support monitor and alert (#175)
* feat(monitor): support monitor and alert
* feat(monitor.py): fix demo error
* feat(monitor.py): move cmd monitor args to config file
* feat(hybrid_zero_optim.py): if overflow occurs send alert msg
* feat(monitor.py): remove alert msg filter
* feat(monitor.py): optimize class MonitorTracker
* feat(monitor.py): optimize code
* feat(monitor.py): optimize code
* feat(monitor.py): optimize code
* feat(monitor.py): optimize code
* feat(train.py): update print to log
* style(ci): fix lint error
* fix(utils/evaluation.py): remove useless code
* fix(model/modeling_internlm.py): fix lint error
---------
Co-authored-by: huangting4201 <huangting3@sensetime.com>
* feat(ckpt): add async upload and ckpt snapshot (#161)
* use fp16 in instruction (#80)
* delete torch_dtype of README's example code (#100)
* feat(ckpt): support async ckpt upload and ckpt snapshot
---------
Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com>
Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com>
Co-authored-by: wangguoteng.p <wangguoteng925@qq.com>
* feat(ckpt): add auto ckpt load and singal quit (#189)
Co-authored-by: wangguoteng.p <wangguoteng925@qq.com>
* Revert "feat(ckpt): add auto ckpt load and singal quit (#189)" (#192)
This reverts commit a45a91bb843cf0b10b8b014a6ef35e695871f91b.
* refactor(solver/optimizer): improve optimizer memory (#193)
* refactor(solver/optimizer): improve optimizer memory
* feat(data): remove useless dataset type ids map
* Feat/optimizer (#194)
* feat(optimier.py): reduce memory footprint and avoid _check_overflow call
* feat(optimier.py): reduce memory footprint and avoid _check_overflow call
* feat(optimizer.py): overlap compute norm with allreduce
* update var and function name
* update function compute norm (#197)
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
* feat(optimizer/hybrid_zero_optim.py): overlap gradients last bucket allreduce and compute norm (#196)
* support gradients allreduce and compute norm overlap
* fix para set error
* remove timer cal_norm for testing
* feat(optimizer/hybrid_zero_optim.py): support group global norm
* format(lint): fix lint error
* feat(optimizer/store.py): update code based on comment
---------
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
Co-authored-by: huangting4201 <1538303371@qq.com>
* fix(ci): fix ci train error (#199)
* fix/ci train error (#200)
* fix(ci): fix ci train error
* fix(ci): fix ci train error
* fix(ci): fix ci train error
* fix(train.py): fix scheduler metric hook skip error (#204)
* Merge main to develop (#203)
* fix/fix_submodule_err (#61)
* fix/fix_submodule_err
---------
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
* fix issue templates (#65)
* fix(tokenizer): refactor tokenizer and update usage in readme (#51)
* update tokenizer example
* fix(readme, requirements): fix typo at Chinese readme and select a lower version of transformers (#73)
* fix a typo in readme
* in order to find InternLMTokenizer, select a lower version of Transformers
---------
Co-authored-by: gouhchangjiang <gouhchangjiang@gmail.com>
* [Doc] Add wechat and discord link in readme (#78)
* Doc:add wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* [Docs]: add Japanese README (#43)
* Add Japanese README
* Update README-ja-JP.md
replace message
* Update README-ja-JP.md
* add repetition_penalty in GenerationConfig in web_demo.py (#48)
Co-authored-by: YWMditto <862779238@qq.com>
* use fp16 in instruction (#80)
* [Enchancement] add more options for issue template (#77)
* [Enchancement] add more options for issue template
* update qustion icon
* fix link
* Use tempfile for convert2hf.py (#23)
Fix https://github.com/InternLM/InternLM/issues/50
* delete torch_dtype of README's example code (#100)
* set the value of repetition_penalty to 1.0 to avoid random outputs (#99)
* Update web_demo.py (#97)
Remove meaningless log.
* [Fix]Fix wrong string cutoff in the script for sft text tokenizing (#106)
* docs(install.md): update dependency package transformers version to >= 4.28.0 (#124)
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
* docs(LICENSE): add license (#125)
* add license of colossalai and flash-attn
* fix lint
* modify the name
* fix AutoModel map in convert2hf.py (#116)
* variables are not printly as expect (#114)
* feat(solver): fix code to adapt to torch2.0 and provide docker images (#128)
* feat(solver): fix code to adapt to torch2.0
* docs(install.md): publish internlm environment image
* docs(install.md): update dependency packages version
* docs(install.md): update default image
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
* add demo test (#132)
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
* fix web_demo cache accelerate (#133)
* Doc: add twitter link (#141)
* Feat add checkpoint fraction (#151)
* feat(config): add checkpoint_fraction into config
* feat: remove checkpoint_fraction from configs/7B_sft.py
---------
Co-authored-by: wangguoteng.p <wangguoteng925@qq.com>
* [Doc] update deployment guide to keep consistency with lmdeploy (#136)
* update deployment guide
* fix error
* use llm partition (#159)
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
* test(ci_scripts): clean test data after test, remove unnecessary global variables, and other optimizations (#165)
* test: optimization of ci scripts(variables, test data cleaning, etc).
* chore(workflows): disable ci job on push.
* fix: update partition
* test(ci_scripts): add install requirements automaticlly,trigger event about lint check and other optimizations (#174)
* add pull_request in lint check
* use default variables in ci_scripts
* fix format
* check and install requirements automaticlly
* fix format
---------
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
* feat(profiling): add a simple memory profiler (#89)
* feat(profiling): add simple memory profiler
* feat(profiling): add profiling argument
* feat(CI_workflow): Add PR & Issue auto remove workflow (#184)
* feat(ci_workflow): Add PR & Issue auto remove workflow
Add a workflow for stale PR & Issue auto remove
- pr & issue well be labeled as stale for inactive in 7 days
- staled PR & Issue well be remove in 7 days
- run this workflow every day on 1:30 a.m.
* Update stale.yml
* feat(bot): Create .owners.yml for Auto Assign (#176)
* Create .owners.yml: for issue/pr assign automatically
* Update .owners.yml
* Update .owners.yml
fix typo
* [feat]: add pal reasoning script (#163)
* [Feat] Add PAL inference script
* Update README.md
* Update tools/README.md
Co-authored-by: BigDong <yudongwang1226@gmail.com>
* Update tools/pal_inference.py
Co-authored-by: BigDong <yudongwang1226@gmail.com>
* Update pal script
* Update README.md
* restore .ore-commit-config.yaml
* Update tools/README.md
Co-authored-by: BigDong <yudongwang1226@gmail.com>
* Update tools/README.md
Co-authored-by: BigDong <yudongwang1226@gmail.com>
* Update pal inference script
* Update READMD.md
* Update internlm/utils/interface.py
Co-authored-by: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com>
* Update pal script
* Update pal script
* Update script
* Add docstring
* Update format
* Update script
* Update script
* Update script
---------
Co-authored-by: BigDong <yudongwang1226@gmail.com>
Co-authored-by: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com>
* test(ci_scripts): add timeout settings and clean work after the slurm job (#185)
* restore pr test on develop branch
* add mask
* add post action to cancel slurm job
* remove readonly attribute on job log
* add debug info
* debug job log
* try stdin
* use stdin
* set default value avoid error
* try setting readonly on job log
* performance echo
* remove debug info
* use squeue to check slurm job status
* restore the lossed parm
* litmit retry times
* use exclusive to avoid port already in use
* optimize loop body
* remove partition
* add {} for variables
* set env variable for slurm partition
---------
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
* refactor(tools): move interface.py and import it to web_demo (#195)
* move interface.py and import it to web_demo
* typo
* fix(ci): fix lint error
* fix(ci): fix lint error
---------
Co-authored-by: Sun Peng <sunpengsdu@gmail.com>
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
Co-authored-by: Kai Chen <chenkaidev@gmail.com>
Co-authored-by: Yang Gao <Gary1546308416AL@gmail.com>
Co-authored-by: Changjiang GOU <gouchangjiang@gmail.com>
Co-authored-by: gouhchangjiang <gouhchangjiang@gmail.com>
Co-authored-by: vansin <msnode@163.com>
Co-authored-by: Ikko Eltociear Ashimine <eltociear@gmail.com>
Co-authored-by: YWMditto <46778265+YWMditto@users.noreply.github.com>
Co-authored-by: YWMditto <862779238@qq.com>
Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com>
Co-authored-by: liukuikun <24622904+Harold-lkk@users.noreply.github.com>
Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com>
Co-authored-by: Shuo Zhang <zhangshuolove@live.com>
Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com>
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
Co-authored-by: ytxiong <45058324+yingtongxiong@users.noreply.github.com>
Co-authored-by: Zaida Zhou <58739961+zhouzaida@users.noreply.github.com>
Co-authored-by: kkscilife <126147887+kkscilife@users.noreply.github.com>
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
Co-authored-by: hw <45089338+MorningForest@users.noreply.github.com>
ckpt = gpc.config.ckpt
if "enable_save_ckpt" not in ckpt:
    ckpt._add_item("enable_save_ckpt", False)

# Saving checkpoint args.
if ckpt.enable_save_ckpt:
    assert "checkpoint_every" in ckpt, "If save checkpoint is enabled, checkpoint_every must be set in config.ckpt!"
    assert ckpt.checkpoint_every > 0
    assert "save_ckpt_folder" in ckpt, "If save checkpoint is enabled, save_ckpt_folder must be set in config.ckpt!"

    if "async_upload" not in ckpt:
        ckpt._add_item("async_upload", False)  # async upload defaults to False.
    else:
        if ckpt.async_upload:
            assert "save_ckpt_folder" in ckpt
            if "boto3:" not in ckpt.save_ckpt_folder:
                if gpc.is_rank_for_log():
                    logger.warning(
                        "Storing ckpt on file system does not support asynchronous storage, will use sync save!"
                    )
                ckpt.async_upload = False
            else:
                if "async_upload_tmp_folder" not in ckpt:
                    ckpt._add_item("async_upload_tmp_folder", "/dev/shm/internlm_tmp_ckpt/")

    if not ckpt.async_upload:
        ckpt._add_item("async_upload_tmp_folder", None)

    if "snapshot_ckpt_folder" not in ckpt:
        ckpt._add_item("snapshot_ckpt_folder", os.path.join(ckpt.save_ckpt_folder, "snapshot"))

    if "oss_snapshot_freq" not in ckpt:
        ckpt._add_item("oss_snapshot_freq", float("inf"))  # if oss_snapshot_freq is not given, snapshots are disabled.
else:
    ckpt._add_item("checkpoint_every", float("inf"))
    ckpt._add_item("oss_snapshot_freq", float("inf"))
    ckpt._add_item("save_ckpt_folder", None)
    ckpt._add_item("async_upload", False)
    ckpt._add_item("async_upload_tmp_folder", None)
    ckpt._add_item("snapshot_ckpt_folder", None)
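
# Illustrative sketch only (not executed here): a minimal `ckpt` section of a
# training config that would pass the save-checkpoint checks above. Folder
# paths and frequencies are placeholder values, not shipped defaults:
#
#     ckpt = dict(
#         enable_save_ckpt=True,
#         checkpoint_every=50,                          # must be > 0
#         save_ckpt_folder="boto3:s3://mybucket/ckpt",  # "boto3:" prefix permits async upload
#         async_upload=True,
#         async_upload_tmp_folder="/dev/shm/internlm_tmp_ckpt/",
#         oss_snapshot_freq=20,                         # omit to disable snapshots
#     )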
# Loading checkpoint args.
if "load_model_only_folder" not in ckpt:
    ckpt._add_item("load_model_only_folder", None)

if "load_ckpt_folder" not in ckpt:
    ckpt._add_item("load_ckpt_folder", None)

if "load_optimizer" not in ckpt:
    ckpt._add_item("load_optimizer", True)

if "stop_file_path" not in ckpt:
    ckpt._add_item("stop_file_path", None)

if "load_given_ckpt" not in ckpt:
    # If 'load_given_ckpt' is not given, we set it to False, so internlm has the
    # opportunity to auto-load the latest checkpoint.
    ckpt._add_item("load_given_ckpt", False)

if ckpt.load_given_ckpt:
    # Priority: load_given_ckpt(True) > latest_checkpoint > load_model_only_folder
    if ckpt.load_ckpt_folder and ckpt.load_model_only_folder:
        logger.warning(
            "Detected 'load_ckpt_folder' and 'load_model_only_folder' set at the same time "
            "while 'load_given_ckpt' is True, so internlm will load from 'load_ckpt_folder'."
        )
        ckpt.load_model_only_folder = None
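
# Illustrative sketch only: the two resume modes gated by `load_given_ckpt`.
# Paths are placeholders. Per the priority rule above, when both folders are
# set and `load_given_ckpt` is True, `load_ckpt_folder` wins:
#
#     ckpt = dict(
#         load_given_ckpt=True,
#         load_ckpt_folder="/mnt/ckpt/step_1000/",  # weights + optimizer state
#         load_optimizer=True,
#     )
#     # or, to load model weights only:
#     ckpt = dict(load_given_ckpt=True, load_model_only_folder="/mnt/ckpt/init/")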

if gpc.is_rank_for_log():
    logger.info("+" * 15 + " Ckpt Info " + "+" * 15)  # pylint: disable=W1201
    logger.info(f"is enable save ckpt: {ckpt.enable_save_ckpt}")
    logger.info(f"save_ckpt_folder: {ckpt.save_ckpt_folder}")
    logger.info(f"checkpoint_every: {ckpt.checkpoint_every}")
    logger.info(f"load_given_ckpt: {ckpt.load_given_ckpt}")

# initialize the storage manager
init_storage_manager(ckpt)

# tensorboard writer config
if "enable_tb" not in gpc.config:
    gpc.config._add_item("enable_tb", True)
if "tensorboard_folder" not in gpc.config:
    gpc.config._add_item(
        "tensorboard_folder", os.environ["tensorboard_folder"] if "tensorboard_folder" in os.environ else None
    )
if "resume_tb_folder" not in gpc.config:
    gpc.config._add_item(
        "resume_tb_folder", os.environ["resume_tb_folder"] if "resume_tb_folder" in os.environ else None
    )

if gpc.is_rank_for_log():
    logger.info(f"tensorboard_folder: {gpc.config.tensorboard_folder}")
    logger.info(f"resume_tb_folder: {gpc.config.resume_tb_folder}")
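
# Illustrative sketch only: the environment variables consulted above can be
# set by the launcher before this check runs; paths are placeholders:
#
#     os.environ["tensorboard_folder"] = "/mnt/logs/tb/run_001"
#     os.environ["resume_tb_folder"] = "/mnt/logs/tb/run_000"  # resume curves from an earlier run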

# cudnn
torch.backends.cudnn.benchmark = gpc.config.get("cudnn_benchmark", False)
torch.backends.cudnn.deterministic = gpc.config.get("cudnn_deterministic", False)
clip_grad_norm = gpc.config.hybrid_zero_optimizer.get("clip_grad_norm", 0.0)

if gpc.is_rank_for_log():
    logger.info("+" * 15 + " Other Info " + "+" * 15)  # pylint: disable=W1201
    logger.info(f"cudnn.benchmark: {torch.backends.cudnn.benchmark}")
    logger.info(f"cudnn.deterministic: {torch.backends.cudnn.deterministic}")
    logger.info(f"clip_grad_norm: {clip_grad_norm}")
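
# Illustrative sketch only: the corresponding top-level config keys read above,
# with placeholder values; both cudnn flags default to False:
#
#     cudnn_benchmark = True       # autotune kernels; helps when input shapes are static
#     cudnn_deterministic = False  # set True to trade speed for reproducibility
#     hybrid_zero_optimizer = dict(clip_grad_norm=1.0)  # other optimizer keys omitted;
#                                                       # default 0.0 (assumed to mean no clipping)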

model = gpc.config.model
if "dtype" not in model:
    logger.warning("dtype is not set, use torch.float16 by default!")
    model._add_item("dtype", torch.float16)
else:
Co-authored-by: huangting4201 <huangting3@sensetime.com>
* fix(writer): fix tensorboard resume bug (#229)
* fix(train.py): fix overflow grad norm error (#230)
* feat(ckpt): add train config into ckpt (#231)
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
Co-authored-by: Sun Peng <sunpengsdu@gmail.com>
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
Co-authored-by: Kai Chen <chenkaidev@gmail.com>
Co-authored-by: Yang Gao <Gary1546308416AL@gmail.com>
Co-authored-by: Changjiang GOU <gouchangjiang@gmail.com>
Co-authored-by: gouhchangjiang <gouhchangjiang@gmail.com>
Co-authored-by: vansin <msnode@163.com>
Co-authored-by: Ikko Eltociear Ashimine <eltociear@gmail.com>
Co-authored-by: YWMditto <46778265+YWMditto@users.noreply.github.com>
Co-authored-by: YWMditto <862779238@qq.com>
Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com>
Co-authored-by: liukuikun <24622904+Harold-lkk@users.noreply.github.com>
Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com>
Co-authored-by: Shuo Zhang <zhangshuolove@live.com>
Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com>
Co-authored-by: huangting.p <huangting@sensetime.com>
Co-authored-by: ytxiong <45058324+yingtongxiong@users.noreply.github.com>
Co-authored-by: Zaida Zhou <58739961+zhouzaida@users.noreply.github.com>
Co-authored-by: kkscilife <126147887+kkscilife@users.noreply.github.com>
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
Co-authored-by: hw <45089338+MorningForest@users.noreply.github.com>
Co-authored-by: yingtongxiong <974106207@qq.com>
Co-authored-by: cx <759046501@qq.com>
Co-authored-by: wangguoteng.p <wangguoteng925@qq.com>
Co-authored-by: huangting4201 <huangting3@sensetime.com>
Co-authored-by: Guoteng <32697156+SolenoidWGT@users.noreply.github.com>
Co-authored-by: lvhan028 <lvhan_028@163.com>
Co-authored-by: zachtzy <141206206+zachtzy@users.noreply.github.com>
Co-authored-by: Jaylin Lee <61487970+APX103@users.noreply.github.com>
Co-authored-by: del-zhenwu <dele.zhenwu@gmail.com>
Co-authored-by: Shaoyuan Xie <66255889+Daniel-xsy@users.noreply.github.com>
Co-authored-by: BigDong <yudongwang1226@gmail.com>
Co-authored-by: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com>
2023-08-24 14:03:04 +00:00
|
|
|
# Normalize the dtype string from the config into a real torch dtype;
# "torch.tf32" additionally enables TF32 kernels and resolves to float32.
if gpc.config.model.dtype == "torch.bfloat16":
    gpc.config.model.dtype = torch.bfloat16
elif gpc.config.model.dtype in ("torch.float16", "torch.half"):
    gpc.config.model.dtype = torch.float16
elif gpc.config.model.dtype == "torch.float32":
    gpc.config.model.dtype = torch.float32
elif gpc.config.model.dtype == "torch.tf32":
    torch.backends.cudnn.allow_tf32 = True
    torch.backends.cuda.matmul.allow_tf32 = True
    gpc.config.model.dtype = torch.float32
else:
    assert gpc.config.model.dtype in [
        "torch.float16",
        "torch.half",
        "torch.bfloat16",
        "torch.float32",
        "torch.tf32",
    ]

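# --- Illustrative sketch (not part of the original sanity check) ---
# The string-to-dtype normalization above can also be expressed as a lookup
# table. `_DTYPE_MAP` and `resolve_dtype` are hypothetical names used only
# for this example; the keys mirror the strings accepted above, and the
# module-level `import torch` is reused.
_DTYPE_MAP = {
    "torch.bfloat16": torch.bfloat16,
    "torch.float16": torch.float16,
    "torch.half": torch.float16,
    "torch.float32": torch.float32,
    "torch.tf32": torch.float32,  # TF32 additionally flips the backend flags
}


def resolve_dtype(name: str) -> torch.dtype:
    """Map a config string to a torch dtype, enabling TF32 kernels on request."""
    if name == "torch.tf32":
        torch.backends.cudnn.allow_tf32 = True
        torch.backends.cuda.matmul.allow_tf32 = True
    return _DTYPE_MAP[name]


assert resolve_dtype("torch.half") is torch.float16
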
if "checkpoint" in model:
|
|
|
|
if model.checkpoint is True:
|
|
|
|
model.checkpoint = 1
|
|
|
|
elif model.checkpoint is False:
|
|
|
|
model.checkpoint = 0
|
|
|
|
else:
|
|
|
|
assert (
|
|
|
|
model.checkpoint >= 0 and model.checkpoint <= 1
|
|
|
|
), f'model.checkpoint: "{model.checkpoint}" should >=0 and <=1'
|
2023-07-06 04:55:23 +00:00
|
|
|
|
|
|
|
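# --- Illustrative sketch (not part of the original sanity check) ---
# One plausible consumer of the normalized value, assuming `checkpoint`
# denotes the fraction of transformer layers that use activation
# checkpointing; `num_checkpointed_layers` is a hypothetical helper, not
# part of this module.
def num_checkpointed_layers(checkpoint_fraction: float, num_layers: int) -> int:
    """Return how many of `num_layers` layers should be checkpointed."""
    assert 0 <= checkpoint_fraction <= 1
    return int(checkpoint_fraction * num_layers)


assert num_checkpointed_layers(0.5, 32) == 16
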
# On the logging rank only, dump the effective config sections.
if gpc.is_rank_for_log():
    logger.info("+" * 15 + " Model Info " + "+" * 15)  # pylint: disable=W1201
    logger.info(f"Model: {gpc.config.model}")

    logger.info("+" * 15 + " grad_scaler Info " + "+" * 15)  # pylint: disable=W1201
    logger.info(f"grad_scaler: {gpc.config.grad_scaler}")

    logger.info("+" * 15 + " hybrid_zero_optimizer Info " + "+" * 15)  # pylint: disable=W1201
    logger.info(f"hybrid_zero_optimizer: {gpc.config.hybrid_zero_optimizer}")

    logger.info("+" * 15 + " adam Info " + "+" * 15)  # pylint: disable=W1201
    logger.info(f"adam: {gpc.config.adam}")

    logger.info("+" * 15 + " beta2_scheduler Info " + "+" * 15)  # pylint: disable=W1201
    logger.info(f"beta2_scheduler: {gpc.config.beta2_scheduler}")

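# --- Illustrative sketch (not part of the original sanity check) ---
# A minimal stand-in for the rank guard used above, assuming a plain
# torch.distributed setup; gpc.is_rank_for_log() may apply a more elaborate
# parallel-group policy, so treat this only as an approximation.
import torch.distributed as dist


def is_rank_for_log_sketch() -> bool:
    """Log from rank 0 only (or always, when not running distributed)."""
    return not dist.is_initialized() or dist.get_rank() == 0
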
* feat(train/training_internlm.py): update some public funcs
* feat(train/training_internlm.py): update some public funcs
* feat(evaluation.py): adapt evaluate to streaming dataset
* feat(train/training_internlm.py): minor update based on comments
* fix(training_internlm.py): set train dataloader persistent_workers true only when num_worker>0
* fix(training_internlm.py): fix demo error
* feat(data/utils.py): add new dataset type code for streaming dataset (#225)
* test(model): support fp32 with flash_attn (#223)
* support tf32 with flash
* move autocast to attention
* fix lint
* fix lint
* fix lint
* fix lint
* fix some bugs in model
* modify the convert dtype
* fix(pipeline): modify the sequence_parallel in pipeline (#227)
* move sequence_parallel to parallel config
* set the sequece_parallel default value is False
* fix lint
* fix lint
* fix lint
* modify the sequence_parallel in pp
* feat(init): add skip args check flag and add zero overlap flag (#222)
* feat(init): add skip args check flag
* fix(optim): add param overlap enable flag
* fix(ci): fix train error (#228)
Co-authored-by: huangting4201 <huangting3@sensetime.com>
* fix(writer): fix tensorboard resume bug (#229)
* fix(train.py): fix overflow grad norm error (#230)
* feat(ckpt): add train config into ckpt (#231)
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
Co-authored-by: Sun Peng <sunpengsdu@gmail.com>
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
Co-authored-by: Kai Chen <chenkaidev@gmail.com>
Co-authored-by: Yang Gao <Gary1546308416AL@gmail.com>
Co-authored-by: Changjiang GOU <gouchangjiang@gmail.com>
Co-authored-by: gouhchangjiang <gouhchangjiang@gmail.com>
Co-authored-by: vansin <msnode@163.com>
Co-authored-by: Ikko Eltociear Ashimine <eltociear@gmail.com>
Co-authored-by: YWMditto <46778265+YWMditto@users.noreply.github.com>
Co-authored-by: YWMditto <862779238@qq.com>
Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com>
Co-authored-by: liukuikun <24622904+Harold-lkk@users.noreply.github.com>
Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com>
Co-authored-by: Shuo Zhang <zhangshuolove@live.com>
Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com>
Co-authored-by: huangting.p <huangting@sensetime.com>
Co-authored-by: ytxiong <45058324+yingtongxiong@users.noreply.github.com>
Co-authored-by: Zaida Zhou <58739961+zhouzaida@users.noreply.github.com>
Co-authored-by: kkscilife <126147887+kkscilife@users.noreply.github.com>
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
Co-authored-by: hw <45089338+MorningForest@users.noreply.github.com>
Co-authored-by: yingtongxiong <974106207@qq.com>
Co-authored-by: cx <759046501@qq.com>
Co-authored-by: wangguoteng.p <wangguoteng925@qq.com>
Co-authored-by: huangting4201 <huangting3@sensetime.com>
Co-authored-by: Guoteng <32697156+SolenoidWGT@users.noreply.github.com>
Co-authored-by: lvhan028 <lvhan_028@163.com>
Co-authored-by: zachtzy <141206206+zachtzy@users.noreply.github.com>
Co-authored-by: Jaylin Lee <61487970+APX103@users.noreply.github.com>
Co-authored-by: del-zhenwu <dele.zhenwu@gmail.com>
Co-authored-by: Shaoyuan Xie <66255889+Daniel-xsy@users.noreply.github.com>
Co-authored-by: BigDong <yudongwang1226@gmail.com>
Co-authored-by: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com>
2023-08-24 14:03:04 +00:00
|
|
|
# process the model config
if "use_flash_attn" not in gpc.config.model:
    gpc.config.model._add_item("use_flash_attn", True)

# process the parallel config
if "sequence_parallel" not in gpc.config.parallel:
    gpc.config.parallel._add_item("sequence_parallel", False)
else:
    assert not (
        gpc.config.parallel.sequence_parallel is True and gpc.config.model.use_flash_attn is False
    ), "sequence parallel does not support use_flash_attn=False"

# feishu webhook address for alerting
if "alert_address" not in gpc.config:
    gpc.config._add_item("alert_address", None)

optim_ckpt = gpc.config.hybrid_zero_optimizer
if "zero_overlap_communication" in optim_ckpt:
    # Compatible with the old interfaces.
    optim_ckpt._add_item("overlap_sync_grad", optim_ckpt.zero_overlap_communication)
if "overlap_sync_grad" not in optim_ckpt:
    optim_ckpt._add_item("overlap_sync_grad", False)
if "overlap_sync_param" not in optim_ckpt:
    optim_ckpt._add_item("overlap_sync_param", False)
if gpc.is_rank_for_log():
    logger.info(
        f"overlap_sync_grad:{optim_ckpt.overlap_sync_grad}, overlap_sync_param:{optim_ckpt.overlap_sync_param}"
    )
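
# Illustrative sketch (not part of the original module): a minimal config fragment
# that satisfies the defaulting and consistency checks above. The key names come
# straight from the checks in this file; the surrounding dict layout is hypothetical.
def _example_config_fragment() -> dict:
    return {
        "model": {"use_flash_attn": True},
        # sequence_parallel must not be True when use_flash_attn is False
        "parallel": {"sequence_parallel": False},
        "alert_address": None,  # feishu webhook; defaults to None when omitted
        "hybrid_zero_optimizer": {
            # the legacy key "zero_overlap_communication", if present, is copied
            # into "overlap_sync_grad" for backward compatibility
            "overlap_sync_grad": False,
            "overlap_sync_param": False,
        },
    }
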
def launch(
    config: Union[str, Path, Config, Dict],
    rank: int,
    world_size: int,
    host: str,
    port: int,
    backend: str = "nccl",
    local_rank: int = None,
    seed: int = 1024,
):
    """This function first parses the configuration arguments, using :func:`parse_args()` in case one of the input
    arguments is not given, and then initializes and sets up the distributed environment by calling
    global_context's functions.

    Args:
        config (Union[str, dict, Config]): Config object, or path to a config file; both are acceptable.
        rank (int): Rank for the default process group.
        world_size (int): World size of the default process group.
        host (str): The master address for distributed training.
        port (int): The master port for distributed training.
        backend (str, optional): Backend for ``torch.distributed``, defaults to ``nccl``.
        local_rank (int, optional): Rank for the process on the node, used to set the default CUDA device;
            defaults to None. If local_rank is None, the default device ordinal is calculated automatically.
        seed (int, optional): Specified random seed for every process. Defaults to 1024.

    Raises:
        AssertionError: Raised when the config is not of an accepted type.
    """

    # set config
    assert isinstance(
        config, (Config, str, Path, dict)
    ), f"expected argument config to be Config, str, Path or dict, but got {type(config)}"
    if not isinstance(config, Config) and isinstance(config, dict):
        config = Config(config)
    if isinstance(config, (str, Path)):
        config = Config.from_file(config)
    gpc.load_config(config)

    # init default process group
    gpc.init_global_dist(rank, world_size, backend, host, port)

    # init process groups for different parallel modes from config
    gpc.init_parallel_groups()

    # set cuda device
    if torch.cuda.is_available():
        # if local rank is not given, calculate automatically
        gpc.set_device(local_rank)

    # set the number of processes running on the same node
    gpc.detect_num_processes_on_current_node()

    gpc.set_seed(seed)

    if gpc.is_rank_for_log():
        logger.info(
            f"Distributed environment is initialized, "
            f"data parallel size: {gpc.data_parallel_size}, pipeline parallel size: {gpc.pipeline_parallel_size}, "
            f"tensor parallel size: {gpc.tensor_parallel_size}",
        )
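
# Illustrative sketch (not part of the original module): launching a one-process
# group directly, e.g. for local debugging. The config path and rendezvous values
# below are hypothetical.
def _example_direct_launch():
    launch(
        config="configs/7B_sft.py",  # hypothetical path; a dict or Config also works
        rank=0,
        world_size=1,
        host="127.0.0.1",
        port=29500,
    )
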
def launch_from_slurm(
    config: Union[str, Path, Config, Dict],
    host: str,
    port: int,
    backend: str = "nccl",
    seed: int = 1024,
):
    """A wrapper for internlm.launch for SLURM launchers: the rank and world size are read from the environment
    variables set by SLURM.

    Args:
        config (Union[str, dict, Config]): Config object, or path to a config file; both are acceptable.
        host (str): The master address for distributed training.
        port (int): The master port for distributed training.
        backend (str, optional): Backend for ``torch.distributed``, defaults to ``nccl``.
        seed (int, optional): Specified random seed for every process. Defaults to 1024.
    """
    try:
        rank = int(os.environ["SLURM_PROCID"])
        world_size = int(os.environ["SLURM_NPROCS"])
    except KeyError as e:
        raise RuntimeError(f"Could not find {e} in the SLURM environment") from e

    launch(
        config=config,
        rank=rank,
        world_size=world_size,
        host=host,
        port=port,
        backend=backend,
        seed=seed,
    )
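
# Illustrative sketch (not part of the original module): under SLURM every task
# inherits SLURM_PROCID and SLURM_NPROCS, so an entry script only supplies the
# rendezvous endpoint. All concrete values below are hypothetical; such a script
# might be started with, e.g., `srun -N 2 --ntasks-per-node=8 python train.py`.
def _example_slurm_entry():
    launch_from_slurm(
        config="configs/7B_sft.py",  # hypothetical path
        host="10.0.0.1",  # hypothetical master address
        port=29500,
    )
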
def launch_from_torch(
    config: Union[str, Path, Config, Dict],
    backend: str = "nccl",
    seed: int = 1024,
):
    """A wrapper for internlm.launch for torchrun or torch.distributed.launch: the rank, local rank, world size,
    and master address/port are read from the environment variables set by PyTorch.

    Args:
        config (Union[str, dict, Config]): Config object, or path to a config file; both are acceptable.
        backend (str, optional): Backend for ``torch.distributed``, defaults to ``nccl``.
        seed (int, optional): Specified random seed for every process. Defaults to 1024.
    """
    try:
        rank = int(os.environ["RANK"])
        local_rank = int(os.environ["LOCAL_RANK"])
        world_size = int(os.environ["WORLD_SIZE"])
        host = os.environ["MASTER_ADDR"]
        port = int(os.environ["MASTER_PORT"])
    except KeyError as e:
        raise RuntimeError(f"Could not find {e} in the torch environment") from e

    launch(
        config=config,
        local_rank=local_rank,
        rank=rank,
        world_size=world_size,
        host=host,
        port=port,
        backend=backend,
        seed=seed,
    )
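
# Illustrative sketch (not part of the original module): torchrun exports RANK,
# LOCAL_RANK, WORLD_SIZE, MASTER_ADDR and MASTER_PORT for every worker, so only
# the config needs to be passed here. The script and config names are hypothetical;
# 8 workers on one node could be started with `torchrun --nproc_per_node=8 train.py`.
def _example_torchrun_entry():
    launch_from_torch(config="configs/7B_sft.py")  # hypothetical path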
Merge develop to main (#233)
* feat(utils/writer.py): support tensorboard writer (#63)
* feat(utils/writer.py): support tensorboard writer
* feat(utils/writer.py): add class comment
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
* [Develop] Pull Main Branch (#121)
* fix/fix_submodule_err (#61)
* fix/fix_submodule_err
---------
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
* fix issue templates (#65)
* fix(tokenizer): refactor tokenizer and update usage in readme (#51)
* update tokenizer example
* fix(readme, requirements): fix typo at Chinese readme and select a lower version of transformers (#73)
* fix a typo in readme
* in order to find InternLMTokenizer, select a lower version of Transformers
---------
Co-authored-by: gouhchangjiang <gouhchangjiang@gmail.com>
* [Doc] Add wechat and discord link in readme (#78)
* Doc:add wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* [Docs]: add Japanese README (#43)
* Add Japanese README
* Update README-ja-JP.md
replace message
* Update README-ja-JP.md
* add repetition_penalty in GenerationConfig in web_demo.py (#48)
Co-authored-by: YWMditto <862779238@qq.com>
* use fp16 in instruction (#80)
* [Enchancement] add more options for issue template (#77)
* [Enchancement] add more options for issue template
* update qustion icon
* fix link
* Use tempfile for convert2hf.py (#23)
Fix https://github.com/InternLM/InternLM/issues/50
* delete torch_dtype of README's example code (#100)
* set the value of repetition_penalty to 1.0 to avoid random outputs (#99)
* Update web_demo.py (#97)
Remove meaningless log.
* [Fix]Fix wrong string cutoff in the script for sft text tokenizing (#106)
---------
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
Co-authored-by: Kai Chen <chenkaidev@gmail.com>
Co-authored-by: Yang Gao <Gary1546308416AL@gmail.com>
Co-authored-by: Changjiang GOU <gouchangjiang@gmail.com>
Co-authored-by: gouhchangjiang <gouhchangjiang@gmail.com>
Co-authored-by: vansin <msnode@163.com>
Co-authored-by: Ikko Eltociear Ashimine <eltociear@gmail.com>
Co-authored-by: YWMditto <46778265+YWMditto@users.noreply.github.com>
Co-authored-by: YWMditto <862779238@qq.com>
Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com>
Co-authored-by: liukuikun <24622904+Harold-lkk@users.noreply.github.com>
Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com>
Co-authored-by: Shuo Zhang <zhangshuolove@live.com>
Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com>
* feat(core/scheduler): support pipeline parallel (#98)
* feat(utils/writer.py): support tensorboard writer
* feat(utils/writer.py): add class comment
* feat(core): support pipeline parallel
* fix(core): fix demo running error
* feat(solver/optimizer): add pp zero optimizer
* fix(solver/optimizer): fix word spelling error
* feat(core/scheduler): add new dir scheduler in core/
* fix(core): fix ci lint error
* feat(solver/optimizer): merge pp and nopp optimizer
* doc(usage.md): update usage doc
* feat(core/scheduler): support post func
* feat(core/scheduler): add dtype para in pp sche and update func get_tensor_shape
* feat(core/scheduler): add _load_micro_batch in base scheduler
* feat(core/scheduler): support optimizer overlap communication in pp scheduler
* feat(core/scheduler): delete data process func code
* feat(core/trainer): schedule pre processing for all schedule
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
Co-authored-by: huangting.p <huangting@sensetime.com>
* refactor(rotaryEmbedding): refactor forward (#120)
* use fp16 in instruction (#80)
* delete torch_dtype of README's example code (#100)
* refactor the forward for rotary embedding
---------
Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com>
Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com>
* feat(model/metrics.py): support calculating accuracy and perplexity m… (#91)
* feat(model/metrics.py): support calculating accuracy and perplexity metrics
* fix(model/metrics.py): fix import error
* feat(train.py): minor update
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
Co-authored-by: huangting.p <huangting@sensetime.com>
* fix(optimizer/util.py) change inf defination
* [Dev] Pull Main (#139)
* fix/fix_submodule_err (#61)
* fix/fix_submodule_err
---------
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
* fix issue templates (#65)
* fix(tokenizer): refactor tokenizer and update usage in readme (#51)
* update tokenizer example
* fix(readme, requirements): fix typo at Chinese readme and select a lower version of transformers (#73)
* fix a typo in readme
* in order to find InternLMTokenizer, select a lower version of Transformers
---------
Co-authored-by: gouhchangjiang <gouhchangjiang@gmail.com>
* [Doc] Add wechat and discord link in readme (#78)
* Doc:add wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* [Docs]: add Japanese README (#43)
* Add Japanese README
* Update README-ja-JP.md
replace message
* Update README-ja-JP.md
* add repetition_penalty in GenerationConfig in web_demo.py (#48)
Co-authored-by: YWMditto <862779238@qq.com>
* use fp16 in instruction (#80)
* [Enchancement] add more options for issue template (#77)
* [Enchancement] add more options for issue template
* update qustion icon
* fix link
* Use tempfile for convert2hf.py (#23)
Fix https://github.com/InternLM/InternLM/issues/50
* delete torch_dtype of README's example code (#100)
* set the value of repetition_penalty to 1.0 to avoid random outputs (#99)
* Update web_demo.py (#97)
Remove meaningless log.
* [Fix]Fix wrong string cutoff in the script for sft text tokenizing (#106)
* docs(install.md): update dependency package transformers version to >= 4.28.0 (#124)
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
* docs(LICENSE): add license (#125)
* add license of colossalai and flash-attn
* fix lint
* modify the name
* fix AutoModel map in convert2hf.py (#116)
* variables are not printly as expect (#114)
* feat(solver): fix code to adapt to torch2.0 and provide docker images (#128)
* feat(solver): fix code to adapt to torch2.0
* docs(install.md): publish internlm environment image
* docs(install.md): update dependency packages version
* docs(install.md): update default image
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
* add demo test (#132)
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
* fix web_demo cache accelerate (#133)
* fix(hybrid_zero_optim.py): delete math import
* Update embedding.py
---------
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
Co-authored-by: Kai Chen <chenkaidev@gmail.com>
Co-authored-by: Yang Gao <Gary1546308416AL@gmail.com>
Co-authored-by: Changjiang GOU <gouchangjiang@gmail.com>
Co-authored-by: gouhchangjiang <gouhchangjiang@gmail.com>
Co-authored-by: vansin <msnode@163.com>
Co-authored-by: Ikko Eltociear Ashimine <eltociear@gmail.com>
Co-authored-by: YWMditto <46778265+YWMditto@users.noreply.github.com>
Co-authored-by: YWMditto <862779238@qq.com>
Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com>
Co-authored-by: liukuikun <24622904+Harold-lkk@users.noreply.github.com>
Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com>
Co-authored-by: Shuo Zhang <zhangshuolove@live.com>
Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com>
Co-authored-by: huangting4201 <1538303371@qq.com>
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
Co-authored-by: ytxiong <45058324+yingtongxiong@users.noreply.github.com>
Co-authored-by: Zaida Zhou <58739961+zhouzaida@users.noreply.github.com>
Co-authored-by: kkscilife <126147887+kkscilife@users.noreply.github.com>
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
Co-authored-by: hw <45089338+MorningForest@users.noreply.github.com>
* style(solver/optimizer/utils.py): fix lint error (#147)
Co-authored-by: huangting.p <huangting@sensetime.com>
* feat(*): support not-flash-attn for pp and no-pp (#145)
* support not flash attention for no-pp
* support pipeline
* modify the config
* refactor the code
* refactor the code
* remove some unnecessary code
* fix(initialize/launch.py): set default value for use_flash_attn (#158)
* add default for use_flash_attn
* fix lint
* feat(utils/logger.py): support uniscale logger (#152)
* style(internlm): fix lint error
* feat(utils/logger.py): support uniscale logger
* fix(utils/logger.py): fix import circular error
* feat(train.py): support dashboard metric panel and fix ci train config
* fix(ci_scripts/train/slurm_train.sh): fix ci train error
* fix(ci_scripts/train/torchrun.sh): fix ci train error
* fix(ci_scripts/train): restore ci update
* fix(config.json): delete alert webhook
* feat(train.py): optimize func init logger
* feat(config.json): delete config.json
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
Co-authored-by: huangting.p <huangting@sensetime.com>
* feat(utils/evaluation.py): support evaluate (#154)
* style(internlm): fix lint error
* feat(utils/logger.py): support uniscale logger
* fix(utils/logger.py): fix import circular error
* feat(train.py): support dashboard metric panel and fix ci train config
* fix(ci_scripts/train/slurm_train.sh): fix ci train error
* fix(ci_scripts/train/torchrun.sh): fix ci train error
* feat(utils/evaluation.py): support evaluate on validation dataset
* fix(utils/evaluation.py): fix demo error
* fix(ci_scripts/train/ci_7B_sft.py): fix ci train error
* feat(initialize/launch.py): set default value for valid_bsz and valid_every
* fix(ci_scripts/train): restore ci update
* docs(configs/7B_sft.py): update comment for config
* fix(config.json): delete config.json
* fix evaluation bug in scheduler when use_flash_attn=False
* feat(scheduler/no_pipeline_scheduler.py): support micro_bsz>1 in no pp
* modify the jugement in pp and no-pp scheduler
* modify the data_process_func in evaluation
* fix bugs when use_flash_attn=False
* rename symbol
* feat(configs/7B_sft.py): change para valid_bsz to valid_micro_num
* feat(scheduler/no_pipeline_scheduler.py): update para set _grad_accum_batch_size
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
Co-authored-by: huangting.p <huangting@sensetime.com>
Co-authored-by: yingtongxiong <974106207@qq.com>
* feat(*): support no apex (#166)
* support no-apex
* add default for use_apex
* fix lint
* modify the RMSNormTorch
* remove some comments
* remove use_apex parameter
* remove some unnecessary code
* refactor(*): refactor the code with no-apex (#170)
* support no-apex
* add default for use_apex
* fix lint
* modify the RMSNormTorch
* remove some comments
* remove use_apex parameter
* remove some unnecessary code
* optimize the code including import
* remove the import RMSNorm
* remove warnings
* refactor(scheduler): rewrite pipeline scheduler (#138)
* refactor(scheduler): rewrite pipeline scheduler
* fix(*): fix pipeline scheduler bugs
* fix(*): fix merge bug
* feat(*): update codes with todo tag
* feat(*): add comments
* feat(internlm/core/scheduler): update recv_prev/next logic
* feat(utils/evaluation.py): update sche metric hook for valid
---------
Co-authored-by: huangting.p <huangting@sensetime.com>
* feat(*): support fp32 training (#155)
* support float32 training
* fix lint
* add adaptation in model/utils.py
* remove some unnecessary code
* fix lint
* feat(optim): add support for fp32 zero
* Revert "Merge pull request #2 from SolenoidWGT/fp32_zero"
This reverts commit 53fc50b0e52f12466e8dc8ec14c5e22b217537c8, reversing
changes made to 40f24d0a73fff5c083e11c18d4a07ad16aaabab3.
revert commit
* merge develop
* Update utils.py
* support fp32 in zero optimizer
* modify the dtype
---------
Co-authored-by: wangguoteng.p <wangguoteng925@qq.com>
* feat(*): support sequence_parallel (#180)
* support sequence_parallel for no pipeline
* sequence_parallel does not support no-flash-attn
* support sequence parallel for pipeline
* add memory profiler
* Update 13B.py
* add memory profiler
* fix evaluation bug
* remove some unnecessary code
* remove some unnecessary code
* Update parallel_context.py
* modify the config
* remove memory profiler
* modify the config
* support selective dropout
* feat(monitor): support monitor and alert (#175)
* feat(monitor): support monitor and alert
* feat(monitor.py): fix demo error
* feat(monitor.py): move cmd monitor args to config file
* feat(hybrid_zero_optim.py): if overflow occurs send alert msg
* feat(monitor.py): remove alert msg filter
* feat(monitor.py): optimize class MonitorTracker
* feat(monitor.py): optimize code
* feat(monitor.py): optimize code
* feat(monitor.py): optimize code
* feat(monitor.py): optimize code
* feat(train.py): update print to log
* style(ci): fix lint error
* fix(utils/evaluation.py): remove useless code
* fix(model/modeling_internlm.py): fix lint error
---------
Co-authored-by: huangting4201 <huangting3@sensetime.com>
* feat(ckpt): add async upload and ckpt snapshot (#161)
* use fp16 in instruction (#80)
* delete torch_dtype of README's example code (#100)
* feat(ckpt): support async ckpt upload and ckpt snapshot
---------
Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com>
Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com>
Co-authored-by: wangguoteng.p <wangguoteng925@qq.com>
* feat(ckpt): add auto ckpt load and singal quit (#189)
Co-authored-by: wangguoteng.p <wangguoteng925@qq.com>
* Revert "feat(ckpt): add auto ckpt load and singal quit (#189)" (#192)
This reverts commit a45a91bb843cf0b10b8b014a6ef35e695871f91b.
* refactor(solver/optimizer): improve optimizer memory (#193)
* refactor(solver/optimizer): improve optimizer memory
* feat(data): remove useless dataset type ids map
* Feat/optimizer (#194)
* feat(optimier.py): reduce memory footprint and avoid _check_overflow call
* feat(optimier.py): reduce memory footprint and avoid _check_overflow call
* feat(optimizer.py): overlap compute norm with allreduce
* update var and function name
* update function compute norm (#197)
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
* feat(optimizer/hybrid_zero_optim.py): overlap gradients last bucket allreduce and compute norm (#196)
* support gradients allreduce and compute norm overlap
* fix para set error
* remove timer cal_norm for testing
* feat(optimizer/hybrid_zero_optim.py): support group global norm
* format(lint): fix lint error
* feat(optimizer/store.py): update code based on comment
---------
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
Co-authored-by: huangting4201 <1538303371@qq.com>
* fix(ci): fix ci train error (#199)
* fix/ci train error (#200)
* fix(ci): fix ci train error
* fix(ci): fix ci train error
* fix(ci): fix ci train error
* fix(train.py): fix scheduler metric hook skip error (#204)
* Merge main to develop (#203)
* fix/fix_submodule_err (#61)
* fix/fix_submodule_err
---------
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
* fix issue templates (#65)
* fix(tokenizer): refactor tokenizer and update usage in readme (#51)
* update tokenizer example
* fix(readme, requirements): fix typo at Chinese readme and select a lower version of transformers (#73)
* fix a typo in readme
* in order to find InternLMTokenizer, select a lower version of Transformers
---------
Co-authored-by: gouhchangjiang <gouhchangjiang@gmail.com>
* [Doc] Add wechat and discord link in readme (#78)
* Doc:add wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* Doc:update wechat and discord link
* [Docs]: add Japanese README (#43)
* Add Japanese README
* Update README-ja-JP.md
replace message
* Update README-ja-JP.md
* add repetition_penalty in GenerationConfig in web_demo.py (#48)
Co-authored-by: YWMditto <862779238@qq.com>
* use fp16 in instruction (#80)
* [Enhancement] add more options for issue template (#77)
* [Enhancement] add more options for issue template
* update question icon
* fix link
* Use tempfile for convert2hf.py (#23)
Fix https://github.com/InternLM/InternLM/issues/50
* delete torch_dtype of README's example code (#100)
* set the value of repetition_penalty to 1.0 to avoid random outputs (#99)
* Update web_demo.py (#97)
Remove meaningless log.
* [Fix]Fix wrong string cutoff in the script for sft text tokenizing (#106)
* docs(install.md): update dependency package transformers version to >= 4.28.0 (#124)
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
* docs(LICENSE): add license (#125)
* add license of colossalai and flash-attn
* fix lint
* modify the name
* fix AutoModel map in convert2hf.py (#116)
* variables are not printed as expected (#114)
* feat(solver): fix code to adapt to torch2.0 and provide docker images (#128)
* feat(solver): fix code to adapt to torch2.0
* docs(install.md): publish internlm environment image
* docs(install.md): update dependency packages version
* docs(install.md): update default image
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
* add demo test (#132)
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
* fix web_demo cache accelerate (#133)
* Doc: add twitter link (#141)
* Feat add checkpoint fraction (#151)
* feat(config): add checkpoint_fraction into config
* feat: remove checkpoint_fraction from configs/7B_sft.py
---------
Co-authored-by: wangguoteng.p <wangguoteng925@qq.com>
* [Doc] update deployment guide to keep consistency with lmdeploy (#136)
* update deployment guide
* fix error
* use llm partition (#159)
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
* test(ci_scripts): clean test data after test, remove unnecessary global variables, and other optimizations (#165)
* test: optimization of ci scripts (variables, test data cleaning, etc.).
* chore(workflows): disable ci job on push.
* fix: update partition
* test(ci_scripts): add install requirements automatically, trigger event about lint check and other optimizations (#174)
* add pull_request in lint check
* use default variables in ci_scripts
* fix format
* check and install requirements automatically
* fix format
---------
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
* feat(profiling): add a simple memory profiler (#89)
* feat(profiling): add simple memory profiler
* feat(profiling): add profiling argument
* feat(CI_workflow): Add PR & Issue auto remove workflow (#184)
* feat(ci_workflow): Add PR & Issue auto remove workflow
Add a workflow for stale PR & Issue auto remove
- pr & issue well be labeled as stale for inactive in 7 days
- staled PR & Issue well be remove in 7 days
- run this workflow every day on 1:30 a.m.
* Update stale.yml
* feat(bot): Create .owners.yml for Auto Assign (#176)
* Create .owners.yml: for issue/pr assign automatically
* Update .owners.yml
* Update .owners.yml
fix typo
* [feat]: add pal reasoning script (#163)
* [Feat] Add PAL inference script
* Update README.md
* Update tools/README.md
Co-authored-by: BigDong <yudongwang1226@gmail.com>
* Update tools/pal_inference.py
Co-authored-by: BigDong <yudongwang1226@gmail.com>
* Update pal script
* Update README.md
* restore .ore-commit-config.yaml
* Update tools/README.md
Co-authored-by: BigDong <yudongwang1226@gmail.com>
* Update tools/README.md
Co-authored-by: BigDong <yudongwang1226@gmail.com>
* Update pal inference script
* Update README.md
* Update internlm/utils/interface.py
Co-authored-by: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com>
* Update pal script
* Update pal script
* Update script
* Add docstring
* Update format
* Update script
* Update script
* Update script
---------
Co-authored-by: BigDong <yudongwang1226@gmail.com>
Co-authored-by: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com>
* test(ci_scripts): add timeout settings and clean work after the slurm job (#185)
* restore pr test on develop branch
* add mask
* add post action to cancel slurm job
* remove readonly attribute on job log
* add debug info
* debug job log
* try stdin
* use stdin
* set default value avoid error
* try setting readonly on job log
* performance echo
* remove debug info
* use squeue to check slurm job status
* restore the lost param
* limit retry times
* use exclusive to avoid port already in use
* optimize loop body
* remove partition
* add {} for variables
* set env variable for slurm partition
---------
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
* refactor(tools): move interface.py and import it to web_demo (#195)
* move interface.py and import it to web_demo
* typo
* fix(ci): fix lint error
* fix(ci): fix lint error
---------
Co-authored-by: Sun Peng <sunpengsdu@gmail.com>
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
Co-authored-by: Kai Chen <chenkaidev@gmail.com>
Co-authored-by: Yang Gao <Gary1546308416AL@gmail.com>
Co-authored-by: Changjiang GOU <gouchangjiang@gmail.com>
Co-authored-by: gouhchangjiang <gouhchangjiang@gmail.com>
Co-authored-by: vansin <msnode@163.com>
Co-authored-by: Ikko Eltociear Ashimine <eltociear@gmail.com>
Co-authored-by: YWMditto <46778265+YWMditto@users.noreply.github.com>
Co-authored-by: YWMditto <862779238@qq.com>
Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com>
Co-authored-by: liukuikun <24622904+Harold-lkk@users.noreply.github.com>
Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com>
Co-authored-by: Shuo Zhang <zhangshuolove@live.com>
Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com>
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
Co-authored-by: ytxiong <45058324+yingtongxiong@users.noreply.github.com>
Co-authored-by: Zaida Zhou <58739961+zhouzaida@users.noreply.github.com>
Co-authored-by: kkscilife <126147887+kkscilife@users.noreply.github.com>
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
Co-authored-by: hw <45089338+MorningForest@users.noreply.github.com>
Co-authored-by: Guoteng <32697156+SolenoidWGT@users.noreply.github.com>
Co-authored-by: wangguoteng.p <wangguoteng925@qq.com>
Co-authored-by: lvhan028 <lvhan_028@163.com>
Co-authored-by: zachtzy <141206206+zachtzy@users.noreply.github.com>
Co-authored-by: cx <759046501@qq.com>
Co-authored-by: Jaylin Lee <61487970+APX103@users.noreply.github.com>
Co-authored-by: del-zhenwu <dele.zhenwu@gmail.com>
Co-authored-by: Shaoyuan Xie <66255889+Daniel-xsy@users.noreply.github.com>
Co-authored-by: BigDong <yudongwang1226@gmail.com>
Co-authored-by: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com>
Co-authored-by: huangting4201 <huangting3@sensetime.com>
* fix(pipeline_scheduler.py): fix tensor shape err and comm block (#210)
* feat(train.py): support torch profiler (#201)
* feat(train.py): support torch profiling
* feat(train.py): optimize initialize_llm_profile
* feat(train.py): profiling with tp0 and dp0
* move sequence parallel context manager to evaluation func
* fix lint
* move the process for type_ids to load_new_batch
* fix lint
---------
Co-authored-by: yingtongxiong <974106207@qq.com>
* feat(ckpt): add auto ckpt load and signal quit (#216)
Co-authored-by: wangguoteng.p <wangguoteng925@qq.com>
* feat(memory_profiler): improve memory profiler (#217)
* Feat/overlap_bcast_forward (#218)
* feat/support bcast forward overlap
* feat/optimize the bcast call
* feat/optimize the bcast call
* feat/optimize the bcast call
* fix lint
* fix lint
* fix lint
* fix lint
* add torch.cuda.synchronize in save_checkpoint
---------
Co-authored-by: sunpeng <sunpengsdu@gmail.com>
* fix(*): move sequence_parallel to parallel config (#224)
* move sequence_parallel to parallel config
* set the sequence_parallel default value to False
* fix lint
* fix lint
* fix lint
* Feat/example training internlm (#212)
* feat(train/training_internlm.py): move common init funcs to internlm/train
* feat(train/training_internlm.py): update some public funcs
* feat(train/training_internlm.py): update some public funcs
* feat(evaluation.py): adapt evaluate to streaming dataset
* feat(train/training_internlm.py): minor update based on comments
* fix(training_internlm.py): set train dataloader persistent_workers true only when num_worker>0
* fix(training_internlm.py): fix demo error
* feat(data/utils.py): add new dataset type code for streaming dataset (#225)
* test(model): support fp32 with flash_attn (#223)
* support tf32 with flash
* move autocast to attention
* fix lint
* fix lint
* fix lint
* fix lint
* fix some bugs in model
* modify the convert dtype
* fix(pipeline): modify the sequence_parallel in pipeline (#227)
* move sequence_parallel to parallel config
* set the sequence_parallel default value to False
* fix lint
* fix lint
* fix lint
* modify the sequence_parallel in pp
* feat(init): add skip args check flag and add zero overlap flag (#222)
* feat(init): add skip args check flag
* fix(optim): add param overlap enable flag
* fix(ci): fix train error (#228)
Co-authored-by: huangting4201 <huangting3@sensetime.com>
* fix(writer): fix tensorboard resume bug (#229)
* fix(train.py): fix overflow grad norm error (#230)
* feat(ckpt): add train config into ckpt (#231)
---------
Co-authored-by: 黄婷 <huangting3@CN0014010744M.local>
Co-authored-by: Sun Peng <sunpengsdu@gmail.com>
Co-authored-by: ChenQiaoling00 <qiaoling_chen@u.nus.edu>
Co-authored-by: Kai Chen <chenkaidev@gmail.com>
Co-authored-by: Yang Gao <Gary1546308416AL@gmail.com>
Co-authored-by: Changjiang GOU <gouchangjiang@gmail.com>
Co-authored-by: gouhchangjiang <gouhchangjiang@gmail.com>
Co-authored-by: vansin <msnode@163.com>
Co-authored-by: Ikko Eltociear Ashimine <eltociear@gmail.com>
Co-authored-by: YWMditto <46778265+YWMditto@users.noreply.github.com>
Co-authored-by: YWMditto <862779238@qq.com>
Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com>
Co-authored-by: liukuikun <24622904+Harold-lkk@users.noreply.github.com>
Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com>
Co-authored-by: Shuo Zhang <zhangshuolove@live.com>
Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com>
Co-authored-by: huangting.p <huangting@sensetime.com>
Co-authored-by: ytxiong <45058324+yingtongxiong@users.noreply.github.com>
Co-authored-by: Zaida Zhou <58739961+zhouzaida@users.noreply.github.com>
Co-authored-by: kkscilife <126147887+kkscilife@users.noreply.github.com>
Co-authored-by: qa-caif-cicd <qa-caif-cicd@pjlab.org.cn>
Co-authored-by: hw <45089338+MorningForest@users.noreply.github.com>
Co-authored-by: yingtongxiong <974106207@qq.com>
Co-authored-by: cx <759046501@qq.com>
Co-authored-by: wangguoteng.p <wangguoteng925@qq.com>
Co-authored-by: huangting4201 <huangting3@sensetime.com>
Co-authored-by: Guoteng <32697156+SolenoidWGT@users.noreply.github.com>
Co-authored-by: lvhan028 <lvhan_028@163.com>
Co-authored-by: zachtzy <141206206+zachtzy@users.noreply.github.com>
Co-authored-by: Jaylin Lee <61487970+APX103@users.noreply.github.com>
Co-authored-by: del-zhenwu <dele.zhenwu@gmail.com>
Co-authored-by: Shaoyuan Xie <66255889+Daniel-xsy@users.noreply.github.com>
Co-authored-by: BigDong <yudongwang1226@gmail.com>
Co-authored-by: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com>
2023-08-24 14:03:04 +00:00
def initialize_distributed_env(
    config: str,
    launcher: str = "slurm",
    master_port: int = 8888,
    seed: int = 1024,
    args_check: bool = True,
):
    """
    Initialize distributed environment for distributed training.

    Args:
        config (str): Config file path.
        launcher (str): Launcher for launching distributed environment, can be slurm or torch. "slurm" by default.
        master_port (int): The master port for distributed training. 8888 by default.
        seed (int, optional): Specified random seed for every process. 1024 by default.
        args_check (bool, optional): Whether to run the config sanity check after launching. True by default.
    """

    # Release cached GPU memory before setting up process groups.
    torch.cuda.empty_cache()

    if launcher == "torch":
        launch_from_torch(config=config, seed=seed)
    elif launcher == "slurm":
        launch_from_slurm(
            config=config,
            host=get_master_node(),
            port=master_port,
            seed=seed,
        )
    else:
        assert launcher in ["slurm", "torch"], "launcher only supports slurm or torch"

    if args_check:
        args_sanity_check()
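

# A minimal usage sketch, assuming this module is invoked as a training
# entrypoint. The config path, CLI flags, and world size below are
# illustrative assumptions, not part of this module. Under torchrun, each
# rank would run something like:
#   torchrun --nproc_per_node=8 train.py --config configs/7B_sft.py --launcher torch
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="hypothetical training launcher")
    parser.add_argument("--config", type=str, default="configs/7B_sft.py")  # assumed config path
    parser.add_argument("--launcher", type=str, default="slurm", choices=["slurm", "torch"])
    parser.add_argument("--port", type=int, default=8888)
    parser.add_argument("--seed", type=int, default=1024)
    cli_args = parser.parse_args()

    # One call per process: sets up the process group for the chosen launcher,
    # seeds every rank, and (optionally) sanity-checks the loaded config.
    initialize_distributed_env(
        config=cli_args.config,
        launcher=cli_args.launcher,
        master_port=cli_args.port,
        seed=cli_args.seed,
    )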