mirror of https://github.com/hpcaitech/ColossalAI
[Zerobubble] merge main. (#6142)
* [feat] add zerobubble pp (just a frame now); add POC test for dx_dw; add test for zerobubble; * [feat] add dw test; * [fix] fix weight not close; * [update] update text; * [feat] add test run_fwd_bwd automatic scheduling; * [feat] split communication and calculation; fix pop empty send_bwd_buffer error; * [feat] add test for p & p grad; * [feat] add comments for ZBV func; * [fix] rm useless assign and comments; * [fix] fix ci test; add pytest; * [feat] add run_fwd_bwd_with_microbatch (replace input) & test; add p&p.grad assert close test & all pass; * [feat] add apply v_schedule graph; p & p.grad assert err exist; * [fix] update * [feat] fix ci; add assert; * [feat] fix poc format * [feat] fix func name & ci; add comments; * [fix] fix poc test; add comments in poc; * [feat] add optim backward_b_by_grad * [feat] fix optimizer bwd b & w; support return accum loss & output * [feat] add fwd_bwd_step, run_fwd_only; * [fix] fix optim bwd; add license for v_schedule; remove redundant attributes; fix schedule loop "while"--> "for"; add communication dict; * [fix] fix communication_map; * [feat] update test; rm comments; * [fix] rm zbv in hybridplugin * [fix] fix optim bwd; * [fix] fix optim bwd; * [fix] rm output.data after send fwd; * [fix] fix bwd step if condition; remove useless comments and format info; * [fix] fix detach output & release output; * [fix] rm requir_grad for output; * [fix] fix requir grad position and detach position and input&output local buffer append position; * [feat] add memory assertation; * [fix] fix mem check; * [fix] mem assertation' * [fix] fix mem assertation * [fix] fix mem; use a new model shape; only assert mem less and equal than theo; * [fix] fix model zoo import; * [fix] fix redundant detach & clone; add buffer assertation in the end; * [fix] add output_obj_grad assert None at bwd b step; replace input_obj.require_grad_ with treemap; * [fix] update optim state dict assert (include param group & state); fix mem assert after add optim; * [fix] add testcase with microbatch 4; * [zerobubble]Support ZeroBubble Pipeline (#6034) * [feat] add zerobubble pp (just a frame now); add POC test for dx_dw; add test for zerobubble; * [feat] add dw test; * [fix] fix weight not close; * [update] update text; * [feat] add test run_fwd_bwd automatic scheduling; * [feat] split communication and calculation; fix pop empty send_bwd_buffer error; * [feat] add test for p & p grad; * [feat] add comments for ZBV func; * [fix] rm useless assign and comments; * [fix] fix ci test; add pytest; * [feat] add run_fwd_bwd_with_microbatch (replace input) & test; add p&p.grad assert close test & all pass; * [feat] add apply v_schedule graph; p & p.grad assert err exist; * [fix] update * [feat] fix ci; add assert; * [feat] fix poc format * [feat] fix func name & ci; add comments; * [fix] fix poc test; add comments in poc; * [feat] add optim backward_b_by_grad * [feat] fix optimizer bwd b & w; support return accum loss & output * [feat] add fwd_bwd_step, run_fwd_only; * [fix] fix optim bwd; add license for v_schedule; remove redundant attributes; fix schedule loop "while"--> "for"; add communication dict; * [fix] fix communication_map; * [feat] update test; rm comments; * [fix] rm zbv in hybridplugin * [fix] fix optim bwd; * [fix] fix optim bwd; * [fix] rm output.data after send fwd; * [fix] fix bwd step if condition; remove useless comments and format info; * [fix] fix detach output & release output; * [fix] rm requir_grad for output; * [fix] fix requir grad position and detach position and 
input&output local buffer append position; * [feat] add memory assertation; * [fix] fix mem check; * [fix] mem assertation' * [fix] fix mem assertation * [fix] fix mem; use a new model shape; only assert mem less and equal than theo; * [fix] fix model zoo import; * [fix] fix redundant detach & clone; add buffer assertation in the end; * [fix] add output_obj_grad assert None at bwd b step; replace input_obj.require_grad_ with treemap; * [fix] update optim state dict assert (include param group & state); fix mem assert after add optim; * [fix] add testcase with microbatch 4; * [feat] moehybrid support zerobubble; * [fix] fix zerobubble pp for shardformer type input; * [feat] add more test; * [fix] fix require_grad & deallocate call; * [fix] updatw bwd b&w input; dict --> list[torch.Tensor] * [fix] fix bwd w input; * [fix] fix mem assert; * [fix] fix input_tensors buffer append input_obj(dict) --> Tuple (microbatch, input_obj) , and all bwd b related cal logic; * [fix] use tree_flatten replace dict traverse; * [fix] rm comments; * [fix] fix fwd branch, fwd pass both micro_batch & internal_inputs' * [fix] fix pipeline util func deallocate --> release_tensor_data; fix bwd_b loss bwd branch; * [fix] fix detach clone release order; * [fix] fix ci --> oom in 4096 hidden dim; * [fix] fix dumb clone; * [fix] fix detach_output_obj clone; * [fix] fix stage_indices; * [fix] fix traverse; traverse dict --> traverse tensor List; * [fix] fix zerobubble; support shardformer model type; * [fix] rm comments; * [fix] fix test_pipeline_utils ci; * [fix] remove duplicate arg; rm comments; * [fix] remove chunk 0 stage 0 bwd b; u don't have to cal micrbatch's dx; * [fix] rm print & comments; * [plugin] hybrid support zero bubble pipeline (#6060) * hybrid support zbv * fix fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * Update zero_bubble_pp.py * fix * fix-ci * fix [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * fix * fix * [zerobubble]Support ZeroBubble Pipeline (#6034) * [feat] add zerobubble pp (just a frame now); add POC test for dx_dw; add test for zerobubble; * [feat] add dw test; * [fix] fix weight not close; * [update] update text; * [feat] add test run_fwd_bwd automatic scheduling; * [feat] split communication and calculation; fix pop empty send_bwd_buffer error; * [feat] add test for p & p grad; * [feat] add comments for ZBV func; * [fix] rm useless assign and comments; * [fix] fix ci test; add pytest; * [feat] add run_fwd_bwd_with_microbatch (replace input) & test; add p&p.grad assert close test & all pass; * [feat] add apply v_schedule graph; p & p.grad assert err exist; * [fix] update * [feat] fix ci; add assert; * [feat] fix poc format * [feat] fix func name & ci; add comments; * [fix] fix poc test; add comments in poc; * [feat] add optim backward_b_by_grad * [feat] fix optimizer bwd b & w; support return accum loss & output * [feat] add fwd_bwd_step, run_fwd_only; * [fix] fix optim bwd; add license for v_schedule; remove redundant 
attributes; fix schedule loop "while"--> "for"; add communication dict; * [fix] fix communication_map; * [feat] update test; rm comments; * [fix] rm zbv in hybridplugin * [fix] fix optim bwd; * [fix] fix optim bwd; * [fix] rm output.data after send fwd; * [fix] fix bwd step if condition; remove useless comments and format info; * [fix] fix detach output & release output; * [fix] rm requir_grad for output; * [fix] fix requir grad position and detach position and input&output local buffer append position; * [feat] add memory assertation; * [fix] fix mem check; * [fix] mem assertation' * [fix] fix mem assertation * [fix] fix mem; use a new model shape; only assert mem less and equal than theo; * [fix] fix model zoo import; * [fix] fix redundant detach & clone; add buffer assertation in the end; * [fix] add output_obj_grad assert None at bwd b step; replace input_obj.require_grad_ with treemap; * [fix] update optim state dict assert (include param group & state); fix mem assert after add optim; * [fix] add testcase with microbatch 4; * hybrid support zbv * fix fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update zero_bubble_pp.py * fix * fix-ci * fix [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * fix * fix * fix * fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * fix * fix --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: duanjunwen <935724073@qq.com> * [feat] zerobubble support moehybridplugin; * [feat] update optimizer bwd; * [fix] fix build ci; * [zerobubble] rebase main (#6075) * fp8 operators for compressed communication cast_to_fp8, cast_from_fp8, all_reduce_fp8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix typo * fix scaling algorithm in FP8 casting * support fp8 communication in pipeline parallelism * add fp8_communication flag in the script * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * shardformer fp8 * fix rebase * remove all to all * fix shardformer fp8 communication training degradation * [fp8] support all-gather flat tensor (#5932) * [fp8] add fp8 comm for low level zero * [test] add zero fp8 test case * [Feature] llama shardformer fp8 support (#5938) * add llama shardformer fp8 * Llama Shardformer Parity * fix typo * fix all reduce * fix pytest failure * fix reduce op and move function to fp8.py * fix typo * [FP8] rebase main (#5963) * add SimPO * fix dataloader * remove debug code * add orpo * fix style * fix colossalai, transformers version * fix colossalai, transformers version * fix colossalai, transformers version * fix torch colossalai version * update transformers version * [shardformer] DeepseekMoE support (#5871) * [Feature] deepseek moe expert parallel implement * [misc] fix typo, remove redundant file (#5867) * [misc] fix typo * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci ---------
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [Feature] deepseek support & unit test * [misc] remove debug code & useless print * [misc] fix typos (#5872) * [Feature] remove modeling file, use auto config. (#5884) * [misc] fix typos * [Feature] deepseek support via auto model, remove modeling file * [misc] delete useless file * [misc] fix typos * [Deepseek] remove redundant code (#5888) * [misc] fix typos * [Feature] deepseek support via auto model, remove modeling file * [misc] delete useless file * [misc] fix typos * [misc] remove redundant code * [Feature/deepseek] resolve comment. (#5889) * [misc] fix typos * [Feature] deepseek support via auto model, remove modeling file * [misc] delete useless file * [misc] fix typos * [misc] remove redundant code * [misc] mv module replacement into if branch * [misc] add some warning message and modify some code in unit test * [misc] fix typos --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [Hoxfix] Fix CUDA_DEVICE_MAX_CONNECTIONS for comm overlap Co-authored-by: Edenzzzz <wtan45@wisc.edu> * [Feat] Diffusion Model(PixArtAlpha/StableDiffusion3) Support (#5838) * Diffusion Model Inference support * Stable Diffusion 3 Support * pixartalpha support * [HotFix] CI,import,requirements-test for #5838 (#5892) * [Hot Fix] CI,import,requirements-test --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [Feature] Enable PP + SP for llama (#5868) * fix cross-PP-stage position id length diff bug * fix typo * fix typo * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * use a one cross entropy func for all shardformer models --------- Co-authored-by: Edenzzzz <wtan45@wisc.edu> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [ShardFormer] Add Ulysses Sequence Parallelism support for Command-R, Qwen2 and ChatGLM (#5897) * add benchmark for sft, dpo, simpo, orpo. Add benchmarking result. Support lora with gradient checkpoint * fix style * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix eval * hotfix citation * [zero] support all-gather overlap (#5898) * [zero] support all-gather overlap * [zero] add overlap all-gather flag * [misc] fix typo * [zero] update api * fix orpo cross entropy loss * [Auto Parallel]: Speed up intra-op plan generation by 44% (#5446) * Remove unnecessary calls to deepcopy * Build DimSpec's difference dict only once This change considerably speeds up construction speed of DimSpec objects. The difference_dict is the same for each DimSpec object, so a single copy of it is enough. 
* Fix documentation of DimSpec's difference method * [ShardFormer] fix qwen2 sp (#5903) * [compatibility] support torch 2.2 (#5875) * Support Pytorch 2.2.2 * keep build_on_pr file and update .compatibility * fix object_to_tensor usage when torch>=2.3.0 (#5820) * [misc] support torch2.3 (#5893) * [misc] support torch2.3 * [devops] update compatibility ci * [devops] update compatibility ci * [devops] add debug * [devops] add debug * [devops] add debug * [devops] add debug * [devops] remove debug * [devops] remove debug * [release] update version (#5912) * [plugin] support all-gather overlap for hybrid parallel (#5919) * [plugin] fixed all-gather overlap support for hybrid parallel * add kto * fix style, add kto data sample * [Examples] Add lazy init to OPT and GPT examples (#5924) Co-authored-by: Edenzzzz <wtan45@wisc.edu> * [ColossalChat] Hotfix for ColossalChat (#5910) * add ignore and tiny llama * fix path issue * run style * fix issue * update bash * add ignore and tiny llama * fix path issue * run style * fix issue * update bash * fix ddp issue * add Qwen 1.5 32B * refactor tokenization * [FIX BUG] UnboundLocalError: cannot access local variable 'default_conversation' where it is not associated with a value (#5931) * cannot access local variable 'default_conversation' where it is not associated with a value set default value for 'default_conversation' * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * fix test data * refactor evaluation * remove real data path * remove real data path * Add n_fused as an input from native_module (#5894) * [FIX BUG] convert env param to int in (#5934) * [Hotfix] Fix ZeRO typo #5936 Co-authored-by: Edenzzzz <wtan45@wisc.edu> * [Feature] Add a switch to control whether the model checkpoint needs to be saved after each epoch ends (#5941) * Add a switch to control whether the model checkpoint needs to be saved after each epoch ends * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * fix style * fix style * fix style * [shardformer] hotfix attn mask (#5945) * [shardformer] hotfix attn mask (#5947) * [Feat] Distrifusion Acceleration Support for Diffusion Inference (#5895) * Distrifusion Support source * comp comm overlap optimization * sd3 benchmark * pixart distrifusion bug fix * sd3 bug fix and benchmark * generation bug fix * naming fix * add docstring, fix counter and shape error * add reference * readme and requirement * [zero] hotfix update master params (#5951) * [release] update version (#5952) * [Chat] Fix lora (#5946) * fix merging * remove filepath * fix style * Update README.md (#5958) * [hotfix] Remove unused plan section (#5957) * remove readme * fix readme * update * [test] add mixtral for sequence classification * [test] add mixtral transformer test * [moe] fix plugin * [test] mixtra pp shard test * [chore] handle non member group * [zero] solve hang * [test] pass mixtral shardformer test * [moe] implement transit between non moe tp and ep * [zero] solve hang * [misc] solve booster hang by rename the variable * solve hang when parallel mode = pp + dp * [moe] implement submesh initialization * [moe] add mixtral dp grad scaling when not all experts are activated * [chore] manually revert unintended commit * [chore] trivial fix * [chore] arg pass 
& remove drop token * [test] add mixtral modelling test * [moe] implement tp * [moe] test deepseek * [moe] clean legacy code * [Feature] MoE Ulysses Support (#5918) * moe sp support * moe sp bug solve * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [chore] minor fix * [moe] init moe plugin comm setting with sp * moe sp + ep bug fix * [moe] finalize test (no pp) * [moe] full test for deepseek and mixtral (pp + sp to fix) * [chore] minor fix after rebase * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [chore] solve moe ckpt test failure and some other arg pass failure * [moe] remove ops * [test] fix test: test_zero1_2 * [bug] fix: somehow logger hangs the program * [moe] deepseek moe sp support * [test] add check * [deepseek] replace attn (a workaround for bug in transformers) * [misc] skip redunant test * [misc] remove debug/print code * [moe] refactor mesh assignment * Revert "[moe] implement submesh initialization" This reverts commit 2f9bce6686
. * [chore] change moe_pg_mesh to private * [misc] remove incompatible test config * [misc] fix ci failure: change default value to false in moe plugin * [misc] remove useless condition * [chore] docstring * [moe] remove force_overlap_comm flag and add warning instead * [doc] add MoeHybridParallelPlugin docstring * [moe] solve dp axis issue * [chore] remove redundant test case, print string & reduce test tokens * [feat] Dist Loader for Eval (#5950) * support auto distributed data loader * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * support auto distributed data loader * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix tp error * remove unused parameters * remove unused * update inference * update docs * update inference --------- Co-authored-by: Michelle <qianranma8@gmail.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [lora] lora support hybrid parallel plugin (#5956) * lora support hybrid plugin * fix * fix * fix * fix * fp8 operators for compressed communication cast_to_fp8, cast_from_fp8, all_reduce_fp8 * fix scaling algorithm in FP8 casting * support fp8 communication in pipeline parallelism * add fp8_communication flag in the script * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix typo * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * shardformer fp8 * fix rebase * remove all to all * fix shardformer fp8 communication training degradation * [fp8] support all-gather flat tensor (#5932) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * Update low_level_optim.py --------- Co-authored-by: YeAnbang <anbangy2@outlook.com> Co-authored-by: Haze188 <haze188@qq.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Edenzzzz <wenxuan.tan@wisc.edu> Co-authored-by: Edenzzzz <wtan45@wisc.edu> Co-authored-by: Runyu Lu <77330637+LRY89757@users.noreply.github.com> Co-authored-by: Guangyao Zhang <xjtu521@qq.com> Co-authored-by: YeAnbang <44796419+YeAnbang@users.noreply.github.com> Co-authored-by: Hongxin Liu <lhx0217@gmail.com> Co-authored-by: Stephan Kö <stephankoe@users.noreply.github.com> Co-authored-by: アマデウス <kurisusnowdeng@users.noreply.github.com> Co-authored-by: Tong Li <tong.li352711588@gmail.com> Co-authored-by: zhurunhua <1281592874@qq.com> Co-authored-by: Insu Jang <insujang@umich.edu> Co-authored-by: Gao, Ruiyuan <905370712@qq.com> Co-authored-by: hxwang <wang1570@e.ntu.edu.sg> Co-authored-by: Michelle <qianranma8@gmail.com> Co-authored-by: Wang Binluo <32676639+wangbluo@users.noreply.github.com> Co-authored-by: HangXu <hangxu0304@gmail.com> * [fp8]support all2all fp8 (#5953) * support all2all fp8 * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [fp8] add fp8 linear (#5967) * [fp8] add fp8 linear * [test] fix fp8 linear test condition * [test] fix fp8 linear test condition * [test] fix fp8 linear test condition * [fp8] support fp8 amp for hybrid parallel plugin (#5975) * [fp8] support fp8 amp for hybrid parallel plugin * 
[test] add fp8 hook test * [fp8] fix fp8 linear compatibility * fix (#5976) * [Feature]: support FP8 communication in DDP, FSDP, Gemini (#5928) * support fp8_communication in the Torch DDP grad comm, FSDP grad comm, and FSDP params comm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * implement communication hook for FSDP params all-gather * added unit test for fp8 operators * support fp8 communication in GeminiPlugin * update training scripts to support fsdp and fp8 communication * fixed some minor bugs observed in unit test * add all_gather_into_tensor_flat_fp8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add skip the test if torch < 2.2.0 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add skip the test if torch < 2.2.0 * add skip the test if torch < 2.2.0 * add fp8_comm flag * rebase latest fp8 operators * rebase latest fp8 operators * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [test ci]Feature/fp8 comm (#5981) * fix * fix * fix * [fp8] support gemini plugin (#5978) * [fp8] refactor hook * [fp8] support gemini plugin * [example] add fp8 option for llama benchmark * [fp8] use torch compile (torch >= 2.3.0) (#5979) * [fp8] use torch compile (torch >= 2.4.0) * [fp8] set use_fast_accum in linear * [chore] formal version check * [chore] fix sig * [fp8]Moe support fp8 communication (#5977) * fix * support moe fp8 * fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * fix fix fi * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [fp8] support hybrid parallel plugin (#5982) * support fp8 comm for qwen2 model * support fp8 comm for qwen2 model * support fp8 comm for qwen2 model * fp8 * fix * bert and bloom * chatglm and command * gpt2,gptj,bert, falcon,blip2 * mistral,opy,sam,t5,vit,whisper * fix * fix * fix * [fp8] refactor fp8 linear with compile (#5993) * [fp8] refactor fp8 linear with compile * [fp8] fix linear test * [fp8] fix linear test * [fp8] support asynchronous FP8 communication (#5997) * fix * fix * fix * support async all2all * support async op for all gather * fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [fp8] update torch.compile for linear_fp8 to >= 2.4.0 (#6004) * [fp8] linear perf enhancement * [fp8]update reduce-scatter test (#6002) * fix * fix * fix * fix * [fp8] add use_fp8 option for MoeHybridParallelPlugin (#6009) * [fp8] zero support fp8 linear. 
(#6006) * fix * fix * fix * zero fp8 * zero fp8 * Update requirements.txt * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix the merge * fix the merge * fix the merge * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix the merge * fix * fix * fix the merge * fix * fix * fix * fix * fix * fix the merge * fix * fix * fix * fix * [fp8] Merge feature/fp8_comm to main branch of Colossalai (#6016) * add SimPO * fix dataloader * remove debug code * add orpo * fix style * fix colossalai, transformers version * fix colossalai, transformers version * fix colossalai, transformers version * fix torch colossalai version * update transformers version * [shardformer] DeepseekMoE support (#5871) * [Feature] deepseek moe expert parallel implement * [misc] fix typo, remove redundant file (#5867) * [misc] fix typo * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [Feature] deepseek support & unit test * [misc] remove debug code & useless print * [misc] fix typos (#5872) * [Feature] remove modeling file, use auto config. (#5884) * [misc] fix typos * [Feature] deepseek support via auto model, remove modeling file * [misc] delete useless file * [misc] fix typos * [Deepseek] remove redundant code (#5888) * [misc] fix typos * [Feature] deepseek support via auto model, remove modeling file * [misc] delete useless file * [misc] fix typos * [misc] remove redundant code * [Feature/deepseek] resolve comment. (#5889) * [misc] fix typos * [Feature] deepseek support via auto model, remove modeling file * [misc] delete useless file * [misc] fix typos * [misc] remove redundant code * [misc] mv module replacement into if branch * [misc] add some warning message and modify some code in unit test * [misc] fix typos --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [Hoxfix] Fix CUDA_DEVICE_MAX_CONNECTIONS for comm overlap Co-authored-by: Edenzzzz <wtan45@wisc.edu> * [Feat] Diffusion Model(PixArtAlpha/StableDiffusion3) Support (#5838) * Diffusion Model Inference support * Stable Diffusion 3 Support * pixartalpha support * [HotFix] CI,import,requirements-test for #5838 (#5892) * [Hot Fix] CI,import,requirements-test --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [Feature] Enable PP + SP for llama (#5868) * fix cross-PP-stage position id length diff bug * fix typo * fix typo * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * use a one cross entropy func for all shardformer models --------- Co-authored-by: Edenzzzz <wtan45@wisc.edu> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [ShardFormer] Add Ulysses Sequence Parallelism support for Command-R, Qwen2 and ChatGLM (#5897) * add benchmark for sft, dpo, simpo, orpo. Add benchmarking result. 
Support lora with gradient checkpoint * fix style * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix eval * hotfix citation * [zero] support all-gather overlap (#5898) * [zero] support all-gather overlap * [zero] add overlap all-gather flag * [misc] fix typo * [zero] update api * fix orpo cross entropy loss * [Auto Parallel]: Speed up intra-op plan generation by 44% (#5446) * Remove unnecessary calls to deepcopy * Build DimSpec's difference dict only once This change considerably speeds up construction speed of DimSpec objects. The difference_dict is the same for each DimSpec object, so a single copy of it is enough. * Fix documentation of DimSpec's difference method * [ShardFormer] fix qwen2 sp (#5903) * [compatibility] support torch 2.2 (#5875) * Support Pytorch 2.2.2 * keep build_on_pr file and update .compatibility * fix object_to_tensor usage when torch>=2.3.0 (#5820) * [misc] support torch2.3 (#5893) * [misc] support torch2.3 * [devops] update compatibility ci * [devops] update compatibility ci * [devops] add debug * [devops] add debug * [devops] add debug * [devops] add debug * [devops] remove debug * [devops] remove debug * [release] update version (#5912) * [plugin] support all-gather overlap for hybrid parallel (#5919) * [plugin] fixed all-gather overlap support for hybrid parallel * add kto * fix style, add kto data sample * [Examples] Add lazy init to OPT and GPT examples (#5924) Co-authored-by: Edenzzzz <wtan45@wisc.edu> * [ColossalChat] Hotfix for ColossalChat (#5910) * add ignore and tiny llama * fix path issue * run style * fix issue * update bash * add ignore and tiny llama * fix path issue * run style * fix issue * update bash * fix ddp issue * add Qwen 1.5 32B * refactor tokenization * [FIX BUG] UnboundLocalError: cannot access local variable 'default_conversation' where it is not associated with a value (#5931) * cannot access local variable 'default_conversation' where it is not associated with a value set default value for 'default_conversation' * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * fix test data * refactor evaluation * remove real data path * remove real data path * Add n_fused as an input from native_module (#5894) * [FIX BUG] convert env param to int in (#5934) * [Hotfix] Fix ZeRO typo #5936 Co-authored-by: Edenzzzz <wtan45@wisc.edu> * [Feature] Add a switch to control whether the model checkpoint needs to be saved after each epoch ends (#5941) * Add a switch to control whether the model checkpoint needs to be saved after each epoch ends * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * fix style * fix style * fix style * [shardformer] hotfix attn mask (#5945) * [shardformer] hotfix attn mask (#5947) * [Feat] Distrifusion Acceleration Support for Diffusion Inference (#5895) * Distrifusion Support source * comp comm overlap optimization * sd3 benchmark * pixart distrifusion bug fix * sd3 bug fix and benchmark * generation bug fix * naming fix * add docstring, fix counter and shape error * add reference * readme and requirement * [zero] hotfix update master params (#5951) * [release] update version (#5952) * [Chat] Fix lora (#5946) * fix merging * remove filepath * fix style * Update README.md 
(#5958) * [hotfix] Remove unused plan section (#5957) * remove readme * fix readme * update * [test] add mixtral for sequence classification * [test] add mixtral transformer test * [moe] fix plugin * [test] mixtra pp shard test * [chore] handle non member group * [zero] solve hang * [test] pass mixtral shardformer test * [moe] implement transit between non moe tp and ep * [zero] solve hang * [misc] solve booster hang by rename the variable * solve hang when parallel mode = pp + dp * [moe] implement submesh initialization * [moe] add mixtral dp grad scaling when not all experts are activated * [chore] manually revert unintended commit * [chore] trivial fix * [chore] arg pass & remove drop token * [test] add mixtral modelling test * [moe] implement tp * [moe] test deepseek * [moe] clean legacy code * [Feature] MoE Ulysses Support (#5918) * moe sp support * moe sp bug solve * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [chore] minor fix * [moe] init moe plugin comm setting with sp * moe sp + ep bug fix * [moe] finalize test (no pp) * [moe] full test for deepseek and mixtral (pp + sp to fix) * [chore] minor fix after rebase * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [chore] solve moe ckpt test failure and some other arg pass failure * [moe] remove ops * [test] fix test: test_zero1_2 * [bug] fix: somehow logger hangs the program * [moe] deepseek moe sp support * [test] add check * [deepseek] replace attn (a workaround for bug in transformers) * [misc] skip redunant test * [misc] remove debug/print code * [moe] refactor mesh assignment * Revert "[moe] implement submesh initialization" This reverts commit 2f9bce6686
. * [chore] change moe_pg_mesh to private * [misc] remove incompatible test config * [misc] fix ci failure: change default value to false in moe plugin * [misc] remove useless condition * [chore] docstring * [moe] remove force_overlap_comm flag and add warning instead * [doc] add MoeHybridParallelPlugin docstring * [moe] solve dp axis issue * [chore] remove redundant test case, print string & reduce test tokens * [feat] Dist Loader for Eval (#5950) * support auto distributed data loader * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * support auto distributed data loader * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix tp error * remove unused parameters * remove unused * update inference * update docs * update inference --------- Co-authored-by: Michelle <qianranma8@gmail.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [lora] lora support hybrid parallel plugin (#5956) * lora support hybrid plugin * fix * fix * fix * fix * Support overall loss, update KTO logging * [Docs] clarify launch port Co-authored-by: Edenzzzz <wtan45@wisc.edu> * [Hotfix] README link (#5966) * update ignore * update readme * run style * update readme * [Hotfix] Avoid fused RMSnorm import error without apex (#5985) Co-authored-by: Edenzzzz <wtan45@wisc.edu> * [Chat] fix readme (#5989) * fix readme * fix readme, tokenization fully tested * fix readme, tokenization fully tested * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: root <root@notebook-8f919155-6035-47b4-9c6f-1be133b9e2c9-0.notebook-8f919155-6035-47b4-9c6f-1be133b9e2c9.colossal-ai.svc.cluster.local> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * fix sync condition (#6000) * [plugin] add cast inputs option for zero (#6003) * [pre-commit.ci] pre-commit autoupdate (#5995) updates: - [github.com/psf/black-pre-commit-mirror: 24.4.2 → 24.8.0](https://github.com/psf/black-pre-commit-mirror/compare/24.4.2...24.8.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [misc] Bypass the huggingface bug to solve the mask mismatch problem (#5991) * [Feature] Zigzag Ring attention (#5905) * halfway * fix cross-PP-stage position id length diff bug * fix typo * fix typo * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * unified cross entropy func for all shardformer models * remove redundant lines * add basic ring attn; debug cross entropy * fwd bwd logic complete * fwd bwd logic complete; add experimental triton rescale * precision tests passed * precision tests passed * fix typos and remove misc files * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add sp_mode to benchmark; fix varlen interface * update softmax_lse shape by new interface * change tester name * remove buffer clone; support packed seq layout * add varlen tests * fix typo * all tests passed * add dkv_group; fix mask * remove debug statements --------- Co-authored-by: Edenzzzz <wtan45@wisc.edu> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [misc] update compatibility (#6008) * [misc] update compatibility * [misc] update requirements * [devops] disable requirements cache * [test] fix torch ddp test * [test] fix rerun on address in 
use * [test] fix lazy init * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix the merge * fix the merge * overlap kv comm with output rescale (#6017) Co-authored-by: Edenzzzz <wtan45@wisc.edu> * fix the merge * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix the merge * fix * fix * fix the merge * fix * [misc] Use dist logger in plugins (#6011) * use dist logger in plugins * remove trash * print on rank 0 --------- Co-authored-by: Edenzzzz <wtan45@wisc.edu> * fix * fix * fix * fix * fix the merge * fix * fix * fix * fix --------- Co-authored-by: YeAnbang <anbangy2@outlook.com> Co-authored-by: Haze188 <haze188@qq.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Edenzzzz <wenxuan.tan@wisc.edu> Co-authored-by: Edenzzzz <wtan45@wisc.edu> Co-authored-by: Runyu Lu <77330637+LRY89757@users.noreply.github.com> Co-authored-by: Guangyao Zhang <xjtu521@qq.com> Co-authored-by: YeAnbang <44796419+YeAnbang@users.noreply.github.com> Co-authored-by: Hongxin Liu <lhx0217@gmail.com> Co-authored-by: Stephan Kö <stephankoe@users.noreply.github.com> Co-authored-by: アマデウス <kurisusnowdeng@users.noreply.github.com> Co-authored-by: Tong Li <tong.li352711588@gmail.com> Co-authored-by: zhurunhua <1281592874@qq.com> Co-authored-by: Insu Jang <insujang@umich.edu> Co-authored-by: Gao, Ruiyuan <905370712@qq.com> Co-authored-by: hxwang <wang1570@e.ntu.edu.sg> Co-authored-by: Michelle <qianranma8@gmail.com> Co-authored-by: root <root@notebook-8f919155-6035-47b4-9c6f-1be133b9e2c9-0.notebook-8f919155-6035-47b4-9c6f-1be133b9e2c9.colossal-ai.svc.cluster.local> * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update train_dpo.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update low_level_zero_plugin.py * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [CI] Remove triton version for compatibility bug; update req torch >=2.2 (#6018) * remove triton version * remove torch 2.2 * remove torch 2.1 * debug * remove 2.1 build tests * require torch >=2.2 --------- Co-authored-by: Edenzzzz <wtan45@wisc.edu> * [plugin] hotfix zero plugin (#6036) * [plugin] hotfix zero plugin * [plugin] hotfix zero plugin * [Colossal-LLaMA] Refactor latest APIs (#6030) * refactor latest code * update api * add dummy dataset * update Readme * add setup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update files * add PP support * update arguments * update argument * reorg folder * update version * remove IB infor * update utils * update readme * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update save for zero * update save * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add apex * update --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * add fused norm (#6038) * [FP8] unsqueeze scale to make it compatible with torch.compile (#6040) * [colossalai/checkpoint_io/...] 
fix bug in load_state_dict_into_model; format error msg (#6020) * fix bug in load_state_dict_into_model; format error msg * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update utils.py to support checking missing_keys * Update general_checkpoint_io.py fix bug in missing_keys error message * retrigger tests --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [Hotfix] Remove deprecated install (#6042) * remove deprecated install * remove unused folder * [fp8] optimize all-gather (#6043) * [fp8] optimize all-gather * [fp8] fix all gather fp8 ring * [fp8] enable compile * [fp8] fix all gather fp8 ring * [fp8] fix linear hook (#6046) * [fp8] disable all_to_all_fp8 in intranode (#6045) * enhance all_to_all_fp8 with internode comm control * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * disable some fp8 ops due to performance issue * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [release] update version (#6041) * [release] update version * [devops] update comp test * [devops] update comp test debug * [devops] debug comp test * [devops] debug comp test * [devops] debug comp test * [devops] debug comp test * [devops] debug comp test * [Feature] Split cross-entropy computation in SP (#5959) * halfway * fix cross-PP-stage position id length diff bug * fix typo * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * unified cross entropy func for all shardformer models * remove redundant lines * add basic ring attn; debug cross entropy * fwd bwd logic complete * fwd bwd logic complete; add experimental triton rescale * precision tests passed * precision tests passed * fix typos and remove misc files * update softmax_lse shape by new interface * change tester name * remove buffer clone; support packed seq layout * add varlen tests * fix typo * all tests passed * add dkv_group; fix mask * remove debug statements * adapt chatglm, command-R, qwen * debug * halfway * fix cross-PP-stage position id length diff bug * fix typo * fix typo * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * unified cross entropy func for all shardformer models * remove redundant lines * add basic ring attn; debug cross entropy * fwd bwd logic complete * fwd bwd logic complete; add experimental triton rescale * precision tests passed * precision tests passed * fix typos and remove misc files * add sp_mode to benchmark; fix varlen interface * update softmax_lse shape by new interface * add varlen tests * fix typo * all tests passed * add dkv_group; fix mask * remove debug statements * add comments * q1 index only once * remove events to simplify stream sync * simplify forward/backward logic * 2d ring forward passed * 2d ring backward passed * fixes * fix ring attn loss * 2D ring backward + llama passed * merge * update logger * fix typo * rebase * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix typo * remove typos * fixes * support GPT --------- Co-authored-by: Edenzzzz <wtan45@wisc.edu> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [hotfix] moe hybrid parallelism benchmark & follow-up fix (#6048) * [example] 
pass use_fp8_comm flag to all plugins * [example] add mixtral benchmark * [moe] refine assertion and check * [moe] fix mixtral & add more tests * [moe] consider checking dp * sp group and moe_dp_group * [mixtral] remove gate tp & add more tests * [deepseek] fix tp & sp for deepseek * [mixtral] minor fix * [deepseek] add deepseek benchmark * [fp8] hotfix backward hook (#6053) * [fp8] hotfix backward hook * [fp8] hotfix pipeline loss accumulation * [doc] update sp doc (#6055) * update sp doc * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * fix --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * fix the sp * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix the attn * fix * fix * fix * fix * [zerobubble]Support ZeroBubble Pipeline (#6034) * [feat] add zerobubble pp (just a frame now); add POC test for dx_dw; add test for zerobubble; * [feat] add dw test; * [fix] fix weight not close; * [update] update text; * [feat] add test run_fwd_bwd automatic scheduling; * [feat] split communication and calculation; fix pop empty send_bwd_buffer error; * [feat] add test for p & p grad; * [feat] add comments for ZBV func; * [fix] rm useless assign and comments; * [fix] fix ci test; add pytest; * [feat] add run_fwd_bwd_with_microbatch (replace input) & test; add p&p.grad assert close test & all pass; * [feat] add apply v_schedule graph; p & p.grad assert err exist; * [fix] update * [feat] fix ci; add assert; * [feat] fix poc format * [feat] fix func name & ci; add comments; * [fix] fix poc test; add comments in poc; * [feat] add optim backward_b_by_grad * [feat] fix optimizer bwd b & w; support return accum loss & output * [feat] add fwd_bwd_step, run_fwd_only; * [fix] fix optim bwd; add license for v_schedule; remove redundant attributes; fix schedule loop "while"--> "for"; add communication dict; * [fix] fix communication_map; * [feat] update test; rm comments; * [fix] rm zbv in hybridplugin * [fix] fix optim bwd; * [fix] fix optim bwd; * [fix] rm output.data after send fwd; * [fix] fix bwd step if condition; remove useless comments and format info; * [fix] fix detach output & release output; * [fix] rm requir_grad for output; * [fix] fix requir grad position and detach position and input&output local buffer append position; * [feat] add memory assertation; * [fix] fix mem check; * [fix] mem assertation' * [fix] fix mem assertation * [fix] fix mem; use a new model shape; only assert mem less and equal than theo; * [fix] fix model zoo import; * [fix] fix redundant detach & clone; add buffer assertation in the end; * [fix] add output_obj_grad assert None at bwd b step; replace input_obj.require_grad_ with treemap; * [fix] update optim state dict assert (include param group & state); fix mem assert after add optim; * [fix] add testcase with microbatch 4; * [fp8] fix missing fp8_comm flag in mixtral (#6057) * fix * fix * fix * [fp8] Disable all_gather intranode. 
Disable Redundant all_gather fp8 (#6059) * all_gather only internode, fix pytest * fix cuda arch <89 compile pytest error * fix pytest failure * disable all_gather_into_tensor_flat_fp8 * fix fp8 format * fix pytest * fix conversations * fix chunk tuple to list * [doc] FP8 training and communication document (#6050) * Add FP8 training and communication document * add fp8 docstring for plugins * fix typo * fix typo * fix * fix * [moe] add parallel strategy for shared_expert && fix test for deepseek (#6063) * [ColossalEval] support for vllm (#6056) * support vllm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * modify vllm and update readme * run pre-commit * remove dupilicated lines and refine code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update param name * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * refine code * update readme * refine code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [release] update version (#6062) * [feat] add zerobubble pp (just a frame now); add POC test for dx_dw; add test for zerobubble; * [update] update text; * [feat] add test run_fwd_bwd automatic scheduling; * [feat] fix poc format * [fix] fix poc test; add comments in poc; * [feat] add optim backward_b_by_grad * [feat] fix optimizer bwd b & w; support return accum loss & output * [fix] fix optim bwd; add license for v_schedule; remove redundant attributes; fix schedule loop "while"--> "for"; add communication dict; * [feat] update test; rm comments; * [fix] rm zbv in hybridplugin * [fix] fix optim bwd; * [fix] fix optim bwd; * [fix] rm output.data after send fwd; * [fix] fix bwd step if condition; remove useless comments and format info; * [fix] fix mem check; * [fix] fix mem assertation * [fix] fix mem; use a new model shape; only assert mem less and equal than theo; * [fix] fix model zoo import; * [feat] moehybrid support zerobubble; * [fix] fix zerobubble pp for shardformer type input; * [fix] fix require_grad & deallocate call; * [fix] fix mem assert; * [fix] fix fwd branch, fwd pass both micro_batch & internal_inputs' * [fix] fix pipeline util func deallocate --> release_tensor_data; fix bwd_b loss bwd branch; * [fix] fix zerobubble; support shardformer model type; * [fix] fix test_pipeline_utils ci; * [plugin] hybrid support zero bubble pipeline (#6060) * hybrid support zbv * fix fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * Update zero_bubble_pp.py * fix * fix-ci * fix [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * fix * fix * [zerobubble]Support ZeroBubble Pipeline (#6034) * [feat] add zerobubble pp (just a frame now); add POC test for dx_dw; add test for zerobubble; * [feat] add dw test; * [fix] fix 
weight not close; * [update] update text; * [feat] add test run_fwd_bwd automatic scheduling; * [feat] split communication and calculation; fix pop empty send_bwd_buffer error; * [feat] add test for p & p grad; * [feat] add comments for ZBV func; * [fix] rm useless assign and comments; * [fix] fix ci test; add pytest; * [feat] add run_fwd_bwd_with_microbatch (replace input) & test; add p&p.grad assert close test & all pass; * [feat] add apply v_schedule graph; p & p.grad assert err exist; * [fix] update * [feat] fix ci; add assert; * [feat] fix poc format * [feat] fix func name & ci; add comments; * [fix] fix poc test; add comments in poc; * [feat] add optim backward_b_by_grad * [feat] fix optimizer bwd b & w; support return accum loss & output * [feat] add fwd_bwd_step, run_fwd_only; * [fix] fix optim bwd; add license for v_schedule; remove redundant attributes; fix schedule loop "while"--> "for"; add communication dict; * [fix] fix communication_map; * [feat] update test; rm comments; * [fix] rm zbv in hybridplugin * [fix] fix optim bwd; * [fix] fix optim bwd; * [fix] rm output.data after send fwd; * [fix] fix bwd step if condition; remove useless comments and format info; * [fix] fix detach output & release output; * [fix] rm requir_grad for output; * [fix] fix requir grad position and detach position and input&output local buffer append position; * [feat] add memory assertation; * [fix] fix mem check; * [fix] mem assertation' * [fix] fix mem assertation * [fix] fix mem; use a new model shape; only assert mem less and equal than theo; * [fix] fix model zoo import; * [fix] fix redundant detach & clone; add buffer assertation in the end; * [fix] add output_obj_grad assert None at bwd b step; replace input_obj.require_grad_ with treemap; * [fix] update optim state dict assert (include param group & state); fix mem assert after add optim; * [fix] add testcase with microbatch 4; * hybrid support zbv * fix fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update zero_bubble_pp.py * fix * fix-ci * fix [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * fix * fix * fix * fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * fix * fix --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: duanjunwen <935724073@qq.com> * [feat] add zerobubble pp (just a frame now); add POC test for dx_dw; add test for zerobubble; * [update] update text; * [feat] add test run_fwd_bwd automatic scheduling; * [feat] fix poc format * [fix] fix poc test; add comments in poc; * [feat] add optim backward_b_by_grad * [feat] fix optimizer bwd b & w; support return accum loss & output * [fix] fix optim bwd; add license for v_schedule; remove redundant attributes; fix schedule loop "while"--> "for"; add communication dict; * [feat] update test; rm comments; * [fix] fix optim bwd; * [fix] fix optim bwd; * [fix] rm output.data after send fwd; * [fix] fix bwd step if condition; remove useless comments and format info; * [fix] fix mem check; * 
[fix] fix mem assertation * [fix] fix mem; use a new model shape; only assert mem less and equal than theo; * [fix] fix model zoo import; * [fix] fix mem assert; * [fix] fix fwd branch, fwd pass both micro_batch & internal_inputs' * [plugin] hybrid support zero bubble pipeline (#6060) * hybrid support zbv * fix fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * Update zero_bubble_pp.py * fix * fix-ci * fix [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * fix * fix * [zerobubble]Support ZeroBubble Pipeline (#6034) * [feat] add zerobubble pp (just a frame now); add POC test for dx_dw; add test for zerobubble; * [feat] add dw test; * [fix] fix weight not close; * [update] update text; * [feat] add test run_fwd_bwd automatic scheduling; * [feat] split communication and calculation; fix pop empty send_bwd_buffer error; * [feat] add test for p & p grad; * [feat] add comments for ZBV func; * [fix] rm useless assign and comments; * [fix] fix ci test; add pytest; * [feat] add run_fwd_bwd_with_microbatch (replace input) & test; add p&p.grad assert close test & all pass; * [feat] add apply v_schedule graph; p & p.grad assert err exist; * [fix] update * [feat] fix ci; add assert; * [feat] fix poc format * [feat] fix func name & ci; add comments; * [fix] fix poc test; add comments in poc; * [feat] add optim backward_b_by_grad * [feat] fix optimizer bwd b & w; support return accum loss & output * [feat] add fwd_bwd_step, run_fwd_only; * [fix] fix optim bwd; add license for v_schedule; remove redundant attributes; fix schedule loop "while"--> "for"; add communication dict; * [fix] fix communication_map; * [feat] update test; rm comments; * [fix] rm zbv in hybridplugin * [fix] fix optim bwd; * [fix] fix optim bwd; * [fix] rm output.data after send fwd; * [fix] fix bwd step if condition; remove useless comments and format info; * [fix] fix detach output & release output; * [fix] rm requir_grad for output; * [fix] fix requir grad position and detach position and input&output local buffer append position; * [feat] add memory assertation; * [fix] fix mem check; * [fix] mem assertation' * [fix] fix mem assertation * [fix] fix mem; use a new model shape; only assert mem less and equal than theo; * [fix] fix model zoo import; * [fix] fix redundant detach & clone; add buffer assertation in the end; * [fix] add output_obj_grad assert None at bwd b step; replace input_obj.require_grad_ with treemap; * [fix] update optim state dict assert (include param group & state); fix mem assert after add optim; * [fix] add testcase with microbatch 4; * hybrid support zbv * fix fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update zero_bubble_pp.py * fix * fix-ci * fix [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci fix * fix * [pre-commit.ci] 
auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * fix * fix * fix * fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * fix * fix --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: duanjunwen <935724073@qq.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: HangXu <hangxu0304@gmail.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: GuangyaoZhang <xjtu521@qq.com> Co-authored-by: Hongxin Liu <lhx0217@gmail.com> Co-authored-by: YeAnbang <anbangy2@outlook.com> Co-authored-by: Haze188 <haze188@qq.com> Co-authored-by: Edenzzzz <wenxuan.tan@wisc.edu> Co-authored-by: Edenzzzz <wtan45@wisc.edu> Co-authored-by: Runyu Lu <77330637+LRY89757@users.noreply.github.com> Co-authored-by: YeAnbang <44796419+YeAnbang@users.noreply.github.com> Co-authored-by: Stephan Kö <stephankoe@users.noreply.github.com> Co-authored-by: アマデウス <kurisusnowdeng@users.noreply.github.com> Co-authored-by: Tong Li <tong.li352711588@gmail.com> Co-authored-by: zhurunhua <1281592874@qq.com> Co-authored-by: Insu Jang <insujang@umich.edu> Co-authored-by: Gao, Ruiyuan <905370712@qq.com> Co-authored-by: hxwang <wang1570@e.ntu.edu.sg> Co-authored-by: Michelle <qianranma8@gmail.com> Co-authored-by: Wang Binluo <32676639+wangbluo@users.noreply.github.com> Co-authored-by: wangbluo <2538539015@qq.com> Co-authored-by: root <root@notebook-8f919155-6035-47b4-9c6f-1be133b9e2c9-0.notebook-8f919155-6035-47b4-9c6f-1be133b9e2c9.colossal-ai.svc.cluster.local> Co-authored-by: duanjunwen <935724073@qq.com> Co-authored-by: Camille Zhong <44392324+Camille7777@users.noreply.github.com> * [fix] fix mixtral policy; * [fix] fix mixtral policy; * [feat] support zbv in mixtral benchmark; * [fix] MixtralForCausalLMPolicy get_held_layer support zbv; * [feat] update MixtralPipelineForwards --> mixtral_model_forward; support zbv; * [feat] support MixtralPipelineForwards--> mixtral_for_causal_lm_forward for zbv * [zero bubble] support zero (#6080) * fp8 operators for compressed communication cast_to_fp8, cast_from_fp8, all_reduce_fp8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix typo * fix scaling algorithm in FP8 casting * support fp8 communication in pipeline parallelism * add fp8_communication flag in the script * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * shardformer fp8 * fix rebase * remove all to all * fix shardformer fp8 communication training degradation * [fp8] support all-gather flat tensor (#5932) * [fp8] add fp8 comm for low level zero * [test] add zero fp8 test case * [Feature] llama shardformer fp8 support (#5938) * add llama shardformer fp8 * Llama Shardformer Parity * fix typo * fix all reduce * fix pytest failure * fix reduce op and move function to fp8.py * fix typo * [FP8] rebase main (#5963) * add SimPO * fix dataloader * remove debug code * add orpo * fix style * fix colossalai, transformers version * fix colossalai, transformers version * fix colossalai, transformers version * fix torch colossalai version * update transformers version * [shardformer] DeepseekMoE support (#5871) * [Feature] 
deepseek moe expert parallel implement * [misc] fix typo, remove redundant file (#5867) * [misc] fix typo * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [Feature] deepseek support & unit test * [misc] remove debug code & useless print * [misc] fix typos (#5872) * [Feature] remove modeling file, use auto config. (#5884) * [misc] fix typos * [Feature] deepseek support via auto model, remove modeling file * [misc] delete useless file * [misc] fix typos * [Deepseek] remove redundant code (#5888) * [misc] fix typos * [Feature] deepseek support via auto model, remove modeling file * [misc] delete useless file * [misc] fix typos * [misc] remove redundant code * [Feature/deepseek] resolve comment. (#5889) * [misc] fix typos * [Feature] deepseek support via auto model, remove modeling file * [misc] delete useless file * [misc] fix typos * [misc] remove redundant code * [misc] mv module replacement into if branch * [misc] add some warning message and modify some code in unit test * [misc] fix typos --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [Hoxfix] Fix CUDA_DEVICE_MAX_CONNECTIONS for comm overlap Co-authored-by: Edenzzzz <wtan45@wisc.edu> * [Feat] Diffusion Model(PixArtAlpha/StableDiffusion3) Support (#5838) * Diffusion Model Inference support * Stable Diffusion 3 Support * pixartalpha support * [HotFix] CI,import,requirements-test for #5838 (#5892) * [Hot Fix] CI,import,requirements-test --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [Feature] Enable PP + SP for llama (#5868) * fix cross-PP-stage position id length diff bug * fix typo * fix typo * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * use a one cross entropy func for all shardformer models --------- Co-authored-by: Edenzzzz <wtan45@wisc.edu> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [ShardFormer] Add Ulysses Sequence Parallelism support for Command-R, Qwen2 and ChatGLM (#5897) * add benchmark for sft, dpo, simpo, orpo. Add benchmarking result. Support lora with gradient checkpoint * fix style * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix eval * hotfix citation * [zero] support all-gather overlap (#5898) * [zero] support all-gather overlap * [zero] add overlap all-gather flag * [misc] fix typo * [zero] update api * fix orpo cross entropy loss * [Auto Parallel]: Speed up intra-op plan generation by 44% (#5446) * Remove unnecessary calls to deepcopy * Build DimSpec's difference dict only once This change considerably speeds up construction speed of DimSpec objects. The difference_dict is the same for each DimSpec object, so a single copy of it is enough. 
* Fix documentation of DimSpec's difference method * [ShardFormer] fix qwen2 sp (#5903) * [compatibility] support torch 2.2 (#5875) * Support Pytorch 2.2.2 * keep build_on_pr file and update .compatibility * fix object_to_tensor usage when torch>=2.3.0 (#5820) * [misc] support torch2.3 (#5893) * [misc] support torch2.3 * [devops] update compatibility ci * [devops] update compatibility ci * [devops] add debug * [devops] add debug * [devops] add debug * [devops] add debug * [devops] remove debug * [devops] remove debug * [release] update version (#5912) * [plugin] support all-gather overlap for hybrid parallel (#5919) * [plugin] fixed all-gather overlap support for hybrid parallel * add kto * fix style, add kto data sample * [Examples] Add lazy init to OPT and GPT examples (#5924) Co-authored-by: Edenzzzz <wtan45@wisc.edu> * [ColossalChat] Hotfix for ColossalChat (#5910) * add ignore and tiny llama * fix path issue * run style * fix issue * update bash * add ignore and tiny llama * fix path issue * run style * fix issue * update bash * fix ddp issue * add Qwen 1.5 32B * refactor tokenization * [FIX BUG] UnboundLocalError: cannot access local variable 'default_conversation' where it is not associated with a value (#5931) * cannot access local variable 'default_conversation' where it is not associated with a value set default value for 'default_conversation' * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * fix test data * refactor evaluation * remove real data path * remove real data path * Add n_fused as an input from native_module (#5894) * [FIX BUG] convert env param to int in (#5934) * [Hotfix] Fix ZeRO typo #5936 Co-authored-by: Edenzzzz <wtan45@wisc.edu> * [Feature] Add a switch to control whether the model checkpoint needs to be saved after each epoch ends (#5941) * Add a switch to control whether the model checkpoint needs to be saved after each epoch ends * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * fix style * fix style * fix style * [shardformer] hotfix attn mask (#5945) * [shardformer] hotfix attn mask (#5947) * [Feat] Distrifusion Acceleration Support for Diffusion Inference (#5895) * Distrifusion Support source * comp comm overlap optimization * sd3 benchmark * pixart distrifusion bug fix * sd3 bug fix and benchmark * generation bug fix * naming fix * add docstring, fix counter and shape error * add reference * readme and requirement * [zero] hotfix update master params (#5951) * [release] update version (#5952) * [Chat] Fix lora (#5946) * fix merging * remove filepath * fix style * Update README.md (#5958) * [hotfix] Remove unused plan section (#5957) * remove readme * fix readme * update * [test] add mixtral for sequence classification * [test] add mixtral transformer test * [moe] fix plugin * [test] mixtra pp shard test * [chore] handle non member group * [zero] solve hang * [test] pass mixtral shardformer test * [moe] implement transit between non moe tp and ep * [zero] solve hang * [misc] solve booster hang by rename the variable * solve hang when parallel mode = pp + dp * [moe] implement submesh initialization * [moe] add mixtral dp grad scaling when not all experts are activated * [chore] manually revert unintended commit * [chore] trivial fix * [chore] arg pass 
& remove drop token * [test] add mixtral modelling test * [moe] implement tp * [moe] test deepseek * [moe] clean legacy code * [Feature] MoE Ulysses Support (#5918) * moe sp support * moe sp bug solve * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [chore] minor fix * [moe] init moe plugin comm setting with sp * moe sp + ep bug fix * [moe] finalize test (no pp) * [moe] full test for deepseek and mixtral (pp + sp to fix) * [chore] minor fix after rebase * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [chore] solve moe ckpt test failure and some other arg pass failure * [moe] remove ops * [test] fix test: test_zero1_2 * [bug] fix: somehow logger hangs the program * [moe] deepseek moe sp support * [test] add check * [deepseek] replace attn (a workaround for bug in transformers) * [misc] skip redunant test * [misc] remove debug/print code * [moe] refactor mesh assignment * Revert "[moe] implement submesh initialization" This reverts commit2f9bce6686
. * [chore] change moe_pg_mesh to private * [misc] remove incompatible test config * [misc] fix ci failure: change default value to false in moe plugin * [misc] remove useless condition * [chore] docstring * [moe] remove force_overlap_comm flag and add warning instead * [doc] add MoeHybridParallelPlugin docstring * [moe] solve dp axis issue * [chore] remove redundant test case, print string & reduce test tokens * [feat] Dist Loader for Eval (#5950) * support auto distributed data loader * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * support auto distributed data loader * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix tp error * remove unused parameters * remove unused * update inference * update docs * update inference --------- Co-authored-by: Michelle <qianranma8@gmail.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [lora] lora support hybrid parallel plugin (#5956) * lora support hybrid plugin * fix * fix * fix * fix * fp8 operators for compressed communication cast_to_fp8, cast_from_fp8, all_reduce_fp8 * fix scaling algorithm in FP8 casting * support fp8 communication in pipeline parallelism * add fp8_communication flag in the script * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix typo * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * shardformer fp8 * fix rebase * remove all to all * fix shardformer fp8 communication training degradation * [fp8] support all-gather flat tensor (#5932) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * Update low_level_optim.py --------- Co-authored-by: YeAnbang <anbangy2@outlook.com> Co-authored-by: Haze188 <haze188@qq.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Edenzzzz <wenxuan.tan@wisc.edu> Co-authored-by: Edenzzzz <wtan45@wisc.edu> Co-authored-by: Runyu Lu <77330637+LRY89757@users.noreply.github.com> Co-authored-by: Guangyao Zhang <xjtu521@qq.com> Co-authored-by: YeAnbang <44796419+YeAnbang@users.noreply.github.com> Co-authored-by: Hongxin Liu <lhx0217@gmail.com> Co-authored-by: Stephan Kö <stephankoe@users.noreply.github.com> Co-authored-by: アマデウス <kurisusnowdeng@users.noreply.github.com> Co-authored-by: Tong Li <tong.li352711588@gmail.com> Co-authored-by: zhurunhua <1281592874@qq.com> Co-authored-by: Insu Jang <insujang@umich.edu> Co-authored-by: Gao, Ruiyuan <905370712@qq.com> Co-authored-by: hxwang <wang1570@e.ntu.edu.sg> Co-authored-by: Michelle <qianranma8@gmail.com> Co-authored-by: Wang Binluo <32676639+wangbluo@users.noreply.github.com> Co-authored-by: HangXu <hangxu0304@gmail.com> * [fp8]support all2all fp8 (#5953) * support all2all fp8 * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [fp8] add fp8 linear (#5967) * [fp8] add fp8 linear * [test] fix fp8 linear test condition * [test] fix fp8 linear test condition * [test] fix fp8 linear test condition * [fp8] support fp8 amp for hybrid parallel plugin (#5975) * [fp8] support fp8 amp for hybrid parallel plugin * 
[test] add fp8 hook test * [fp8] fix fp8 linear compatibility * fix (#5976) * [Feature]: support FP8 communication in DDP, FSDP, Gemini (#5928) * support fp8_communication in the Torch DDP grad comm, FSDP grad comm, and FSDP params comm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * implement communication hook for FSDP params all-gather * added unit test for fp8 operators * support fp8 communication in GeminiPlugin * update training scripts to support fsdp and fp8 communication * fixed some minor bugs observed in unit test * add all_gather_into_tensor_flat_fp8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add skip the test if torch < 2.2.0 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add skip the test if torch < 2.2.0 * add skip the test if torch < 2.2.0 * add fp8_comm flag * rebase latest fp8 operators * rebase latest fp8 operators * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [test ci]Feature/fp8 comm (#5981) * fix * fix * fix * [fp8] support gemini plugin (#5978) * [fp8] refactor hook * [fp8] support gemini plugin * [example] add fp8 option for llama benchmark * [fp8] use torch compile (torch >= 2.3.0) (#5979) * [fp8] use torch compile (torch >= 2.4.0) * [fp8] set use_fast_accum in linear * [chore] formal version check * [chore] fix sig * [fp8]Moe support fp8 communication (#5977) * fix * support moe fp8 * fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * fix fix fi * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [fp8] support hybrid parallel plugin (#5982) * support fp8 comm for qwen2 model * support fp8 comm for qwen2 model * support fp8 comm for qwen2 model * fp8 * fix * bert and bloom * chatglm and command * gpt2,gptj,bert, falcon,blip2 * mistral,opy,sam,t5,vit,whisper * fix * fix * fix * [fp8] refactor fp8 linear with compile (#5993) * [fp8] refactor fp8 linear with compile * [fp8] fix linear test * [fp8] fix linear test * [fp8] support asynchronous FP8 communication (#5997) * fix * fix * fix * support async all2all * support async op for all gather * fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [fp8] update torch.compile for linear_fp8 to >= 2.4.0 (#6004) * [fp8] linear perf enhancement * [fp8]update reduce-scatter test (#6002) * fix * fix * fix * fix * [fp8] add use_fp8 option for MoeHybridParallelPlugin (#6009) * [fp8] zero support fp8 linear. 
(#6006) * fix * fix * fix * zero fp8 * zero fp8 * Update requirements.txt * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix the merge * fix the merge * fix the merge * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix the merge * fix * fix * fix the merge * fix * fix * fix * fix * fix * fix the merge * fix * fix * fix * fix * [fp8] Merge feature/fp8_comm to main branch of Colossalai (#6016) * add SimPO * fix dataloader * remove debug code * add orpo * fix style * fix colossalai, transformers version * fix colossalai, transformers version * fix colossalai, transformers version * fix torch colossalai version * update transformers version * [shardformer] DeepseekMoE support (#5871) * [Feature] deepseek moe expert parallel implement * [misc] fix typo, remove redundant file (#5867) * [misc] fix typo * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [Feature] deepseek support & unit test * [misc] remove debug code & useless print * [misc] fix typos (#5872) * [Feature] remove modeling file, use auto config. (#5884) * [misc] fix typos * [Feature] deepseek support via auto model, remove modeling file * [misc] delete useless file * [misc] fix typos * [Deepseek] remove redundant code (#5888) * [misc] fix typos * [Feature] deepseek support via auto model, remove modeling file * [misc] delete useless file * [misc] fix typos * [misc] remove redundant code * [Feature/deepseek] resolve comment. (#5889) * [misc] fix typos * [Feature] deepseek support via auto model, remove modeling file * [misc] delete useless file * [misc] fix typos * [misc] remove redundant code * [misc] mv module replacement into if branch * [misc] add some warning message and modify some code in unit test * [misc] fix typos --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [Hoxfix] Fix CUDA_DEVICE_MAX_CONNECTIONS for comm overlap Co-authored-by: Edenzzzz <wtan45@wisc.edu> * [Feat] Diffusion Model(PixArtAlpha/StableDiffusion3) Support (#5838) * Diffusion Model Inference support * Stable Diffusion 3 Support * pixartalpha support * [HotFix] CI,import,requirements-test for #5838 (#5892) * [Hot Fix] CI,import,requirements-test --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [Feature] Enable PP + SP for llama (#5868) * fix cross-PP-stage position id length diff bug * fix typo * fix typo * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * use a one cross entropy func for all shardformer models --------- Co-authored-by: Edenzzzz <wtan45@wisc.edu> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [ShardFormer] Add Ulysses Sequence Parallelism support for Command-R, Qwen2 and ChatGLM (#5897) * add benchmark for sft, dpo, simpo, orpo. Add benchmarking result. 
Support lora with gradient checkpoint * fix style * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix eval * hotfix citation * [zero] support all-gather overlap (#5898) * [zero] support all-gather overlap * [zero] add overlap all-gather flag * [misc] fix typo * [zero] update api * fix orpo cross entropy loss * [Auto Parallel]: Speed up intra-op plan generation by 44% (#5446) * Remove unnecessary calls to deepcopy * Build DimSpec's difference dict only once This change considerably speeds up construction speed of DimSpec objects. The difference_dict is the same for each DimSpec object, so a single copy of it is enough. * Fix documentation of DimSpec's difference method * [ShardFormer] fix qwen2 sp (#5903) * [compatibility] support torch 2.2 (#5875) * Support Pytorch 2.2.2 * keep build_on_pr file and update .compatibility * fix object_to_tensor usage when torch>=2.3.0 (#5820) * [misc] support torch2.3 (#5893) * [misc] support torch2.3 * [devops] update compatibility ci * [devops] update compatibility ci * [devops] add debug * [devops] add debug * [devops] add debug * [devops] add debug * [devops] remove debug * [devops] remove debug * [release] update version (#5912) * [plugin] support all-gather overlap for hybrid parallel (#5919) * [plugin] fixed all-gather overlap support for hybrid parallel * add kto * fix style, add kto data sample * [Examples] Add lazy init to OPT and GPT examples (#5924) Co-authored-by: Edenzzzz <wtan45@wisc.edu> * [ColossalChat] Hotfix for ColossalChat (#5910) * add ignore and tiny llama * fix path issue * run style * fix issue * update bash * add ignore and tiny llama * fix path issue * run style * fix issue * update bash * fix ddp issue * add Qwen 1.5 32B * refactor tokenization * [FIX BUG] UnboundLocalError: cannot access local variable 'default_conversation' where it is not associated with a value (#5931) * cannot access local variable 'default_conversation' where it is not associated with a value set default value for 'default_conversation' * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * fix test data * refactor evaluation * remove real data path * remove real data path * Add n_fused as an input from native_module (#5894) * [FIX BUG] convert env param to int in (#5934) * [Hotfix] Fix ZeRO typo #5936 Co-authored-by: Edenzzzz <wtan45@wisc.edu> * [Feature] Add a switch to control whether the model checkpoint needs to be saved after each epoch ends (#5941) * Add a switch to control whether the model checkpoint needs to be saved after each epoch ends * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * fix style * fix style * fix style * [shardformer] hotfix attn mask (#5945) * [shardformer] hotfix attn mask (#5947) * [Feat] Distrifusion Acceleration Support for Diffusion Inference (#5895) * Distrifusion Support source * comp comm overlap optimization * sd3 benchmark * pixart distrifusion bug fix * sd3 bug fix and benchmark * generation bug fix * naming fix * add docstring, fix counter and shape error * add reference * readme and requirement * [zero] hotfix update master params (#5951) * [release] update version (#5952) * [Chat] Fix lora (#5946) * fix merging * remove filepath * fix style * Update README.md 
(#5958) * [hotfix] Remove unused plan section (#5957) * remove readme * fix readme * update * [test] add mixtral for sequence classification * [test] add mixtral transformer test * [moe] fix plugin * [test] mixtra pp shard test * [chore] handle non member group * [zero] solve hang * [test] pass mixtral shardformer test * [moe] implement transit between non moe tp and ep * [zero] solve hang * [misc] solve booster hang by rename the variable * solve hang when parallel mode = pp + dp * [moe] implement submesh initialization * [moe] add mixtral dp grad scaling when not all experts are activated * [chore] manually revert unintended commit * [chore] trivial fix * [chore] arg pass & remove drop token * [test] add mixtral modelling test * [moe] implement tp * [moe] test deepseek * [moe] clean legacy code * [Feature] MoE Ulysses Support (#5918) * moe sp support * moe sp bug solve * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [chore] minor fix * [moe] init moe plugin comm setting with sp * moe sp + ep bug fix * [moe] finalize test (no pp) * [moe] full test for deepseek and mixtral (pp + sp to fix) * [chore] minor fix after rebase * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [chore] solve moe ckpt test failure and some other arg pass failure * [moe] remove ops * [test] fix test: test_zero1_2 * [bug] fix: somehow logger hangs the program * [moe] deepseek moe sp support * [test] add check * [deepseek] replace attn (a workaround for bug in transformers) * [misc] skip redunant test * [misc] remove debug/print code * [moe] refactor mesh assignment * Revert "[moe] implement submesh initialization" This reverts commit2f9bce6686
. * [chore] change moe_pg_mesh to private * [misc] remove incompatible test config * [misc] fix ci failure: change default value to false in moe plugin * [misc] remove useless condition * [chore] docstring * [moe] remove force_overlap_comm flag and add warning instead * [doc] add MoeHybridParallelPlugin docstring * [moe] solve dp axis issue * [chore] remove redundant test case, print string & reduce test tokens * [feat] Dist Loader for Eval (#5950) * support auto distributed data loader * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * support auto distributed data loader * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix tp error * remove unused parameters * remove unused * update inference * update docs * update inference --------- Co-authored-by: Michelle <qianranma8@gmail.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [lora] lora support hybrid parallel plugin (#5956) * lora support hybrid plugin * fix * fix * fix * fix * Support overall loss, update KTO logging * [Docs] clarify launch port Co-authored-by: Edenzzzz <wtan45@wisc.edu> * [Hotfix] README link (#5966) * update ignore * update readme * run style * update readme * [Hotfix] Avoid fused RMSnorm import error without apex (#5985) Co-authored-by: Edenzzzz <wtan45@wisc.edu> * [Chat] fix readme (#5989) * fix readme * fix readme, tokenization fully tested * fix readme, tokenization fully tested * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: root <root@notebook-8f919155-6035-47b4-9c6f-1be133b9e2c9-0.notebook-8f919155-6035-47b4-9c6f-1be133b9e2c9.colossal-ai.svc.cluster.local> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * fix sync condition (#6000) * [plugin] add cast inputs option for zero (#6003) * [pre-commit.ci] pre-commit autoupdate (#5995) updates: - [github.com/psf/black-pre-commit-mirror: 24.4.2 → 24.8.0](https://github.com/psf/black-pre-commit-mirror/compare/24.4.2...24.8.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [misc] Bypass the huggingface bug to solve the mask mismatch problem (#5991) * [Feature] Zigzag Ring attention (#5905) * halfway * fix cross-PP-stage position id length diff bug * fix typo * fix typo * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * unified cross entropy func for all shardformer models * remove redundant lines * add basic ring attn; debug cross entropy * fwd bwd logic complete * fwd bwd logic complete; add experimental triton rescale * precision tests passed * precision tests passed * fix typos and remove misc files * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add sp_mode to benchmark; fix varlen interface * update softmax_lse shape by new interface * change tester name * remove buffer clone; support packed seq layout * add varlen tests * fix typo * all tests passed * add dkv_group; fix mask * remove debug statements --------- Co-authored-by: Edenzzzz <wtan45@wisc.edu> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [misc] update compatibility (#6008) * [misc] update compatibility * [misc] update requirements * [devops] disable requirements cache * [test] fix torch ddp test * [test] fix rerun on address in 
use * [test] fix lazy init * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix the merge * fix the merge * overlap kv comm with output rescale (#6017) Co-authored-by: Edenzzzz <wtan45@wisc.edu> * fix the merge * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix the merge * fix * fix * fix the merge * fix * [misc] Use dist logger in plugins (#6011) * use dist logger in plugins * remove trash * print on rank 0 --------- Co-authored-by: Edenzzzz <wtan45@wisc.edu> * fix * fix * fix * fix * fix the merge * fix * fix * fix * fix --------- Co-authored-by: YeAnbang <anbangy2@outlook.com> Co-authored-by: Haze188 <haze188@qq.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Edenzzzz <wenxuan.tan@wisc.edu> Co-authored-by: Edenzzzz <wtan45@wisc.edu> Co-authored-by: Runyu Lu <77330637+LRY89757@users.noreply.github.com> Co-authored-by: Guangyao Zhang <xjtu521@qq.com> Co-authored-by: YeAnbang <44796419+YeAnbang@users.noreply.github.com> Co-authored-by: Hongxin Liu <lhx0217@gmail.com> Co-authored-by: Stephan Kö <stephankoe@users.noreply.github.com> Co-authored-by: アマデウス <kurisusnowdeng@users.noreply.github.com> Co-authored-by: Tong Li <tong.li352711588@gmail.com> Co-authored-by: zhurunhua <1281592874@qq.com> Co-authored-by: Insu Jang <insujang@umich.edu> Co-authored-by: Gao, Ruiyuan <905370712@qq.com> Co-authored-by: hxwang <wang1570@e.ntu.edu.sg> Co-authored-by: Michelle <qianranma8@gmail.com> Co-authored-by: root <root@notebook-8f919155-6035-47b4-9c6f-1be133b9e2c9-0.notebook-8f919155-6035-47b4-9c6f-1be133b9e2c9.colossal-ai.svc.cluster.local> * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update train_dpo.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update low_level_zero_plugin.py * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [CI] Remove triton version for compatibility bug; update req torch >=2.2 (#6018) * remove triton version * remove torch 2.2 * remove torch 2.1 * debug * remove 2.1 build tests * require torch >=2.2 --------- Co-authored-by: Edenzzzz <wtan45@wisc.edu> * [plugin] hotfix zero plugin (#6036) * [plugin] hotfix zero plugin * [plugin] hotfix zero plugin * [Colossal-LLaMA] Refactor latest APIs (#6030) * refactor latest code * update api * add dummy dataset * update Readme * add setup * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update files * add PP support * update arguments * update argument * reorg folder * update version * remove IB infor * update utils * update readme * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update save for zero * update save * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add apex * update --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * add fused norm (#6038) * [FP8] unsqueeze scale to make it compatible with torch.compile (#6040) * [colossalai/checkpoint_io/...] 
fix bug in load_state_dict_into_model; format error msg (#6020) * fix bug in load_state_dict_into_model; format error msg * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update utils.py to support checking missing_keys * Update general_checkpoint_io.py fix bug in missing_keys error message * retrigger tests --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [Hotfix] Remove deprecated install (#6042) * remove deprecated install * remove unused folder * [fp8] optimize all-gather (#6043) * [fp8] optimize all-gather * [fp8] fix all gather fp8 ring * [fp8] enable compile * [fp8] fix all gather fp8 ring * [fp8] fix linear hook (#6046) * [fp8] disable all_to_all_fp8 in intranode (#6045) * enhance all_to_all_fp8 with internode comm control * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * disable some fp8 ops due to performance issue * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [release] update version (#6041) * [release] update version * [devops] update comp test * [devops] update comp test debug * [devops] debug comp test * [devops] debug comp test * [devops] debug comp test * [devops] debug comp test * [devops] debug comp test * [Feature] Split cross-entropy computation in SP (#5959) * halfway * fix cross-PP-stage position id length diff bug * fix typo * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * unified cross entropy func for all shardformer models * remove redundant lines * add basic ring attn; debug cross entropy * fwd bwd logic complete * fwd bwd logic complete; add experimental triton rescale * precision tests passed * precision tests passed * fix typos and remove misc files * update softmax_lse shape by new interface * change tester name * remove buffer clone; support packed seq layout * add varlen tests * fix typo * all tests passed * add dkv_group; fix mask * remove debug statements * adapt chatglm, command-R, qwen * debug * halfway * fix cross-PP-stage position id length diff bug * fix typo * fix typo * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * unified cross entropy func for all shardformer models * remove redundant lines * add basic ring attn; debug cross entropy * fwd bwd logic complete * fwd bwd logic complete; add experimental triton rescale * precision tests passed * precision tests passed * fix typos and remove misc files * add sp_mode to benchmark; fix varlen interface * update softmax_lse shape by new interface * add varlen tests * fix typo * all tests passed * add dkv_group; fix mask * remove debug statements * add comments * q1 index only once * remove events to simplify stream sync * simplify forward/backward logic * 2d ring forward passed * 2d ring backward passed * fixes * fix ring attn loss * 2D ring backward + llama passed * merge * update logger * fix typo * rebase * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix typo * remove typos * fixes * support GPT --------- Co-authored-by: Edenzzzz <wtan45@wisc.edu> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [hotfix] moe hybrid parallelism benchmark & follow-up fix (#6048) * [example] 
pass use_fp8_comm flag to all plugins * [example] add mixtral benchmark * [moe] refine assertion and check * [moe] fix mixtral & add more tests * [moe] consider checking dp * sp group and moe_dp_group * [mixtral] remove gate tp & add more tests * [deepseek] fix tp & sp for deepseek * [mixtral] minor fix * [deepseek] add deepseek benchmark * [fp8] hotfix backward hook (#6053) * [fp8] hotfix backward hook * [fp8] hotfix pipeline loss accumulation * [doc] update sp doc (#6055) * update sp doc * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * fix --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * fix the sp * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix the attn * fix * fix * fix * fix * [zerobubble]Support ZeroBubble Pipeline (#6034) * [feat] add zerobubble pp (just a frame now); add POC test for dx_dw; add test for zerobubble; * [feat] add dw test; * [fix] fix weight not close; * [update] update text; * [feat] add test run_fwd_bwd automatic scheduling; * [feat] split communication and calculation; fix pop empty send_bwd_buffer error; * [feat] add test for p & p grad; * [feat] add comments for ZBV func; * [fix] rm useless assign and comments; * [fix] fix ci test; add pytest; * [feat] add run_fwd_bwd_with_microbatch (replace input) & test; add p&p.grad assert close test & all pass; * [feat] add apply v_schedule graph; p & p.grad assert err exist; * [fix] update * [feat] fix ci; add assert; * [feat] fix poc format * [feat] fix func name & ci; add comments; * [fix] fix poc test; add comments in poc; * [feat] add optim backward_b_by_grad * [feat] fix optimizer bwd b & w; support return accum loss & output * [feat] add fwd_bwd_step, run_fwd_only; * [fix] fix optim bwd; add license for v_schedule; remove redundant attributes; fix schedule loop "while"--> "for"; add communication dict; * [fix] fix communication_map; * [feat] update test; rm comments; * [fix] rm zbv in hybridplugin * [fix] fix optim bwd; * [fix] fix optim bwd; * [fix] rm output.data after send fwd; * [fix] fix bwd step if condition; remove useless comments and format info; * [fix] fix detach output & release output; * [fix] rm requir_grad for output; * [fix] fix requir grad position and detach position and input&output local buffer append position; * [feat] add memory assertation; * [fix] fix mem check; * [fix] mem assertation' * [fix] fix mem assertation * [fix] fix mem; use a new model shape; only assert mem less and equal than theo; * [fix] fix model zoo import; * [fix] fix redundant detach & clone; add buffer assertation in the end; * [fix] add output_obj_grad assert None at bwd b step; replace input_obj.require_grad_ with treemap; * [fix] update optim state dict assert (include param group & state); fix mem assert after add optim; * [fix] add testcase with microbatch 4; * [fp8] fix missing fp8_comm flag in mixtral (#6057) * fix * fix * fix * [fp8] Disable all_gather intranode. 
Disable Redundant all_gather fp8 (#6059) * all_gather only internode, fix pytest * fix cuda arch <89 compile pytest error * fix pytest failure * disable all_gather_into_tensor_flat_fp8 * fix fp8 format * fix pytest * fix conversations * fix chunk tuple to list * [doc] FP8 training and communication document (#6050) * Add FP8 training and communication document * add fp8 docstring for plugins * fix typo * fix typo * fix * fix * [moe] add parallel strategy for shared_expert && fix test for deepseek (#6063) * [ColossalEval] support for vllm (#6056) * support vllm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * modify vllm and update readme * run pre-commit * remove dupilicated lines and refine code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update param name * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * refine code * update readme * refine code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> * [release] update version (#6062) * [feat] add zerobubble pp (just a frame now); add POC test for dx_dw; add test for zerobubble; * [update] update text; * [feat] add test run_fwd_bwd automatic scheduling; * [feat] fix poc format * [fix] fix poc test; add comments in poc; * [feat] add optim backward_b_by_grad * [feat] fix optimizer bwd b & w; support return accum loss & output * [fix] fix optim bwd; add license for v_schedule; remove redundant attributes; fix schedule loop "while"--> "for"; add communication dict; * [feat] update test; rm comments; * [fix] rm zbv in hybridplugin * [fix] fix optim bwd; * [fix] fix optim bwd; * [fix] rm output.data after send fwd; * [fix] fix bwd step if condition; remove useless comments and format info; * [fix] fix mem check; * [fix] fix mem assertation * [fix] fix mem; use a new model shape; only assert mem less and equal than theo; * [fix] fix model zoo import; * [feat] moehybrid support zerobubble; * [fix] fix zerobubble pp for shardformer type input; * [fix] fix require_grad & deallocate call; * [fix] fix mem assert; * [fix] fix fwd branch, fwd pass both micro_batch & internal_inputs' * [fix] fix pipeline util func deallocate --> release_tensor_data; fix bwd_b loss bwd branch; * [fix] fix zerobubble; support shardformer model type; * [fix] fix test_pipeline_utils ci; * [plugin] hybrid support zero bubble pipeline (#6060) * hybrid support zbv * fix fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * Update zero_bubble_pp.py * fix * fix-ci * fix [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * fix * fix * [zerobubble]Support ZeroBubble Pipeline (#6034) * [feat] add zerobubble pp (just a frame now); add POC test for dx_dw; add test for zerobubble; * [feat] add dw test; * [fix] fix 
weight not close; * [update] update text; * [feat] add test run_fwd_bwd automatic scheduling; * [feat] split communication and calculation; fix pop empty send_bwd_buffer error; * [feat] add test for p & p grad; * [feat] add comments for ZBV func; * [fix] rm useless assign and comments; * [fix] fix ci test; add pytest; * [feat] add run_fwd_bwd_with_microbatch (replace input) & test; add p&p.grad assert close test & all pass; * [feat] add apply v_schedule graph; p & p.grad assert err exist; * [fix] update * [feat] fix ci; add assert; * [feat] fix poc format * [feat] fix func name & ci; add comments; * [fix] fix poc test; add comments in poc; * [feat] add optim backward_b_by_grad * [feat] fix optimizer bwd b & w; support return accum loss & output * [feat] add fwd_bwd_step, run_fwd_only; * [fix] fix optim bwd; add license for v_schedule; remove redundant attributes; fix schedule loop "while"--> "for"; add communication dict; * [fix] fix communication_map; * [feat] update test; rm comments; * [fix] rm zbv in hybridplugin * [fix] fix optim bwd; * [fix] fix optim bwd; * [fix] rm output.data after send fwd; * [fix] fix bwd step if condition; remove useless comments and format info; * [fix] fix detach output & release output; * [fix] rm requir_grad for output; * [fix] fix requir grad position and detach position and input&output local buffer append position; * [feat] add memory assertation; * [fix] fix mem check; * [fix] mem assertation' * [fix] fix mem assertation * [fix] fix mem; use a new model shape; only assert mem less and equal than theo; * [fix] fix model zoo import; * [fix] fix redundant detach & clone; add buffer assertation in the end; * [fix] add output_obj_grad assert None at bwd b step; replace input_obj.require_grad_ with treemap; * [fix] update optim state dict assert (include param group & state); fix mem assert after add optim; * [fix] add testcase with microbatch 4; * hybrid support zbv * fix fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update zero_bubble_pp.py * fix * fix-ci * fix [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * fix * fix * fix * fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * fix * fix --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: duanjunwen <935724073@qq.com> * [feat] add zerobubble pp (just a frame now); add POC test for dx_dw; add test for zerobubble; * [update] update text; * [feat] add test run_fwd_bwd automatic scheduling; * [feat] fix poc format * [fix] fix poc test; add comments in poc; * [feat] add optim backward_b_by_grad * [feat] fix optimizer bwd b & w; support return accum loss & output * [fix] fix optim bwd; add license for v_schedule; remove redundant attributes; fix schedule loop "while"--> "for"; add communication dict; * [feat] update test; rm comments; * [fix] fix optim bwd; * [fix] fix optim bwd; * [fix] rm output.data after send fwd; * [fix] fix bwd step if condition; remove useless comments and format info; * [fix] fix mem check; * 
[fix] fix mem assertation * [fix] fix mem; use a new model shape; only assert mem less and equal than theo; * [fix] fix model zoo import; * [fix] fix mem assert; * [fix] fix fwd branch, fwd pass both micro_batch & internal_inputs' * [plugin] hybrid support zero bubble pipeline (#6060) * hybrid support zbv * fix fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * Update zero_bubble_pp.py * fix * fix-ci * fix [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * fix * fix * [zerobubble]Support ZeroBubble Pipeline (#6034) * [feat] add zerobubble pp (just a frame now); add POC test for dx_dw; add test for zerobubble; * [feat] add dw test; * [fix] fix weight not close; * [update] update text; * [feat] add test run_fwd_bwd automatic scheduling; * [feat] split communication and calculation; fix pop empty send_bwd_buffer error; * [feat] add test for p & p grad; * [feat] add comments for ZBV func; * [fix] rm useless assign and comments; * [fix] fix ci test; add pytest; * [feat] add run_fwd_bwd_with_microbatch (replace input) & test; add p&p.grad assert close test & all pass; * [feat] add apply v_schedule graph; p & p.grad assert err exist; * [fix] update * [feat] fix ci; add assert; * [feat] fix poc format * [feat] fix func name & ci; add comments; * [fix] fix poc test; add comments in poc; * [feat] add optim backward_b_by_grad * [feat] fix optimizer bwd b & w; support return accum loss & output * [feat] add fwd_bwd_step, run_fwd_only; * [fix] fix optim bwd; add license for v_schedule; remove redundant attributes; fix schedule loop "while"--> "for"; add communication dict; * [fix] fix communication_map; * [feat] update test; rm comments; * [fix] rm zbv in hybridplugin * [fix] fix optim bwd; * [fix] fix optim bwd; * [fix] rm output.data after send fwd; * [fix] fix bwd step if condition; remove useless comments and format info; * [fix] fix detach output & release output; * [fix] rm requir_grad for output; * [fix] fix requir grad position and detach position and input&output local buffer append position; * [feat] add memory assertation; * [fix] fix mem check; * [fix] mem assertation' * [fix] fix mem assertation * [fix] fix mem; use a new model shape; only assert mem less and equal than theo; * [fix] fix model zoo import; * [fix] fix redundant detach & clone; add buffer assertation in the end; * [fix] add output_obj_grad assert None at bwd b step; replace input_obj.require_grad_ with treemap; * [fix] update optim state dict assert (include param group & state); fix mem assert after add optim; * [fix] add testcase with microbatch 4; * hybrid support zbv * fix fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update zero_bubble_pp.py * fix * fix-ci * fix [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci fix * fix * [pre-commit.ci] 
auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * fix * fix * fix * fix * fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix * fix * fix * fix --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: duanjunwen <935724073@qq.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * zbv support zero * fix * fix * fix --------- Co-authored-by: HangXu <hangxu0304@gmail.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: GuangyaoZhang <xjtu521@qq.com> Co-authored-by: Hongxin Liu <lhx0217@gmail.com> Co-authored-by: YeAnbang <anbangy2@outlook.com> Co-authored-by: Haze188 <haze188@qq.com> Co-authored-by: Edenzzzz <wenxuan.tan@wisc.edu> Co-authored-by: Edenzzzz <wtan45@wisc.edu> Co-authored-by: Runyu Lu <77330637+LRY89757@users.noreply.github.com> Co-authored-by: YeAnbang <44796419+YeAnbang@users.noreply.github.com> Co-authored-by: Stephan Kö <stephankoe@users.noreply.github.com> Co-authored-by: アマデウス <kurisusnowdeng@users.noreply.github.com> Co-authored-by: Tong Li <tong.li352711588@gmail.com> Co-authored-by: zhurunhua <1281592874@qq.com> Co-authored-by: Insu Jang <insujang@umich.edu> Co-authored-by: Gao, Ruiyuan <905370712@qq.com> Co-authored-by: hxwang <wang1570@e.ntu.edu.sg> Co-authored-by: Michelle <qianranma8@gmail.com> Co-authored-by: Wang Binluo <32676639+wangbluo@users.noreply.github.com> Co-authored-by: wangbluo <2538539015@qq.com> Co-authored-by: root <root@notebook-8f919155-6035-47b4-9c6f-1be133b9e2c9-0.notebook-8f919155-6035-47b4-9c6f-1be133b9e2c9.colossal-ai.svc.cluster.local> Co-authored-by: duanjunwen <935724073@qq.com> Co-authored-by: Camille Zhong <44392324+Camille7777@users.noreply.github.com> * [fix] fix llama, mixtral benchmark zbv loss none bug; update mixtral & llama policy and modeling; * [feat] Linear1D_COL/ROW support zbv WeightGradStore; * [feat] support use_zbv in llama, mixtral modeling; only replace Linear1D_Col/Row policy; * [fix] fix test case; moe error in second iter * [feat]EPMixtralSparseMoeBlock (op in MOE) support zbv; * [fix] fix bwd b; now bwd w only for Layer replaced by Linear1D_Col/Row; other layer perform a fully bwd; * [fix] debug zbv llama test; * [fix] rm use_zbv flag in Shardconfig; rm debug info; * [fix] add & fix llama test * [feat] support meta cache, meta_grad_send, meta_tensor_send; fix runtime too long in Recv Bwd; benchmark for llama + Hybrid(tp+pp); * [fix\ fix fail case test_shard_llama * [fix] fix test_shard_llama * [fix] fix llama modeling policy; * [fix] fix test_shard_llama ci; * [fix] fix test zerobubble * [fix] fix handle name; rm useless comments; * [fix] fix send recv signature; * [fix] fix comment in llama & benchmark * [feat] support no tensor parallel Linear in shardformer; Add test for use weightGradStore and not use WeightGradStore * [fix] fix linear (no tp) ops func name; * [feat] support zbv in mixtral benchmark; (#6083) * [feat] support zbv in mixtral benchmark; * [fix] MixtralForCausalLMPolicy get_held_layer support zbv; * [feat] update MixtralPipelineForwards --> mixtral_model_forward; support zbv; * [feat] support MixtralPipelineForwards--> mixtral_for_causal_lm_forward for zbv * [fix] fix llama, mixtral benchmark zbv loss none bug; update 
mixtral & llama policy and modeling; * [feat] Linear1D_COL/ROW support zbv WeightGradStore; * [feat] support use_zbv in llama, mixtral modeling; only replace Linear1D_Col/Row policy; * [fix] fix test case; moe error in second iter * [feat]EPMixtralSparseMoeBlock (op in MOE) support zbv; * [fix] fix bwd b; now bwd w only for Layer replaced by Linear1D_Col/Row; other layer perform a fully bwd; * [fix] debug zbv llama test; * [fix] rm use_zbv flag in Shardconfig; rm debug info; * [fix] add & fix llama test * [feat] support meta cache, meta_grad_send, meta_tensor_send; fix runtime too long in Recv Bwd; benchmark for llama + Hybrid(tp+pp); * [fix\ fix fail case test_shard_llama * [fix] fix test_shard_llama * [fix] fix llama modeling policy; * [fix] fix test_shard_llama ci; * [fix] fix test zerobubble * [fix] fix handle name; rm useless comments; * [fix] fix send recv signature; * [fix] fix comment in llama & benchmark * [feat] support no tensor parallel Linear in shardformer; Add test for use weightGradStore and not use WeightGradStore * [fix] fix linear (no tp) ops func name; * [fix] fix fp8 args in HybridParallel * [fix] fix hybridparall use_fp8 config * [fix] fix use_fp8 flag * [fix] fix model zoo init * [feat] support no_tp Linear for sharderformer.llama * [fix] fix zbv llama pp4 * [fix] fix send_tensor_metadata & send_grad_metadata; * [feat] fix testcase; * [feat] support mixtral policy with zbv tp_Linear & non_tp_Linear * [feat] update mixtral policy & bert policy for zerobubble * [fix] fix p2p error in zbv * [fix] fix attn * [fix] fix mixtral modeling & policy; update wait handles; doing benchmarking for llama hybrid; * [fix] fix zbv wait_handle * [fix] rm debug info; update llama policy; update wait handle * [fix] fix test_lora * [fix] fix test_lora in llama policy * [fix] fix wait handle in run_fwd_bwd * [fix] remove debug info; * [fix] rm unused comments * [fix] fix fp8 overlap code * [fix] fix yml file & v_schedule comments * [fix] rm fwd only meta cache comments; --------- Co-authored-by: flybird11111 <1829166702@qq.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: HangXu <hangxu0304@gmail.com> Co-authored-by: GuangyaoZhang <xjtu521@qq.com> Co-authored-by: Hongxin Liu <lhx0217@gmail.com> Co-authored-by: YeAnbang <anbangy2@outlook.com> Co-authored-by: Haze188 <haze188@qq.com> Co-authored-by: Edenzzzz <wenxuan.tan@wisc.edu> Co-authored-by: Edenzzzz <wtan45@wisc.edu> Co-authored-by: Runyu Lu <77330637+LRY89757@users.noreply.github.com> Co-authored-by: YeAnbang <44796419+YeAnbang@users.noreply.github.com> Co-authored-by: Stephan Kö <stephankoe@users.noreply.github.com> Co-authored-by: アマデウス <kurisusnowdeng@users.noreply.github.com> Co-authored-by: Tong Li <tong.li352711588@gmail.com> Co-authored-by: zhurunhua <1281592874@qq.com> Co-authored-by: Insu Jang <insujang@umich.edu> Co-authored-by: Gao, Ruiyuan <905370712@qq.com> Co-authored-by: hxwang <wang1570@e.ntu.edu.sg> Co-authored-by: Michelle <qianranma8@gmail.com> Co-authored-by: Wang Binluo <32676639+wangbluo@users.noreply.github.com> Co-authored-by: wangbluo <2538539015@qq.com> Co-authored-by: root <root@notebook-8f919155-6035-47b4-9c6f-1be133b9e2c9-0.notebook-8f919155-6035-47b4-9c6f-1be133b9e2c9.colossal-ai.svc.cluster.local> Co-authored-by: Camille Zhong <44392324+Camille7777@users.noreply.github.com>
parent 184a653704
commit e0c68ab6d3
@@ -1,11 +1,12 @@
 from .p2p import PipelineP2PCommunication
-from .schedule import InterleavedSchedule, OneForwardOneBackwardSchedule, PipelineSchedule
+from .schedule import InterleavedSchedule, OneForwardOneBackwardSchedule, PipelineSchedule, ZeroBubbleVPipeScheduler
 from .stage_manager import PipelineStageManager
 
 __all__ = [
     "PipelineSchedule",
     "OneForwardOneBackwardSchedule",
     "InterleavedSchedule",
+    "ZeroBubbleVPipeScheduler",
     "PipelineP2PCommunication",
     "PipelineStageManager",
 ]
@@ -1,9 +1,11 @@
 from .base import PipelineSchedule
 from .interleaved_pp import InterleavedSchedule
 from .one_f_one_b import OneForwardOneBackwardSchedule
+from .zero_bubble_pp import ZeroBubbleVPipeScheduler
 
 __all__ = [
     "PipelineSchedule",
     "OneForwardOneBackwardSchedule",
     "InterleavedSchedule",
+    "ZeroBubbleVPipeScheduler",
 ]
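With both __init__ hunks applied, the new scheduler is re-exported at the package level. A quick import check, assuming a ColossalAI build that already contains this commit:

from colossalai.pipeline import ZeroBubbleVPipeScheduler
from colossalai.pipeline.schedule import ZeroBubbleVPipeScheduler as ZBV

assert ZeroBubbleVPipeScheduler is ZBV  # both import paths expose the same class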
@@ -0,0 +1,449 @@
# Refer from Zero Bubble Pipeline Parallelism.
# Github: https://github.com/sail-sg/zero-bubble-pipeline-parallelism
# Paper: https://arxiv.org/abs/2401.10241
# The following applies to all files unless otherwise noted:
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of NVIDIA CORPORATION nor the names of its
#       contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from collections import deque
from dataclasses import dataclass


@dataclass(eq=True, frozen=True)
class ScheduledNode:
    type: str
    chunk: int
    stage: int
    minibatch: int
    start_time: int = 0
    completion_time: int = 0
    rollback: bool = False


class PipelineGraph(object):
    """PipelineGraph"""

    def __init__(
        self,
        n_stage,
        n_micro,
        f_cost,
        b_cost,
        w_cost,
        c_cost,
        f_mem,
        b_mem,
        w_mem,
        max_mem=None,
    ):
        self.n_node = 6 * n_stage * n_micro
        self.n_stage = n_stage
        self.n_micro = n_micro
        self.f_cost = f_cost
        self.b_cost = b_cost
        self.w_cost = w_cost
        self.c_cost = c_cost
        self.f_mem = f_mem
        self.b_mem = b_mem
        self.w_mem = w_mem
        self.fbw_cost = [f_cost, b_cost, w_cost]
        self.fbw_mem = [f_mem, b_mem, w_mem]
        self.max_mem = max_mem or f_mem * self.n_stage * 2

    def get_id(self, cat, chunk, stage, micro):
        return (
            cat * 2 * self.n_stage * self.n_micro + chunk * self.n_stage * self.n_micro + stage * self.n_micro + micro
        )

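# --- Illustrative sketch (not part of this commit): how get_id flattens the 4-D index ---
# With n_stage=4 and n_micro=8, ids are laid out as cat-major blocks of 2 * n_stage * n_micro = 64,
# then chunk blocks of 32, then stage rows of 8:
#   get_id(cat=0, chunk=0, stage=0, micro=0) ->   0   (first F of chunk 0 on stage 0)
#   get_id(cat=0, chunk=1, stage=3, micro=7) ->  63   (last F of chunk 1 on stage 3)
#   get_id(cat=1, chunk=0, stage=0, micro=0) ->  64   (first B)
#   get_id(cat=2, chunk=1, stage=3, micro=7) -> 191 == 6 * n_stage * n_micro - 1 == n_node - 1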
def try_v_schedule(self, fill_f=True, fill_b=True, approved_bubble=None):
|
||||||
|
count = []
|
||||||
|
for i in range(self.n_stage):
|
||||||
|
count.append([0] * 6)
|
||||||
|
|
||||||
|
end_time = [-1] * self.n_node
|
||||||
|
cur_time = [0] * self.n_stage
|
||||||
|
mem = [0] * self.n_stage
|
||||||
|
stage_bubble = [0] * self.n_stage
|
||||||
|
pending_w = [deque() for _ in range(self.n_stage)]
|
||||||
|
schedule = [[] for _ in range(self.n_stage)]
|
||||||
|
stage_str = [" " * i for i in range(self.n_stage)]
|
||||||
|
|
||||||
|
if approved_bubble is None:
|
||||||
|
approved_bubble = [-1] * self.n_stage
|
||||||
|
max_approved_bubble = max(approved_bubble)
|
||||||
|
|
||||||
|
def get_max_stage_bubble(stage=-1):
|
||||||
|
max_stage_bubble = 0
|
||||||
|
for bb in stage_bubble:
|
||||||
|
max_stage_bubble = max(max_stage_bubble, bb)
|
||||||
|
if stage >= 0:
|
||||||
|
max_stage_bubble = max(max_stage_bubble, max_approved_bubble - approved_bubble[stage])
|
||||||
|
return max_stage_bubble
|
||||||
|
|
||||||
|
def put_w(stage):
|
||||||
|
assert len(pending_w[stage]) > 0
|
||||||
|
_, chunk_, _ = pending_w[stage].popleft()
|
||||||
|
put(2, chunk_, stage)
|
||||||
|
|
||||||
|
def put(cat, chunk, stage, assert_cnt=True):
|
||||||
|
_tmp = _no_bubble = cur_time[stage] + self.fbw_cost[cat]
|
||||||
|
_cnt = count[stage][cat * 2 + chunk]
|
||||||
|
# assert _cnt < self.n_micro
|
||||||
|
if _cnt >= self.n_micro:
|
||||||
|
if not assert_cnt:
|
||||||
|
stage_str[stage] += " "
|
||||||
|
cur_time[stage] = _tmp # TODO
|
||||||
|
return
|
||||||
|
assert False
|
||||||
|
assert mem[stage] + self.fbw_mem[cat] <= self.max_mem
|
||||||
|
stage_str[stage] += "FfBbWw"[cat * 2 + chunk] + str(_cnt + 1) + " " * (3 - len(str(_cnt + 1)))
|
||||||
|
if cat > 0 or chunk > 0:
|
||||||
|
last_id = cat * 2 + chunk - 1
|
||||||
|
if cat < 2:
|
||||||
|
assert end_time[self.get_id(last_id // 2, last_id % 2, stage, _cnt)] >= 0
|
||||||
|
else:
|
||||||
|
assert end_time[self.get_id(1, chunk, stage, _cnt)] >= 0
|
||||||
|
if chunk == 1 and cat < 2:
|
||||||
|
if stage < self.n_stage - 1:
|
||||||
|
_fa_id = self.get_id(cat, chunk, stage + 1, _cnt)
|
||||||
|
assert end_time[_fa_id] >= 0
|
||||||
|
_tmp = max(_tmp, end_time[_fa_id] + self.c_cost + self.fbw_cost[cat])
|
||||||
|
if chunk == 0 and cat < 2:
|
||||||
|
if stage > 0:
|
||||||
|
_fa_id = self.get_id(cat, chunk, stage - 1, _cnt)
|
||||||
|
assert end_time[_fa_id] >= 0, f"{cat}, {chunk}, {stage}, {_cnt}"
|
||||||
|
_tmp = max(_tmp, end_time[_fa_id] + self.c_cost + self.fbw_cost[cat])
|
||||||
|
_id = self.get_id(cat, chunk, stage, _cnt)
|
||||||
|
if count[stage][0] > 0:
|
||||||
|
stage_bubble[stage] += _tmp - _no_bubble
|
||||||
|
end_time[_id] = _tmp
|
||||||
|
cur_time[stage] = _tmp
|
||||||
|
mem[stage] += self.fbw_mem[cat]
|
||||||
|
# noinspection PyTypeChecker
|
||||||
|
schedule[stage].append((cat, chunk, _cnt))
|
||||||
|
if cat == 1:
|
||||||
|
pending_w[stage].append((2, chunk, _cnt))
|
||||||
|
count[stage][cat * 2 + chunk] += 1
|
||||||
|
|
||||||
|
for i in range(self.n_stage):
|
||||||
|
put(0, 0, i)
|
||||||
|
for i in range(self.n_stage - 1, -1, -1):
|
||||||
|
if i == self.n_stage - 1:
|
||||||
|
put(0, 1, i)
|
||||||
|
continue
|
||||||
|
tmp = end_time[self.get_id(0, 1, i + 1, 0)] + self.c_cost
|
||||||
|
while (
|
||||||
|
mem[i] + self.fbw_mem[0] * (2 + i * 2) <= self.max_mem
|
||||||
|
and cur_time[i] + self.fbw_cost[0] <= tmp
|
||||||
|
and count[i][0] < self.n_micro
|
||||||
|
):
|
||||||
|
for j in range(i + 1):
|
||||||
|
put(0, 0, j)
|
||||||
|
put(0, 1, i)
|
||||||
|
iter_chunk_ = 0
|
||||||
|
end_tmp = 0
|
||||||
|
for i in range(self.n_stage):
|
||||||
|
if i == 0:
|
||||||
|
end_tmp = cur_time[0] + self.fbw_cost[1]
|
||||||
|
continue
|
||||||
|
tmp = end_tmp + self.c_cost
|
||||||
|
while (
|
||||||
|
count[i][0] + count[i][1] < count[i - 1][0] + count[i - 1][1]
|
||||||
|
or count[i][1] <= count[i - 1][1] < self.n_micro
|
||||||
|
):
|
||||||
|
for j in range(self.n_stage - 1, i - 1, -1):
|
||||||
|
if count[j][iter_chunk_] < self.n_micro:
|
||||||
|
put(0, iter_chunk_, j)
|
||||||
|
iter_chunk_ = 1 - iter_chunk_
|
||||||
|
|
||||||
|
for _ in range(2 * self.n_micro):
|
||||||
|
# check mem before putting b
|
||||||
|
for i in range(self.n_stage):
|
||||||
|
while mem[i] + self.fbw_mem[1] > self.max_mem:
|
||||||
|
assert len(pending_w[i]) > 0
|
||||||
|
put_w(i)
|
||||||
|
b0_ranks, b1_ranks = [], []
|
||||||
|
for i in range(self.n_stage):
|
||||||
|
if count[i][3] >= count[i][2]:
|
||||||
|
b0_ranks.append(i)
|
||||||
|
elif i == self.n_stage - 1:
|
||||||
|
b1_ranks.append(i)
|
||||||
|
else:
|
||||||
|
fa_id = self.get_id(1, 1, i + 1, count[i][3])
|
||||||
|
if end_time[fa_id] >= 0 or count[i][2] >= self.n_micro:
|
||||||
|
b1_ranks.append(i)
|
||||||
|
else:
|
||||||
|
b0_ranks.append(i)
|
||||||
|
b_ranks = []
|
||||||
|
# put b1
|
||||||
|
for i in reversed(b1_ranks):
|
||||||
|
b_ranks.append((i, 1))
|
||||||
|
# put b0
|
||||||
|
for i in b0_ranks:
|
||||||
|
b_ranks.append((i, 0))
|
||||||
|
for i, _chunk_ in b_ranks:
|
||||||
|
fa_id = -1
|
||||||
|
if _chunk_ == 1 and i < self.n_stage - 1:
|
||||||
|
fa_id = self.get_id(1, 1, i + 1, count[i][3])
|
||||||
|
if _chunk_ == 0 and i > 0:
|
||||||
|
fa_id = self.get_id(1, 0, i - 1, count[i][2])
|
||||||
|
while (
|
||||||
|
len(pending_w[i]) > 0
|
||||||
|
and fa_id >= 0
|
||||||
|
and end_time[fa_id] + self.c_cost >= cur_time[i] + self.fbw_cost[2]
|
||||||
|
):
|
||||||
|
# fill the bubble
|
||||||
|
put_w(i)
|
||||||
|
if (
|
||||||
|
len(pending_w[i]) > 0
|
||||||
|
and end_time[fa_id] + self.c_cost - cur_time[i] > get_max_stage_bubble(i) - stage_bubble[i]
|
||||||
|
):
|
||||||
|
if _chunk_ == 1:
|
||||||
|
put_w(i)
|
||||||
|
elif fill_b:
|
||||||
|
put_w(i)
|
||||||
|
put(1, _chunk_, i)
|
||||||
|
|
||||||
|
# put f
|
||||||
|
for i in range(self.n_stage):
|
||||||
|
if count[i][1] >= self.n_micro:
|
||||||
|
continue
|
||||||
|
put_item = None
|
||||||
|
if count[i][1] >= count[i][0]:
|
||||||
|
put_item = 0
|
||||||
|
elif i == self.n_stage - 1:
|
||||||
|
put_item = 1
|
||||||
|
else:
|
||||||
|
if end_time[self.get_id(0, 1, i + 1, count[i][1])] >= 0:
|
||||||
|
put_item = 1
|
||||||
|
elif count[i][0] < self.n_micro:
|
||||||
|
if i == 0:
|
||||||
|
put_item = 0
|
||||||
|
elif end_time[self.get_id(0, 0, i - 1, count[i][0])] >= 0:
|
||||||
|
put_item = 0
|
||||||
|
if put_item is None:
|
||||||
|
continue
|
||||||
|
# check mem before putting f
|
||||||
|
while mem[i] + self.fbw_mem[0] > self.max_mem:
|
||||||
|
assert len(pending_w[i]) > 0
|
||||||
|
put_w(i)
|
||||||
|
fa_id = -1
|
||||||
|
if put_item == 0 and i > 0:
|
||||||
|
fa_id = self.get_id(0, 0, i - 1, count[i][0])
|
||||||
|
if put_item == 1 and i < self.n_stage - 1:
|
||||||
|
fa_id = self.get_id(0, 1, i + 1, count[i][1])
|
||||||
|
while (
|
||||||
|
len(pending_w[i]) > 0
|
||||||
|
and fa_id >= 0
|
||||||
|
and end_time[fa_id] + self.c_cost >= cur_time[i] + self.fbw_cost[2]
|
||||||
|
):
|
||||||
|
# fill the bubble
|
||||||
|
put_w(i)
|
||||||
|
if (
|
||||||
|
len(pending_w[i]) > 0
|
||||||
|
and end_time[fa_id] + self.c_cost - cur_time[i] > get_max_stage_bubble(i) - stage_bubble[i]
|
||||||
|
):
|
||||||
|
if fill_f:
|
||||||
|
put_w(i)
|
||||||
|
put(0, put_item, i)
|
||||||
|
|
||||||
|
for i in range(self.n_stage):
|
||||||
|
while len(pending_w[i]) > 0:
|
||||||
|
put_w(i)
|
||||||
|
|
||||||
|
max_bubble = get_max_stage_bubble()
|
||||||
|
expected_time = sum(self.fbw_cost) * self.n_micro * 2
|
||||||
|
bubble_rate = max_bubble / expected_time  # bubble ratio of this schedule (diagnostic only; not used further)
|
||||||
|
if max_approved_bubble < 0 or max_bubble < max_approved_bubble:
|
||||||
|
_schedule, _end_time, _max_bubble = self.try_v_schedule(
|
||||||
|
fill_f=fill_f,
|
||||||
|
fill_b=fill_b,
|
||||||
|
approved_bubble=stage_bubble,
|
||||||
|
)
|
||||||
|
if _max_bubble < max_bubble:
|
||||||
|
return _schedule, _end_time, _max_bubble
|
||||||
|
return schedule, end_time, max_bubble
|
||||||
|
|
||||||
|
def print_details(self, end_time, print_scaling=1):
|
||||||
|
for stage in range(self.n_stage):
|
||||||
|
stage_str = ["."] * int(max(end_time) / print_scaling)
|
||||||
|
for _cat in range(3):
|
||||||
|
for _chunk in range(2):
|
||||||
|
for _micro in range(self.n_micro):
|
||||||
|
_id = self.get_id(_cat, _chunk, stage, _micro)
|
||||||
|
if end_time[_id] < 0:
|
||||||
|
continue
|
||||||
|
end = int(end_time[_id] / print_scaling)
|
||||||
|
start = int((end_time[_id] - self.fbw_cost[_cat]) / print_scaling)
|
||||||
|
for j in range(start, end):
|
||||||
|
if j == start or j == end - 1:
|
||||||
|
stage_str[j] = "FfBbWw"[_cat * 2 + _chunk]
|
||||||
|
elif j == start + 1:
|
||||||
|
if _micro >= 10:
|
||||||
|
stage_str[j] = str(_micro // 10)
|
||||||
|
else:
|
||||||
|
stage_str[j] = str(_micro)
|
||||||
|
elif j == start + 2 and _micro >= 10:
|
||||||
|
stage_str[j] = str(_micro % 10)
|
||||||
|
else:
|
||||||
|
stage_str[j] = "-"
|
||||||
|
_str = ""
|
||||||
|
for _c in stage_str:
|
||||||
|
_str += _c
|
||||||
|
print(_str)
|
||||||
|
|
||||||
|
def get_v_schedule(self, only_run_time=False):
|
||||||
|
schedule, end_time, max_bubble = None, None, None
|
||||||
|
expected_time = sum(self.fbw_cost) * self.n_micro * 2
|
||||||
|
for fill_b in [True, False]:
|
||||||
|
for fill_f in [True, False]:
|
||||||
|
_schedule, _end_time, _max_bubble = self.try_v_schedule(fill_b=fill_b, fill_f=fill_f)
|
||||||
|
if max_bubble is None or _max_bubble < max_bubble:
|
||||||
|
max_bubble = _max_bubble
|
||||||
|
schedule = _schedule
|
||||||
|
end_time = _end_time
|
||||||
|
if only_run_time:
|
||||||
|
return max_bubble + expected_time
|
||||||
|
bubble_rate = max_bubble / (expected_time + max_bubble)  # overall bubble ratio (diagnostic only; not used further)
|
||||||
|
local_order = [[] for _ in range(self.n_stage)]
|
||||||
|
comm_id = {}
|
||||||
|
comm_id_counter = 0
|
||||||
|
post_validation_time = 0
|
||||||
|
for i in range(self.n_stage - 1, -1, -1):
|
||||||
|
pv_id = min(2 * (self.n_stage - 1 - i), self.n_micro - 1)
|
||||||
|
post_validation_time = max(
|
||||||
|
post_validation_time, end_time[self.get_id(0, 0, i, pv_id)] - self.fbw_cost[0] - self.c_cost
|
||||||
|
)
|
||||||
|
# post_validation_time = 0
|
||||||
|
for it in ["RECV_", "SEND_", ""]:
|
||||||
|
if i == 0 and it == "SEND_":
|
||||||
|
continue
|
||||||
|
if i == self.n_stage - 1 and it == "RECV_":
|
||||||
|
continue
|
||||||
|
# stage_ = i - 1 if it == "RECV_" else i
|
||||||
|
stage_ = i
|
||||||
|
local_order[stage_].append(
|
||||||
|
ScheduledNode(
|
||||||
|
type=it + "POST_VALIDATION",
|
||||||
|
chunk=0,
|
||||||
|
stage=stage_,
|
||||||
|
minibatch=0,
|
||||||
|
start_time=post_validation_time,
|
||||||
|
completion_time=post_validation_time,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
comm_id[local_order[stage_][-1]] = comm_id_counter
|
||||||
|
comm_id_counter += 1
|
||||||
|
for i in range(self.n_stage):
|
||||||
|
for _cat_, _chunk_, _micro_ in schedule[i]:
|
||||||
|
complete_time = end_time[self.get_id(_cat_, _chunk_, i, _micro_)]
|
||||||
|
local_order[i].append(
|
||||||
|
ScheduledNode(
|
||||||
|
type="FBW"[_cat_],
|
||||||
|
chunk=_chunk_ if _cat_ == 0 else 1 - _chunk_,
|
||||||
|
stage=i,
|
||||||
|
minibatch=_micro_,
|
||||||
|
start_time=complete_time - self.fbw_cost[_cat_],
|
||||||
|
completion_time=complete_time,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
if _cat_ == 2: # no communication for W
|
||||||
|
continue
|
||||||
|
cat_str = "FORWARD" if _cat_ == 0 else "BACKWARD"
|
||||||
|
|
||||||
|
def communicate(send_recv, stage_):
|
||||||
|
# noinspection PyTypeChecker
|
||||||
|
local_order[stage_].append(
|
||||||
|
ScheduledNode(
|
||||||
|
type=send_recv + cat_str,
|
||||||
|
chunk=_chunk_ if _cat_ == 0 else 1 - _chunk_,
|
||||||
|
stage=stage_,
|
||||||
|
minibatch=_micro_,
|
||||||
|
start_time=complete_time,
|
||||||
|
completion_time=complete_time,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
comm_id[local_order[stage_][-1]] = comm_id_counter
|
||||||
|
|
||||||
|
if _chunk_ == 1 and i > 0:
|
||||||
|
communicate("SEND_", i)
|
||||||
|
communicate("RECV_", i - 1)
|
||||||
|
if _chunk_ == 0 and i < self.n_stage - 1:
|
||||||
|
communicate("SEND_", i)
|
||||||
|
communicate("RECV_", i + 1)
|
||||||
|
comm_id_counter += 1
|
||||||
|
for rank in range(self.n_stage):
|
||||||
|
# For nodes with the same timestamp on the same stage, communication will be prioritized.
|
||||||
|
def even_breaker(x: ScheduledNode):
|
||||||
|
# Compute nodes are always delayed.
|
||||||
|
if x.type in ["F", "B", "W"]:
|
||||||
|
return comm_id_counter
|
||||||
|
# For comm nodes, order by their unique comm id
|
||||||
|
return comm_id[x]
|
||||||
|
|
||||||
|
local_order[rank] = list(sorted(local_order[rank], key=lambda x: (x.start_time, even_breaker(x))))
|
||||||
|
# If a recv intersects with the previous computation, reorder them so that the recv
|
||||||
|
# is executed before computation and hence can be overlapped.
|
||||||
|
for i in range(len(local_order[rank])):
|
||||||
|
if (
|
||||||
|
i > 0
|
||||||
|
and local_order[rank][i - 1].type in {"F", "B", "W"}
|
||||||
|
and local_order[rank][i].type.startswith("RECV")
|
||||||
|
and "POST_VALIDATION" not in local_order[rank][i].type
|
||||||
|
and local_order[rank][i].start_time <= local_order[rank][i - 1].completion_time
|
||||||
|
):
|
||||||
|
local_order[rank][i], local_order[rank][i - 1] = local_order[rank][i - 1], local_order[rank][i]
|
||||||
|
|
||||||
|
local_order_with_rollback = [[] for _ in range(self.n_stage)]
|
||||||
|
for rank in range(self.n_stage):
|
||||||
|
rollback_comm = set()
|
||||||
|
if rank > 0:
|
||||||
|
for node in local_order[rank - 1]:
|
||||||
|
if node.type == "POST_VALIDATION":
|
||||||
|
break
|
||||||
|
if node.type == "SEND_FORWARD":
|
||||||
|
assert node.chunk == 0
|
||||||
|
rollback_comm.add(node.minibatch)
|
||||||
|
for node in local_order[rank]:
|
||||||
|
if node.type == "RECV_FORWARD" and node.chunk == 0 and node.minibatch in rollback_comm:
|
||||||
|
rollback = True
|
||||||
|
rollback_comm.remove(node.minibatch)
|
||||||
|
else:
|
||||||
|
rollback = False
|
||||||
|
local_order_with_rollback[rank].append(
|
||||||
|
ScheduledNode(
|
||||||
|
type=node.type,
|
||||||
|
chunk=node.chunk,
|
||||||
|
stage=node.stage,
|
||||||
|
minibatch=node.minibatch,
|
||||||
|
start_time=node.start_time,
|
||||||
|
completion_time=node.completion_time,
|
||||||
|
rollback=rollback,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
assert len(rollback_comm) == 0
|
||||||
|
|
||||||
|
return local_order_with_rollback
|
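For reference, here is a minimal sketch of how the planner above can be driven. The cost and memory numbers are illustrative placeholders, not measured values; get_v_schedule() returns one list of ScheduledNode objects per pipeline stage, which is what ZeroBubbleVPipeScheduler consumes.

from colossalai.pipeline.schedule.v_schedule import PipelineGraph

# one plausible activation-memory model (assumed for illustration): hidden size, heads, sequence length
h, a, s = 4096, 32, 1024
mem_f = 34 * h + 5 * a * s   # memory held after an F pass
mem_w = -32 * h              # released by the W pass
mem_b = -mem_w - mem_f       # released by the B pass, so F + B + W nets to zero

graph = PipelineGraph(
    n_stage=4,                                  # pipeline stages
    n_micro=8,                                  # microbatches
    f_cost=1, b_cost=1, w_cost=1, c_cost=1,     # relative F/B/W compute and p2p costs
    f_mem=mem_f, b_mem=mem_b, w_mem=mem_w,
)
zbv_schedule = graph.get_v_schedule()           # one List[ScheduledNode] per stage
print(len(zbv_schedule), [n.type for n in zbv_schedule[0][:8]])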
@@ -0,0 +1,958 @@
|
|||||||
|
from functools import partial
|
||||||
|
from typing import Any, Callable, Dict, Iterable, List, Optional, Union
|
||||||
|
|
||||||
|
import torch
|
||||||
|
import torch.cuda
|
||||||
|
import torch.distributed
|
||||||
|
from torch.nn import Module, ModuleList
|
||||||
|
from torch.utils._pytree import tree_flatten, tree_map
|
||||||
|
|
||||||
|
from colossalai.accelerator import get_accelerator
|
||||||
|
from colossalai.interface import OptimizerWrapper
|
||||||
|
from colossalai.pipeline.p2p import PipelineP2PCommunication, create_send_metadata
|
||||||
|
from colossalai.pipeline.schedule.v_schedule import ScheduledNode
|
||||||
|
from colossalai.pipeline.stage_manager import PipelineStageManager
|
||||||
|
from colossalai.pipeline.weight_grad_store import WeightGradStore
|
||||||
|
|
||||||
|
from ._utils import (
|
||||||
|
clone,
|
||||||
|
detach,
|
||||||
|
get_batch_size,
|
||||||
|
get_micro_batch,
|
||||||
|
merge_batch,
|
||||||
|
model_forward,
|
||||||
|
release_tensor_data,
|
||||||
|
require_grad,
|
||||||
|
retain_grad,
|
||||||
|
to_device,
|
||||||
|
)
|
||||||
|
from .base import PipelineSchedule
|
||||||
|
|
||||||
|
AUTO_SCHEDULE_COMMUNICATION_TYPES = {"RECV_FORWARD", "RECV_BACKWARD", "SEND_FORWARD", "SEND_BACKWARD"}
|
||||||
|
|
||||||
|
|
||||||
|
def _wait_p2p(wait_handles: List[torch.cuda.Event]) -> None:
|
||||||
|
if wait_handles is not None:
|
||||||
|
for req in wait_handles:
|
||||||
|
req.wait()
|
||||||
|
|
||||||
|
|
||||||
|
class ZeroBubbleVPipeScheduler(PipelineSchedule):
|
||||||
|
def __init__(
|
||||||
|
self,
|
||||||
|
stage_manager: PipelineStageManager,
|
||||||
|
schedule: List[ScheduledNode],
|
||||||
|
num_model_chunks: int,
|
||||||
|
num_microbatch: Optional[int] = None,
|
||||||
|
microbatch_size: Optional[int] = None,
|
||||||
|
enable_metadata_cache: bool = True,
|
||||||
|
overlap_p2p: bool = True,
|
||||||
|
):
|
||||||
|
super().__init__(stage_manager)
|
||||||
|
# batch info
|
||||||
|
self.num_microbatch = num_microbatch
|
||||||
|
self.microbatch_size = microbatch_size
|
||||||
|
self.num_model_chunks = num_model_chunks
|
||||||
|
self.batch: Any
|
||||||
|
self.batch_size: int
|
||||||
|
self.last_batch_size: Optional[int] = None
|
||||||
|
self.microbatch_offset: List[int]
|
||||||
|
|
||||||
|
self.schedules = schedule
|
||||||
|
# TODO: optim post valid
|
||||||
|
self.do_post_validation = False
|
||||||
|
|
||||||
|
# P2PMeta cache
|
||||||
|
self.enable_metadata_cache = enable_metadata_cache
|
||||||
|
|
||||||
|
# check send_tensor_metadata, send_grad_metadata
|
||||||
|
# pp4 as sample, we should follow this meta strategy
|
||||||
|
# send_tensor_meta(fwd) send_grad_meta(bwd)
|
||||||
|
# chunk0 | chunk1 chunk0 | chunk 1
|
||||||
|
# stage 0 T | F F | T
|
||||||
|
# stage 1 T | T T | T
|
||||||
|
# stage 2 T | T T | T
|
||||||
|
# stage 3 F | T F | T
|
||||||
|
if stage_manager.is_first_stage(ignore_chunk=True):
|
||||||
|
self.send_tensor_metadata = [True, False]
|
||||||
|
self.send_grad_metadata = [False, True]
|
||||||
|
elif stage_manager.is_last_stage(ignore_chunk=True):
|
||||||
|
self.send_tensor_metadata = [False, True]
|
||||||
|
self.send_grad_metadata = [True, False]
|
||||||
|
else:
|
||||||
|
self.send_tensor_metadata = [True, True]
|
||||||
|
self.send_grad_metadata = [True, True]
|
||||||
|
|
||||||
|
# meta cache buffer
|
||||||
|
self.tensor_metadata_recv = [None, None] # [chunk 0 meta, chunk 1 meta]
|
||||||
|
self.grad_metadata_recv = [None, None]
|
||||||
|
|
||||||
|
# P2P communication
|
||||||
|
self.comm = PipelineP2PCommunication(stage_manager, overlap_p2p=overlap_p2p)
|
||||||
|
|
||||||
|
# init communication map
|
||||||
|
self.communication_map = {
|
||||||
|
"SEND_FORWARD": self.send_forward,
|
||||||
|
"RECV_FORWARD": self.recv_forward,
|
||||||
|
"SEND_BACKWARD": self.send_backward,
|
||||||
|
"RECV_BACKWARD": self.recv_backward,
|
||||||
|
}
|
||||||
|
|
||||||
|
# init buffer
|
||||||
|
self._free_buffers()
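# --- Illustrative sketch (not part of this commit): constructing the scheduler ---
# `stage_manager` and `zbv_schedule` are assumed to come from the plugin/booster setup;
# run_forward_backward later indexes the schedule by self.stage_manager.stage.
#
#   scheduler = ZeroBubbleVPipeScheduler(
#       stage_manager=stage_manager,   # PipelineStageManager for this rank
#       schedule=zbv_schedule,         # output of PipelineGraph.get_v_schedule()
#       num_model_chunks=2,            # ZBV splits every rank into two chunks (the two legs of the V)
#       num_microbatch=8,
#       overlap_p2p=True,
#   )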
|
||||||
|
|
||||||
|
def _free_buffers(self):
|
||||||
|
# free local buffer
|
||||||
|
# two dim array, first dim is the model chunk, second dim is the microbatch queue
|
||||||
|
|
||||||
|
# x & y buffer for schedule b
|
||||||
|
self.input_tensors = [[], []]
|
||||||
|
self.output_tensors = [[], []]
|
||||||
|
|
||||||
|
# y & dy buffer for schedule w
|
||||||
|
self.output_tensors_dw = [[], []]
|
||||||
|
self.output_tensors_grad_dw = [[], []]
|
||||||
|
|
||||||
|
# buffer for communication
|
||||||
|
self.send_forward_buffer = [[], []] # [chunk0:[torch.Tensor], chunk1:[torch.Tensor]]
|
||||||
|
self.recv_forward_buffer = [
|
||||||
|
[],
|
||||||
|
[],
|
||||||
|
] # [chunk0:[(torch.Tensor, wait_handle)], chunk1:[(torch.Tensor, wait_handle)]]
|
||||||
|
self.send_backward_buffer = [[], []] # [chunk0:[torch.Tensor], chunk1:[torch.Tensor]]
|
||||||
|
self.recv_backward_buffer = [
|
||||||
|
[],
|
||||||
|
[],
|
||||||
|
] # [chunk0:[(torch.Tensor, wait_handle)], chunk1:[(torch.Tensor, wait_handle)]]
|
||||||
|
|
||||||
|
# y buffer for local send fwd
|
||||||
|
self.local_send_forward_buffer = []
|
||||||
|
# dy buffer for local send bwd
|
||||||
|
self.local_send_backward_buffer = []
|
||||||
|
|
||||||
|
# wait pp buffer
|
||||||
|
self.wait_handles = []
|
||||||
|
|
||||||
|
def assert_buffer_empty(self):
|
||||||
|
# assert buffer is empty at end
|
||||||
|
assert len(self.input_tensors[0]) == 0
|
||||||
|
assert len(self.input_tensors[1]) == 0
|
||||||
|
assert len(self.output_tensors[0]) == 0
|
||||||
|
assert len(self.output_tensors[1]) == 0
|
||||||
|
assert len(self.output_tensors_dw[0]) == 0
|
||||||
|
assert len(self.output_tensors_dw[1]) == 0
|
||||||
|
assert len(self.output_tensors_grad_dw[0]) == 0
|
||||||
|
assert len(self.output_tensors_grad_dw[1]) == 0
|
||||||
|
assert len(self.send_forward_buffer[0]) == 0
|
||||||
|
assert len(self.send_forward_buffer[1]) == 0
|
||||||
|
assert len(self.recv_forward_buffer[0]) == 0
|
||||||
|
assert len(self.recv_forward_buffer[1]) == 0
|
||||||
|
assert len(self.send_backward_buffer[0]) == 0
|
||||||
|
assert len(self.send_backward_buffer[1]) == 0
|
||||||
|
assert len(self.recv_backward_buffer[0]) == 0
|
||||||
|
assert len(self.recv_backward_buffer[1]) == 0
|
||||||
|
assert len(self.local_send_forward_buffer) == 0
|
||||||
|
assert len(self.local_send_backward_buffer) == 0
|
||||||
|
|
||||||
|
def load_batch(self, data_iter: Iterable, device: Optional[torch.device] = None) -> None:
|
||||||
|
"""Load a batch from data iterator.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data_iter (Iterable): Data iterator.
|
||||||
|
device (Optional[torch.device], optional): Target device. Defaults to None.
|
||||||
|
"""
|
||||||
|
batch = next(data_iter)
|
||||||
|
if device is not None:
|
||||||
|
batch = tree_map(partial(to_device, device=device), batch)
|
||||||
|
|
||||||
|
self.microbatch_offset = [0 for _ in range(self.num_model_chunks)]
|
||||||
|
self.batch = batch
|
||||||
|
self.batch_size = get_batch_size(batch)
|
||||||
|
|
||||||
|
if self.microbatch_size is None:
|
||||||
|
assert self.batch_size % self.num_microbatch == 0, "Batch size should be divisible by the number of microbatches"
|
||||||
|
self.microbatch_size = self.batch_size // self.num_microbatch
|
||||||
|
if self.num_microbatch is None:
|
||||||
|
assert self.batch_size % self.microbatch_size == 0, "Batch size should be divisible by the microbatch size"
|
||||||
|
self.num_microbatch = self.batch_size // self.microbatch_size
|
||||||
|
|
||||||
|
if not self.forward_only:
|
||||||
|
assert self.last_batch_size is None or self.last_batch_size == self.batch_size
|
||||||
|
assert self.batch_size == self.microbatch_size * self.num_microbatch
|
||||||
|
|
||||||
|
assert (
|
||||||
|
self.num_microbatch % self.stage_manager.num_stages == 0
|
||||||
|
), "Number of microbatch should be an integer multiple of number of pipeline parallel devices"
|
||||||
|
|
||||||
|
if self.forward_only:
|
||||||
|
self.num_microbatch = (self.batch_size - 1) // self.microbatch_size + 1
|
||||||
|
|
||||||
|
self.last_batch_size = self.batch_size
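# --- Illustrative sketch (not part of this commit): the microbatch arithmetic above ---
# If only one of num_microbatch / microbatch_size is given, the other is derived:
#   batch_size=32, num_microbatch=8   -> microbatch_size = 32 // 8 = 4
#   batch_size=32, microbatch_size=4  -> num_microbatch  = 32 // 4 = 8
# and when training (not forward_only) num_microbatch must be a multiple of the number
# of pipeline stages, e.g. 8 microbatches on 4 stages.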
|
||||||
|
|
||||||
|
def load_micro_batch(self, model_chunk_id: int) -> Any:
|
||||||
|
"""Load a micro batch from the current batch.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
model_chunk_id (int): The current model chunk idx.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Any: Micro batch.
|
||||||
|
"""
|
||||||
|
assert self.microbatch_offset[model_chunk_id] <= self.batch_size, "Microbatches exhausted"
|
||||||
|
micro_batch = get_micro_batch(self.batch, self.microbatch_offset[model_chunk_id], self.microbatch_size)
|
||||||
|
self.microbatch_offset[model_chunk_id] += self.microbatch_size
|
||||||
|
return tree_map(partial(to_device, device=get_accelerator().get_current_device()), micro_batch)
|
||||||
|
|
||||||
|
def get_model_chunk_id(self, microbatch_id: int, is_forward: bool) -> int:
|
||||||
|
"""Helper method to get the model chunk ID given the iteration number.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
microbatch_id (int): the current microbatch idx
|
||||||
|
is_forward (bool): Whether this is the forward pass.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
int: The model chunk idx of the input microbatch_id
|
||||||
|
"""
|
||||||
|
assert (
|
||||||
|
microbatch_id < self.num_microbatch * self.num_model_chunks
|
||||||
|
), f"microbatch_id {microbatch_id} is out of range ({self.num_microbatch * self.num_model_chunks})"
|
||||||
|
microbatch_id_in_group = microbatch_id % (self.stage_manager.num_stages * self.num_model_chunks)
|
||||||
|
model_chunk_id = microbatch_id_in_group // self.stage_manager.num_stages
|
||||||
|
if not is_forward:
|
||||||
|
# Reverse order
|
||||||
|
model_chunk_id = self.num_model_chunks - model_chunk_id - 1
|
||||||
|
return model_chunk_id
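# --- Illustrative sketch (not part of this commit): chunk-id mapping for pp=4, 2 chunks ---
# microbatch_id_in_group = microbatch_id % (4 * 2); model_chunk_id = microbatch_id_in_group // 4
#   forward : microbatch_id 0..3 -> chunk 0, 4..7 -> chunk 1, 8..11 -> chunk 0 again, ...
#   backward: the chunk order is reversed, so 0..3 -> chunk 1, 4..7 -> chunk 0, ...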
|
||||||
|
|
||||||
|
def recv_forward(self, model_chunk_id: int, prev_rank: int = None) -> List:
|
||||||
|
"""Copy the forward output from the previous stage in pipeline as the input tensor of this stage.
|
||||||
|
For ZBV.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
model_chunk_id (int): The current model chunk idx.
|
||||||
|
prev_rank (int, optional): The rank of the source of the tensor.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Any: The input tensor or input tensor list.
|
||||||
|
Any: The wait handles for the communication.
|
||||||
|
"""
|
||||||
|
with self.stage_manager.switch_model_chunk_id(model_chunk_id):
|
||||||
|
if model_chunk_id == 0:
|
||||||
|
################
|
||||||
|
# chunk = 0 & is_first_stage
|
||||||
|
# do nothing; chunk 0 on the first stage has no previous rank to receive from
|
||||||
|
#################
|
||||||
|
if self.stage_manager.is_first_stage(ignore_chunk=True):
|
||||||
|
return []
|
||||||
|
|
||||||
|
################
|
||||||
|
# chunk = 0 & not is_first_stage
|
||||||
|
# Recv y from PREV_rank as input
|
||||||
|
#################
|
||||||
|
else:
|
||||||
|
prev_rank = self.stage_manager.get_prev_rank()
|
||||||
|
input_tensor, wait_handles = self.comm.recv_forward(
|
||||||
|
prev_rank=prev_rank, metadata_recv=self.tensor_metadata_recv[model_chunk_id]
|
||||||
|
)
|
||||||
|
if self.enable_metadata_cache and self.tensor_metadata_recv[model_chunk_id] is None:
|
||||||
|
self.tensor_metadata_recv[model_chunk_id] = create_send_metadata(input_tensor)
|
||||||
|
self.recv_forward_buffer[model_chunk_id].append((input_tensor, wait_handles))
|
||||||
|
return wait_handles
|
||||||
|
|
||||||
|
else:
|
||||||
|
################
|
||||||
|
# chunk = 1 & is_last_stage
|
||||||
|
# do nothing; y is taken from local_send_forward_buffer in schedule_f
|
||||||
|
################
|
||||||
|
if self.stage_manager.is_last_stage(ignore_chunk=True):
|
||||||
|
# return None, []
|
||||||
|
return []
|
||||||
|
|
||||||
|
################
|
||||||
|
# chunk = 1 & not is_last_stage
|
||||||
|
# recv y from NEXT_rank as input
|
||||||
|
################
|
||||||
|
else:
|
||||||
|
next_rank = self.stage_manager.get_next_rank()
|
||||||
|
input_tensor, wait_handles = self.comm.recv_forward(
|
||||||
|
next_rank, metadata_recv=self.tensor_metadata_recv[model_chunk_id]
|
||||||
|
)
|
||||||
|
if self.enable_metadata_cache and self.tensor_metadata_recv[model_chunk_id] is None:
|
||||||
|
self.tensor_metadata_recv[model_chunk_id] = create_send_metadata(input_tensor)
|
||||||
|
self.recv_forward_buffer[model_chunk_id].append((input_tensor, wait_handles))
|
||||||
|
return wait_handles
|
||||||
|
|
||||||
|
def recv_backward(self, model_chunk_id: int, next_rank: int = None) -> List:
|
||||||
|
"""Copy the gradient tensor from the next stage in pipeline as the input gradient of this stage.
|
||||||
|
For ZBV.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
model_chunk_id (int): The current model chunk idx.
|
||||||
|
next_rank (int, optional): The rank of the source of the tensor.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Any: The input gradient tensor or gradient tensor list.
|
||||||
|
Any: The wait handles for the communication.
|
||||||
|
"""
|
||||||
|
with self.stage_manager.switch_model_chunk_id(model_chunk_id):
|
||||||
|
if model_chunk_id == 0:
|
||||||
|
# bwd chunk0 is right V;
|
||||||
|
################
|
||||||
|
# chunk = 0 & is_last_stage
|
||||||
|
# do nothing; Already get dy from local_send_backward_buffer in schedule b
|
||||||
|
################
|
||||||
|
if self.stage_manager.is_last_stage(ignore_chunk=True):
|
||||||
|
return []
|
||||||
|
|
||||||
|
################
|
||||||
|
# chunk = 0 & not is_last_stage
|
||||||
|
# Recv bwd from next stage;
|
||||||
|
################
|
||||||
|
else:
|
||||||
|
next_rank = self.stage_manager.get_next_rank()
|
||||||
|
output_tensor_grad, wait_handles = self.comm.recv_backward(
|
||||||
|
next_rank, metadata_recv=self.grad_metadata_recv[model_chunk_id]
|
||||||
|
)
|
||||||
|
if self.enable_metadata_cache and self.grad_metadata_recv[model_chunk_id] is None:
|
||||||
|
self.grad_metadata_recv[model_chunk_id] = create_send_metadata(output_tensor_grad)
|
||||||
|
self.recv_backward_buffer[model_chunk_id].append((output_tensor_grad, wait_handles))
|
||||||
|
return wait_handles
|
||||||
|
|
||||||
|
else:
|
||||||
|
# bwd chunk1 is left V;
|
||||||
|
################
|
||||||
|
# chunk = 1 & is_first_stage
|
||||||
|
# do nothing; get loss from local
|
||||||
|
################
|
||||||
|
if self.stage_manager.is_first_stage(ignore_chunk=True):
|
||||||
|
return []
|
||||||
|
|
||||||
|
################
|
||||||
|
# chunk = 1 & not first stage
|
||||||
|
# recv_backward recv bwd from prev stage;
|
||||||
|
################
|
||||||
|
else:
|
||||||
|
prev_rank = self.stage_manager.get_prev_rank()
|
||||||
|
output_tensor_grad, wait_handles = self.comm.recv_backward(
|
||||||
|
next_rank=prev_rank, metadata_recv=self.grad_metadata_recv[model_chunk_id]
|
||||||
|
)
|
||||||
|
if self.enable_metadata_cache and self.grad_metadata_recv[model_chunk_id] is None:
|
||||||
|
self.grad_metadata_recv[model_chunk_id] = create_send_metadata(output_tensor_grad)
|
||||||
|
self.recv_backward_buffer[model_chunk_id].append((output_tensor_grad, wait_handles))
|
||||||
|
return wait_handles
|
||||||
|
|
||||||
|
def send_forward(self, model_chunk_id: int, next_rank: int = None) -> List:
|
||||||
|
"""Sends the input tensor to the next stage in pipeline.
|
||||||
|
For ZBV.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
model_chunk_id (int): The current model chunk idx.
|
||||||
|
next_rank (int, optional): The rank of the recipient of the tensor.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Any: The wait handles for the communication.
|
||||||
|
"""
|
||||||
|
|
||||||
|
with self.stage_manager.switch_model_chunk_id(model_chunk_id):
|
||||||
|
if model_chunk_id == 0:
|
||||||
|
################
|
||||||
|
# chunk = 0 && is_last_stage
|
||||||
|
# do nothing; hold y on local_send_forward_buffer
|
||||||
|
################
|
||||||
|
if self.stage_manager.is_last_stage(ignore_chunk=True):
|
||||||
|
self.send_tensor_metadata[model_chunk_id] = not self.enable_metadata_cache
|
||||||
|
return []
|
||||||
|
|
||||||
|
################
|
||||||
|
# chunk = 0 && not is_last_stage
|
||||||
|
# self.comm.send_forward send y to NEXT stage
|
||||||
|
################
|
||||||
|
else:
|
||||||
|
next_rank = self.stage_manager.get_next_rank()
|
||||||
|
output_tensor = self.send_forward_buffer[model_chunk_id].pop(0)
|
||||||
|
send_handles = self.comm.send_forward(
|
||||||
|
output_object=output_tensor,
|
||||||
|
next_rank=next_rank,
|
||||||
|
send_metadata=self.send_tensor_metadata[model_chunk_id],
|
||||||
|
)
|
||||||
|
self.send_tensor_metadata[model_chunk_id] = not self.enable_metadata_cache
|
||||||
|
return send_handles
|
||||||
|
|
||||||
|
else:
|
||||||
|
################
|
||||||
|
# chunk = 1 && is_first_stage
|
||||||
|
# do nothing; Already send LOSS to local_send_backward_buffer in schedule f send part
|
||||||
|
################
|
||||||
|
if self.stage_manager.is_first_stage(ignore_chunk=True):
|
||||||
|
self.send_tensor_metadata[model_chunk_id] = not self.enable_metadata_cache
|
||||||
|
return []
|
||||||
|
|
||||||
|
################
|
||||||
|
# chunk = 1 && not is_first_stage
|
||||||
|
# self.comm.send_forward send y to PREV stage
|
||||||
|
################
|
||||||
|
else:
|
||||||
|
prev_rank = self.stage_manager.get_prev_rank()
|
||||||
|
output_tensor = self.send_forward_buffer[model_chunk_id].pop(0)
|
||||||
|
send_handles = self.comm.send_forward(
|
||||||
|
output_tensor, prev_rank, send_metadata=self.send_tensor_metadata[model_chunk_id]
|
||||||
|
)
|
||||||
|
self.send_tensor_metadata[model_chunk_id] = not self.enable_metadata_cache
|
||||||
|
return send_handles
|
||||||
|
|
||||||
|
def send_backward(self, model_chunk_id: int, prev_rank: int = None) -> List:
|
||||||
|
"""Sends the gradient tensor to the previous stage in pipeline.
|
||||||
|
For ZBV.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
model_chunk_id (int): The current model chunk idx.
|
||||||
|
prev_rank (int, optional): The rank of the recipient of the tensor
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Any: The wait handles for the communication.
|
||||||
|
"""
|
||||||
|
|
||||||
|
with self.stage_manager.switch_model_chunk_id(model_chunk_id):
|
||||||
|
if model_chunk_id == 0:
|
||||||
|
# bwd chunk0 is right V;
|
||||||
|
################
|
||||||
|
# chunk = 0 && is_first_stage
|
||||||
|
# do nothing; chunk 0 on the first stage is the end of the backward pass
|
||||||
|
################
|
||||||
|
if self.stage_manager.is_first_stage(ignore_chunk=True):
|
||||||
|
self.send_grad_metadata[model_chunk_id] = not self.enable_metadata_cache
|
||||||
|
return []
|
||||||
|
|
||||||
|
################
|
||||||
|
# chunk = 0 && not is_first_stage
|
||||||
|
# Send dx to PREV stage;
|
||||||
|
################
|
||||||
|
else:
|
||||||
|
prev_rank = self.stage_manager.get_prev_rank()
|
||||||
|
input_tensor_grad = self.send_backward_buffer[model_chunk_id].pop(0)
|
||||||
|
send_handles = self.comm.send_backward(
|
||||||
|
input_tensor_grad, prev_rank, send_metadata=self.send_grad_metadata[model_chunk_id]
|
||||||
|
)
|
||||||
|
self.send_grad_metadata[model_chunk_id] = not self.enable_metadata_cache
|
||||||
|
return send_handles
|
||||||
|
|
||||||
|
# bwd chunk1 is left V;
|
||||||
|
else:
|
||||||
|
################
|
||||||
|
# chunk = 1 && is_last_stage
|
||||||
|
# do nothing; Already send input_tensor_grad to local_send_bwd_buffer in schedule b;
|
||||||
|
################
|
||||||
|
if self.stage_manager.is_last_stage(ignore_chunk=True):
|
||||||
|
self.send_grad_metadata[model_chunk_id] = not self.enable_metadata_cache
|
||||||
|
return []
|
||||||
|
|
||||||
|
################
|
||||||
|
# chunk = 1 && not is_last_stage
|
||||||
|
# Send dx to NEXT stage;
|
||||||
|
################
|
||||||
|
else:
|
||||||
|
next_rank = self.stage_manager.get_next_rank()
|
||||||
|
input_tensor_grad = self.send_backward_buffer[model_chunk_id].pop(0)
|
||||||
|
send_handles = self.comm.send_backward(
|
||||||
|
input_tensor_grad, next_rank, send_metadata=self.send_grad_metadata[model_chunk_id]
|
||||||
|
)
|
||||||
|
self.send_grad_metadata[model_chunk_id] = not self.enable_metadata_cache
|
||||||
|
return send_handles
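# --- Illustrative sketch (not part of this commit): how the four comm methods above are dispatched ---
# run_forward_backward routes communication nodes through self.communication_map, e.g.
#   node = ScheduledNode(type="SEND_FORWARD", chunk=0, stage=1, minibatch=3)
#   wait_handles = self.communication_map[node.type](node.chunk)   # -> self.send_forward(0)
# RECV handles are waited on right before the matching compute step, while SEND handles are
# collected in self.wait_handles and waited on at the end of the iteration.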
|
||||||
|
|
||||||
|
def forward_step(
|
||||||
|
self,
|
||||||
|
model_chunk: Union[ModuleList, Module],
|
||||||
|
model_chunk_id: int,
|
||||||
|
micro_batch: Optional[dict],
|
||||||
|
input_obj: Optional[dict],
|
||||||
|
criterion: Callable,
|
||||||
|
accum_loss: Optional[torch.Tensor] = None,
|
||||||
|
outputs: Optional[List[Any]] = None,
|
||||||
|
) -> Union[torch.Tensor, dict]:
|
||||||
|
"""Forward one step of the pipeline
|
||||||
|
Args:
|
||||||
|
model_chunk (ModuleList or Module): Model Chunk to be run;
|
||||||
|
model_chunk_id (int): The current model chunk idx;
|
||||||
|
input_obj (Optional[dict]): x;
|
||||||
|
criterion (Callable): loss function;
|
||||||
|
accum_loss (Optional[torch.Tensor], optional): Accumulated loss. Defaults to None.
|
||||||
|
outputs (Optional[List[Any]], optional): List to store the output of the last stage (final output). Defaults to None.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Union[torch.Tensor, dict]: The intermediate output (dict) of the current stage. If it is the last stage, the output is the loss (Tensor).
|
||||||
|
"""
|
||||||
|
# Load input ids, attention mask and labels
|
||||||
|
# for the first stage, input_obj is None, so we use micro_batch as input_obj
|
||||||
|
# for other stages, input_obj is the output of the previous/next stage containing hidden_states etc.
|
||||||
|
# Only attention_mask from micro_batch is used
|
||||||
|
with self.stage_manager.switch_model_chunk_id(model_chunk_id):
|
||||||
|
# fwd calculate
|
||||||
|
internal_inputs = {} if input_obj is None else input_obj
|
||||||
|
internal_inputs["stage_index"] = self.stage_manager.stage_indices[model_chunk_id]
|
||||||
|
output_obj = model_forward(model_chunk, micro_batch, internal_inputs)
|
||||||
|
# last layer in model
|
||||||
|
if model_chunk_id == 1 and self.stage_manager.is_first_stage(ignore_chunk=True):
|
||||||
|
loss = criterion(output_obj, micro_batch) / self.num_microbatch
|
||||||
|
if accum_loss is not None:
|
||||||
|
accum_loss.add_(loss.detach())
|
||||||
|
if outputs is not None:
|
||||||
|
outputs.append(tree_map(detach, output_obj))
|
||||||
|
return loss
|
||||||
|
else:
|
||||||
|
return output_obj
|
||||||
|
|
||||||
|
def backward_b_step(
|
||||||
|
self,
|
||||||
|
model_chunk: Union[ModuleList, Module],
|
||||||
|
model_chunk_id: int,
|
||||||
|
optimizer: OptimizerWrapper,
|
||||||
|
# micro_batch: Optional[dict],
|
||||||
|
input_obj: Optional[dict],
|
||||||
|
output_obj: Union[dict, torch.Tensor],
|
||||||
|
output_obj_grad: Optional[dict],
|
||||||
|
) -> Optional[dict]:
|
||||||
|
"""Backward dx step of the pipeline; we calculate "dx = w*dy" here;
|
||||||
|
|
||||||
|
Args:
|
||||||
|
model_chunk (ModuleList or Module): Model Chunk to be run;
|
||||||
|
model_chunk_id (int): The current model chunk idx;
|
||||||
|
optimizer (OptimizerWrapper): Optimizer to update the model
|
||||||
|
input_obj (Optional[dict]): x, the input of this model chunk.
|
||||||
|
output_obj (Union[dict, torch.Tensor]): y.
|
||||||
|
output_obj_grad (dict): dy.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Optional[dict]: dx.
|
||||||
|
"""
|
||||||
|
# calculate bwd b step ; only dx = w*dy;
|
||||||
|
|
||||||
|
# Retain the grad on the input_obj. No need retain_grad microbatch
|
||||||
|
if input_obj is not None:
|
||||||
|
tree_map(retain_grad, input_obj)
|
||||||
|
|
||||||
|
# x, y, dy list for backward_by_grad; Type: list[tensor];
|
||||||
|
input_obj_ = []
|
||||||
|
output_obj_ = []
|
||||||
|
output_obj_grad_ = []
|
||||||
|
|
||||||
|
# For chunk 0 stage 0, use micro_batch as input_obj_; we don't have to calculate dx for the microbatch.
|
||||||
|
|
||||||
|
# For loss backward; output_obj is loss; output_obj_grad should be None
|
||||||
|
if model_chunk_id == 1 and self.stage_manager.is_first_stage(ignore_chunk=True):
|
||||||
|
assert output_obj_grad is None
|
||||||
|
input_obj_, _ = tree_flatten(input_obj)
|
||||||
|
output_obj_.append(output_obj) # LOSS
|
||||||
|
output_obj_grad_.append(output_obj_grad) # None
|
||||||
|
|
||||||
|
# For other chunk stage, use input_obj as input_obj_;
|
||||||
|
else:
|
||||||
|
input_obj_, _ = tree_flatten(input_obj)
|
||||||
|
output_obj_, _ = tree_flatten(output_obj) # y
|
||||||
|
output_obj_grad_, _ = tree_flatten(output_obj_grad) # dy
|
||||||
|
|
||||||
|
# filter item which is not torch.Tensor
|
||||||
|
input_obj_ = [v for v in input_obj_ if isinstance(v, torch.Tensor) or v is None]
|
||||||
|
output_obj_ = [v for v in output_obj_ if isinstance(v, torch.Tensor) or v is None]
|
||||||
|
output_obj_grad_ = [v for v in output_obj_grad_ if isinstance(v, torch.Tensor) or v is None]
|
||||||
|
|
||||||
|
try:
|
||||||
|
ctx = optimizer.no_sync()
|
||||||
|
except AttributeError:
|
||||||
|
ctx = model_chunk.no_sync()
|
||||||
|
with ctx:
|
||||||
|
optimizer.backward_by_grad(
|
||||||
|
tensor=output_obj_,
|
||||||
|
grad=output_obj_grad_,
|
||||||
|
# inputs=input_obj_,
|
||||||
|
retain_graph=False,
|
||||||
|
)
|
||||||
|
# Format output_obj_grad
|
||||||
|
input_obj_grad = dict()
|
||||||
|
if model_chunk_id == 0 and self.stage_manager.is_first_stage(ignore_chunk=True):
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
for k, v in input_obj.items():
|
||||||
|
if isinstance(v, torch.Tensor) and v.grad is not None:
|
||||||
|
input_obj_grad[k] = v.grad
|
||||||
|
return input_obj_grad
|
||||||
|
|
||||||
|
def backward_w_step(
|
||||||
|
self,
|
||||||
|
model_chunk: Union[ModuleList, Module],
|
||||||
|
model_chunk_id: int,
|
||||||
|
optimizer: OptimizerWrapper,
|
||||||
|
output_obj: Union[dict, torch.Tensor],
|
||||||
|
output_obj_grad: Optional[dict],
|
||||||
|
):
|
||||||
|
"""Backward dw step of the pipeline; we calculate "dw = x*dy" here;
|
||||||
|
|
||||||
|
Args:
|
||||||
|
model_chunk (ModuleList or Module): Model Chunk to be run;
|
||||||
|
model_chunk_id (int): The current model chunk idx;
|
||||||
|
optimizer (OptimizerWrapper): Optimizer to update the model
|
||||||
|
output_obj (Union[dict, torch.Tensor]): y.
|
||||||
|
output_obj_grad (dict): dy.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Nothing to return; we only calculate dw and then update w.
|
||||||
|
"""
|
||||||
|
# calculate bwd w step ; only dw = x*dy;
|
||||||
|
|
||||||
|
# y, dy list for w backward_by_grad; Type: list[tensor];
|
||||||
|
output_obj_ = []
|
||||||
|
output_obj_grad_ = []
|
||||||
|
|
||||||
|
if model_chunk_id == 1 and self.stage_manager.is_first_stage(ignore_chunk=True):
|
||||||
|
# loss backward; output_obj is loss;
|
||||||
|
output_obj_.append(output_obj) # LOSS
|
||||||
|
output_obj_grad_.append(None) # None
|
||||||
|
else:
|
||||||
|
output_obj_, _ = tree_flatten(output_obj) # y
|
||||||
|
output_obj_grad_, _ = tree_flatten(output_obj_grad) # dy
|
||||||
|
|
||||||
|
# filter item which is not torch.Tensor
|
||||||
|
output_obj_ = [v for v in output_obj_ if isinstance(v, torch.Tensor) or v is None]
|
||||||
|
output_obj_grad_ = [v for v in output_obj_grad_ if isinstance(v, torch.Tensor) or v is None]
|
||||||
|
|
||||||
|
optimizer.backward_by_grad(
|
||||||
|
tensor=output_obj_,
|
||||||
|
grad=output_obj_grad_,
|
||||||
|
inputs=list(model_chunk.parameters()),
|
||||||
|
retain_graph=False,
|
||||||
|
)
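# --- Illustrative sketch (not part of this commit): the B/W split on a plain nn.Linear ---
# backward_b_step computes only dx (grad w.r.t. the input); backward_w_step computes only the
# parameter grads. With vanilla autograd the same split can be written as:
#
#   import torch
#
#   layer = torch.nn.Linear(4, 4)
#   x = torch.randn(2, 4, requires_grad=True)
#   y = layer(x)
#   dy = torch.randn_like(y)
#
#   # B step: dx only, keep the graph alive for the later W step
#   (dx,) = torch.autograd.grad(y, x, grad_outputs=dy, retain_graph=True)
#
#   # W step: parameter grads only (weight and bias); the graph can now be freed
#   dw, db = torch.autograd.grad(y, layer.parameters(), grad_outputs=dy)
#
# Decoupling the two lets the scheduler fill pipeline bubbles with W work.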
|
||||||
|
|
||||||
|
def schedule_f(
|
||||||
|
self,
|
||||||
|
scheduled_node,
|
||||||
|
model_chunk: torch.nn.ModuleList,
|
||||||
|
model_chunk_id: int,
|
||||||
|
criterion: Callable,
|
||||||
|
accum_loss: Optional[torch.Tensor] = None,
|
||||||
|
outputs: Optional[List[Any]] = None,
|
||||||
|
):
|
||||||
|
"""A complete forward schedule; Include recv fwd --> cal fwd --> send fwd;
|
||||||
|
|
||||||
|
Args:
|
||||||
|
scheduled_node:
|
||||||
|
model_chunk (ModuleList or Module): Model Chunk to be run;
|
||||||
|
model_chunk_id (int): The current model chunk idx;
|
||||||
|
criterion (Callable): loss function;
|
||||||
|
accum_loss (Optional[torch.Tensor], optional): Accumulated loss. Defaults to None.
|
||||||
|
outputs (Optional[List[Any]], optional): List to store the output of the last stage (final output). Defaults to None.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
Nothing.
|
||||||
|
"""
|
||||||
|
micro_batch = self.load_micro_batch(model_chunk_id=model_chunk_id)
|
||||||
|
# Step1: recv fwd
|
||||||
|
if model_chunk_id == 0:
|
||||||
|
# is first stage; get input from microbatch
|
||||||
|
if self.stage_manager.is_first_stage(ignore_chunk=True):
|
||||||
|
input_obj = None # (tensor, wait_handle)
|
||||||
|
else:
|
||||||
|
input_obj = self.recv_forward_buffer[model_chunk_id].pop(0)
|
||||||
|
for h in input_obj[1]:
|
||||||
|
h.wait()
|
||||||
|
input_obj = input_obj[0]
|
||||||
|
else:
|
||||||
|
# is last stage; recv from local
|
||||||
|
if self.stage_manager.is_last_stage(ignore_chunk=True):
|
||||||
|
input_obj = self.local_send_forward_buffer.pop(0)
|
||||||
|
# not last stage; recv from next
|
||||||
|
else:
|
||||||
|
input_obj = self.recv_forward_buffer[model_chunk_id].pop(0)
|
||||||
|
for h in input_obj[1]:
|
||||||
|
h.wait()
|
||||||
|
input_obj = input_obj[0]
|
||||||
|
# Here, let input_obj.requires_grad_()
|
||||||
|
# if input_obj is not None:
|
||||||
|
if not isinstance(input_obj, torch.Tensor):
|
||||||
|
tree_map(require_grad, input_obj)
|
||||||
|
|
||||||
|
# Also requires_grad_ for micro_batch in stage 0 chunk 0 fwd,
|
||||||
|
# tree_map(torch.Tensor.requires_grad_, micro_batch)
|
||||||
|
|
||||||
|
# Step2: fwd step
|
||||||
|
output_obj = self.forward_step(
|
||||||
|
model_chunk=model_chunk,
|
||||||
|
model_chunk_id=model_chunk_id,
|
||||||
|
micro_batch=micro_batch,
|
||||||
|
input_obj=input_obj,
|
||||||
|
criterion=criterion,
|
||||||
|
accum_loss=accum_loss,
|
||||||
|
outputs=outputs,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Step3:
|
||||||
|
# 3-1:detach output; detach output for send fwd;
|
||||||
|
if model_chunk_id == 1 and self.stage_manager.is_first_stage(ignore_chunk=True):
|
||||||
|
# We should not detach bwd LOSS
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
# detach output
|
||||||
|
detached_output_obj = tree_map(detach, output_obj)
|
||||||
|
# 3-2 clone detached_output_obj
|
||||||
|
detached_output_obj = tree_map(clone, detached_output_obj)
|
||||||
|
|
||||||
|
# 3-3 release cloned output.data; release_tensor_data output for bwd b & w; (do not detach output)
|
||||||
|
if model_chunk_id == 1 and self.stage_manager.is_first_stage(ignore_chunk=True):
|
||||||
|
# We should not release_tensor_data bwd LOSS
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
# release_tensor_data output
|
||||||
|
tree_map(release_tensor_data, output_obj)
|
||||||
|
|
||||||
|
# add input and output object for backward b
|
||||||
|
self.input_tensors[model_chunk_id].append(input_obj)
|
||||||
|
|
||||||
|
# for bwd b&w, we only need the graph(grad_fn) of output_obj
|
||||||
|
# Do not release_tensor_data loss, release_tensor_data other output_obj;
|
||||||
|
self.output_tensors[model_chunk_id].append(output_obj)  # loss for chunk 1 on the first stage, activation graph otherwise
|
||||||
|
|
||||||
|
# add output to send_fwd_buffer
|
||||||
|
if model_chunk_id == 0: # chunk 0
|
||||||
|
# is last stage; send to local_send_forward_buffer
|
||||||
|
if self.stage_manager.is_last_stage(ignore_chunk=True):
|
||||||
|
self.local_send_forward_buffer.append(detached_output_obj)
|
||||||
|
else:
|
||||||
|
self.send_forward_buffer[model_chunk_id].append(detached_output_obj)
|
||||||
|
else: # chunk 1
|
||||||
|
# is first stage; end of fwd; do nothing
|
||||||
|
if self.stage_manager.is_first_stage(ignore_chunk=True):
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
self.send_forward_buffer[model_chunk_id].append(detached_output_obj)
|
||||||
|
|
||||||
|
def schedule_b(
|
||||||
|
self,
|
||||||
|
scheduled_node,
|
||||||
|
model_chunk: Union[ModuleList, Module],
|
||||||
|
model_chunk_id: int,
|
||||||
|
optimizer: OptimizerWrapper,
|
||||||
|
):
|
||||||
|
"""A complete backward b schedule; Include recv bwd --> cal bwd step --> send bwd;
|
||||||
|
|
||||||
|
Args:
|
||||||
|
scheduled_node:
|
||||||
|
model_chunk (ModuleList or Module): Model Chunk to be run;
|
||||||
|
model_chunk_id (int): The current model chunk idx;
|
||||||
|
Returns:
|
||||||
|
Nothing.
|
||||||
|
"""
|
||||||
|
# Step1: recv bwd
|
||||||
|
if model_chunk_id == 0:
|
||||||
|
# chunk0 is last stage; recv output_grad from local_send_backward_buffer
|
||||||
|
if self.stage_manager.is_last_stage(ignore_chunk=True):
|
||||||
|
output_tensor_grad = self.local_send_backward_buffer.pop(0)
|
||||||
|
# chunk0 not last stage; recv output_grad from recv_backward_buffer
|
||||||
|
else:
|
||||||
|
output_tensor_grad = self.recv_backward_buffer[model_chunk_id].pop(0)
|
||||||
|
for h in output_tensor_grad[1]:
|
||||||
|
h.wait()
|
||||||
|
output_tensor_grad = output_tensor_grad[0]
|
||||||
|
else:
|
||||||
|
# chunk1, is first stage; recv LOSS from local send bwd buffer
|
||||||
|
if self.stage_manager.is_first_stage(ignore_chunk=True):
|
||||||
|
output_tensor_grad = None
|
||||||
|
# chunk1, not first stage; recv output_grad from recv_backward_buffer
|
||||||
|
else:
|
||||||
|
output_tensor_grad = self.recv_backward_buffer[model_chunk_id].pop(0)
|
||||||
|
for h in output_tensor_grad[1]:
|
||||||
|
h.wait()
|
||||||
|
output_tensor_grad = output_tensor_grad[0]
|
||||||
|
|
||||||
|
# get input and output object from buffer;
|
||||||
|
input_obj = self.input_tensors[model_chunk_id].pop(0)
|
||||||
|
output_obj = self.output_tensors[model_chunk_id].pop(0)
|
||||||
|
|
||||||
|
input_object_grad = self.backward_b_step(
|
||||||
|
model_chunk=model_chunk,
|
||||||
|
model_chunk_id=model_chunk_id,
|
||||||
|
optimizer=optimizer,
|
||||||
|
input_obj=input_obj,
|
||||||
|
output_obj=output_obj,
|
||||||
|
output_obj_grad=output_tensor_grad,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Step3: send bwd
|
||||||
|
if model_chunk_id == 0:
|
||||||
|
# do nothing; end of bwd;
|
||||||
|
if self.stage_manager.is_first_stage(ignore_chunk=True):
|
||||||
|
pass
|
||||||
|
# save input_object_grad to send_backward_buffer
|
||||||
|
else:
|
||||||
|
self.send_backward_buffer[model_chunk_id].append(input_object_grad)
|
||||||
|
else:
|
||||||
|
# send to local_send_backward_buffer
|
||||||
|
if self.stage_manager.is_last_stage(ignore_chunk=True):
|
||||||
|
self.local_send_backward_buffer.append(input_object_grad)
|
||||||
|
# send to next
|
||||||
|
else:
|
||||||
|
self.send_backward_buffer[model_chunk_id].append(input_object_grad)
|
||||||
|
WeightGradStore.flush(chunk=model_chunk_id)
|
||||||
|
|
||||||
|
def schedule_w(
|
||||||
|
self,
|
||||||
|
scheduled_node,
|
||||||
|
model_chunk: Union[ModuleList, Module],
|
||||||
|
model_chunk_id: int,
|
||||||
|
optimizer: OptimizerWrapper,
|
||||||
|
):
|
||||||
|
"""A complete backward w schedule; Include get y & dy from buffer --> cal bwd w step(cal dw & update w);
|
||||||
|
|
||||||
|
Args:
|
||||||
|
scheduled_node:
|
||||||
|
model_chunk (ModuleList or Module): Model Chunk to be run;
|
||||||
|
model_chunk_id (int): The current model chunk idx;
|
||||||
|
Returns:
|
||||||
|
Nothing.
|
||||||
|
"""
|
||||||
|
WeightGradStore.pop(chunk=model_chunk_id)
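# --- Illustrative sketch (not part of this commit): the flush/pop contract used above ---
# WeightGradStore (colossalai.pipeline.weight_grad_store) queues the weight-grad work recorded
# during a B step; flush(chunk=i) seals it as one unit and pop(chunk=i) executes that unit later,
# during the matching W node. A toy stand-in with the same contract (names are illustrative,
# this is not the real implementation):
#
#   from collections import deque
#
#   class ToyWeightGradStore:
#       cache = []                      # callables recorded during the current B step
#       queues = {0: deque(), 1: deque()}
#
#       @classmethod
#       def put(cls, fn):
#           cls.cache.append(fn)
#
#       @classmethod
#       def flush(cls, chunk=0):
#           cls.queues[chunk].append(cls.cache)
#           cls.cache = []
#
#       @classmethod
#       def pop(cls, chunk=0):
#           for fn in cls.queues[chunk].popleft():
#               fn()                    # compute dw and accumulate into param.grad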
|
||||||
|
|
||||||
|
def run_forward_only(
|
||||||
|
self,
|
||||||
|
model_chunk: Union[ModuleList, Module],
|
||||||
|
data_iter: Iterable,
|
||||||
|
criterion: Callable[..., Any],
|
||||||
|
return_loss: bool = False,
|
||||||
|
return_outputs: bool = False,
|
||||||
|
) -> Dict:
|
||||||
|
assert self.forward_only
|
||||||
|
|
||||||
|
# prepare batch
|
||||||
|
self.load_batch(data_iter)
|
||||||
|
|
||||||
|
# prepare accum loss & output
|
||||||
|
accum_loss = None
|
||||||
|
|
||||||
|
# reset accum loss at fwd end;
|
||||||
|
if return_loss and self.stage_manager.is_first_stage(ignore_chunk=True):
|
||||||
|
accum_loss = torch.scalar_tensor(0, device=get_accelerator().get_current_device())
|
||||||
|
|
||||||
|
outputs = [] if return_outputs and self.stage_manager.is_first_stage(ignore_chunk=True) else None
|
||||||
|
|
||||||
|
# walk through the scheduled nodes one by one
|
||||||
|
for it in range(len(self.schedules)):
|
||||||
|
scheduled_node = self.schedules[it]
|
||||||
|
|
||||||
|
if scheduled_node.type in {"RECV_FORWARD", "SEND_FORWARD"}:
|
||||||
|
# communication
|
||||||
|
communication_func = self.communication_map[scheduled_node.type]
|
||||||
|
communication_func(scheduled_node.chunk)
|
||||||
|
if scheduled_node.type == "F":
|
||||||
|
self.schedule_f(
|
||||||
|
scheduled_node=scheduled_node,
|
||||||
|
model_chunk=model_chunk,
|
||||||
|
model_chunk_id=scheduled_node.chunk,
|
||||||
|
criterion=criterion,
|
||||||
|
accum_loss=accum_loss,
|
||||||
|
outputs=outputs,
|
||||||
|
)
|
||||||
|
# return loss & output
|
||||||
|
if outputs is not None:
|
||||||
|
outputs = merge_batch(outputs)
|
||||||
|
return {"loss": accum_loss, "outputs": outputs}
|
||||||
|
|
||||||
|
    def run_forward_backward(
        self,
        model_chunk: Union[ModuleList, Module],
        data_iter: Iterable,
        criterion: Callable[..., Any],
        optimizer: Optional[OptimizerWrapper] = None,
        return_loss: bool = False,
        return_outputs: bool = False,
    ) -> Dict:
        """
        Runs the ZeroBubble schedule, with communication between pipeline stages.
        """
        # prepare batch
        self.load_batch(data_iter)

        # prepare accum loss & output
        accum_loss = None

        # the loss is computed on the first physical stage (where the last chunk of the V schedule ends), so accumulate it there
        if return_loss and self.stage_manager.is_first_stage(ignore_chunk=True):
            accum_loss = torch.scalar_tensor(0, device=get_accelerator().get_current_device())

        outputs = [] if return_outputs and self.stage_manager.is_first_stage(ignore_chunk=True) else None

        # iterate over the scheduled nodes assigned to this stage (rank)
        schedule = self.schedules[self.stage_manager.stage]
        for it in range(len(schedule)):
            scheduled_node = schedule[it]
            if scheduled_node.type in AUTO_SCHEDULE_COMMUNICATION_TYPES:
                # communication
                communication_func = self.communication_map[scheduled_node.type]
                wait_handle = communication_func(scheduled_node.chunk)
                # recv handles are waited on inside the fwd/bwd steps; only send handles need to be tracked here
                if scheduled_node.type in {"SEND_FORWARD", "SEND_BACKWARD"}:
                    self.wait_handles.append(wait_handle)
            elif scheduled_node.type == "F":
                self.schedule_f(
                    scheduled_node=scheduled_node,
                    model_chunk=model_chunk,
                    model_chunk_id=scheduled_node.chunk,
                    criterion=criterion,
                    accum_loss=accum_loss,
                    outputs=outputs,
                )
            elif scheduled_node.type == "B":
                self.schedule_b(
                    scheduled_node=scheduled_node,
                    model_chunk=model_chunk,
                    model_chunk_id=scheduled_node.chunk,
                    optimizer=optimizer,
                )
            elif scheduled_node.type == "W":
                self.schedule_w(
                    scheduled_node=scheduled_node,
                    model_chunk=model_chunk,
                    model_chunk_id=scheduled_node.chunk,
                    optimizer=optimizer,
                )
        # wait here to ensure all communication is done
        for h in self.wait_handles:
            for hh in h:
                hh.wait()
        # return loss & output
        if outputs is not None:
            outputs = merge_batch(outputs)
        return {"loss": accum_loss, "outputs": outputs}

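The wait_handles bookkeeping above follows the usual non-blocking P2P pattern in torch.distributed: isend/irecv return work handles, receives are waited on right before the tensor is consumed, and outstanding sends are only drained at the end of the schedule. A stand-alone sketch of that pattern, assuming a 2-rank process group has already been initialized (tensor shapes and contents are illustrative):

import torch
import torch.distributed as dist

def p2p_step(rank: int) -> None:
    # Assumes dist.init_process_group(...) was called with world_size == 2.
    send_handles = []
    if rank == 0:
        t = torch.ones(4)
        # Non-blocking send: keep the handle, keep computing, drain it later.
        send_handles.append(dist.isend(t, dst=1))
    else:
        buf = torch.empty(4)
        recv_handle = dist.irecv(buf, src=0)
        recv_handle.wait()      # recv must complete before buf is consumed
        assert torch.equal(buf, torch.ones(4))
    # ... overlap other work here ...
    for h in send_handles:      # drain outstanding sends at the end, as run_forward_backward does
        h.wait()
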
    def forward_backward_step(
        self,
        model_chunk: Union[ModuleList, Module],
        data_iter: Iterable,
        criterion: Callable[..., Any],
        optimizer: Optional[OptimizerWrapper] = None,
        return_loss: bool = False,
        return_outputs: bool = False,
    ) -> dict:
        """
        Args:
            model_chunk (ModuleList or Module): Model chunk to be trained. The original interleaved schedule uses a module list, whereas shardformer uses the entire model plus a layer specification.
            data_iter (Iterable): Data iterator.
            criterion (Callable[[Any, Any], Tensor]): Criterion to be used. It should take two arguments: model outputs and inputs, and return a loss tensor.
            optimizer (OptimizerWrapper, optional): Optimizer to be used. Can be None when only the forward pass is executed. Defaults to None.
            return_loss (bool, optional): Whether to return the loss. Defaults to False.
            return_outputs (bool, optional): Whether to return the model outputs. Defaults to False.

        Returns:
            dict: A dict with keys 'loss' and 'outputs'.
        """
        self.forward_only = not torch.is_grad_enabled()
        if optimizer is None:
            assert self.forward_only, "Optimizer should be passed when doing backward."

        if self.forward_only:
            result = self.run_forward_only(model_chunk, data_iter, criterion, return_loss, return_outputs)
        else:
            result = self.run_forward_backward(
                model_chunk, data_iter, criterion, optimizer, return_loss, return_outputs
            )

        self.assert_buffer_empty()
        return result
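forward_backward_step infers eval vs. training mode from torch.is_grad_enabled(), so callers toggle forward-only execution with the usual grad context rather than an explicit flag. A minimal runnable sketch of that mode decision (pick_mode is a hypothetical helper, not part of the scheduler):

import torch

def pick_mode() -> str:
    # Mirrors the first line of forward_backward_step: run forward-only when grads are disabled.
    return "forward_only" if not torch.is_grad_enabled() else "forward_backward"

assert pick_mode() == "forward_backward"   # e.g. inside a normal training step
with torch.no_grad():
    assert pick_mode() == "forward_only"   # e.g. during evaluation

In practice this means an evaluation loop simply wraps the forward_backward_step call in torch.no_grad() and passes no optimizer, which is exactly the case the assertion above permits.
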
@@ -0,0 +1,32 @@
import queue


class WeightGradStore:

    cache = []
    weight_grad_queue = [queue.Queue(), queue.Queue()]

    @classmethod
    def put(cls, total_input, grad_output, weight, func):
        # func(total_input, grad_output, weight.main_grad)
        cls.cache.append((total_input, grad_output, weight, func))

    @classmethod
    def flush(cls, chunk=0):
        cls.weight_grad_queue[chunk].put(cls.cache)
        cls.cache = []

    @classmethod
    def pop(cls, chunk=0):
        # print(f"chunk id {chunk} queue size {cls.weight_grad_queue[chunk].qsize()}")
        if cls.weight_grad_queue[chunk].qsize() > 0:
            stored_grads = cls.weight_grad_queue[chunk].get()
            for total_input, grad_output, weight, func in stored_grads:
                if weight.grad is not None:
                    func(total_input, grad_output, weight.grad)
                # for the first bwd, weight.grad is None: assign grad_weight to weight.grad
                else:
                    grad_weight = func(total_input, grad_output)
                    weight.grad = grad_weight
        else:
            raise Exception("Pop empty queue.")
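put, flush and pop map directly onto the B/W split: each deferred weight-gradient computation is put into the cache during a backward-B step, flush seals the cache into the per-chunk queue when that B step finishes (see schedule_b), and the matching W step later pops and executes it (see schedule_w). A toy walk-through using the WeightGradStore defined above (linear_dw is an illustrative stand-in for the real per-layer dW kernels):

import torch

def linear_dw(total_input, grad_output, grad_buffer=None):
    # Illustrative dW kernel for y = x @ w.T; accumulates in place when a grad buffer is given.
    dw = grad_output.t() @ total_input
    if grad_buffer is None:
        return dw
    grad_buffer += dw

x = torch.randn(4, 8)                       # saved layer input
dy = torch.randn(4, 16)                     # incoming output gradient
w = torch.nn.Parameter(torch.randn(16, 8))  # layer weight, w.grad is still None

# Backward-B step for chunk 0: defer the weight gradient instead of computing it now.
WeightGradStore.put(x, dy, w, linear_dw)
WeightGradStore.flush(chunk=0)   # called at the end of schedule_b

# Later, the matching W step drains the queue and materializes w.grad.
WeightGradStore.pop(chunk=0)     # called by schedule_w
assert torch.allclose(w.grad, dy.t() @ x)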