import copy
from contextlib import nullcontext
from typing import Any, Callable, Dict, List, Optional, Type

import torch
import torch.distributed as dist
from torch import Tensor
from torch.distributed import ProcessGroup
from torch.nn import Module
from torch.optim import Adam, Optimizer
from torch.testing import assert_close
from transformers.modeling_outputs import BaseModelOutputWithPast

from colossalai.accelerator import get_accelerator
from colossalai.booster import Booster
from colossalai.booster.plugin import HybridParallelPlugin, LowLevelZeroPlugin
from colossalai.booster.plugin.hybrid_parallel_plugin import HybridParallelModule
from colossalai.checkpoint_io.utils import gather_distributed_param
from colossalai.lazy import LazyInitContext
from colossalai.nn.optimizer import GaLoreAdamW8bit
from colossalai.nn.optimizer.galore import get_galore_param_groups
from colossalai.pipeline.stage_manager import PipelineStageManager
from colossalai.shardformer import ShardConfig, ShardFormer
from colossalai.shardformer._utils import getattr_
from colossalai.shardformer.policies.auto_policy import Policy
from colossalai.tensor.d_tensor.api import is_customized_distributed_tensor, is_distributed_tensor
from colossalai.tensor.padded_tensor.api import is_padded_tensor, to_unpadded_tensor


def build_model(
    model_fn,
    enable_fused_normalization=True,
    enable_tensor_parallelism=True,
    enable_flash_attention=False,
    enable_jit_fused=False,
    enable_sequence_parallelism=False,
    use_lazy_init: bool = False,
    dtype=torch.float32,
):
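    """Build an unsharded model and a ShardFormer-sharded copy of it; both are returned on CUDA in the given dtype."""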
    # create new model
    ctx = LazyInitContext() if use_lazy_init else nullcontext()
    with ctx:
        org_model = model_fn()
    if use_lazy_init:
        ctx.materialize(org_model)

    # shard model
    shard_config = ShardConfig(
        enable_fused_normalization=enable_fused_normalization,
        enable_tensor_parallelism=enable_tensor_parallelism,
        enable_flash_attention=enable_flash_attention,
        enable_jit_fused=enable_jit_fused,
        enable_sequence_parallelism=enable_sequence_parallelism,
    )
    model_copy = copy.deepcopy(org_model)
    shard_former = ShardFormer(shard_config=shard_config)
    sharded_model, shared_params = shard_former.optimize(model_copy)
    return org_model.cuda().to(dtype), sharded_model.cuda().to(dtype)


def build_pipeline_model(
    model_fn,
    stage_manager=None,
    enable_fused_normalization=False,
    enable_tensor_parallelism=False,
    use_lazy_init: bool = False,
    policy: Optional[Policy] = None,
):
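    """Build an unsharded model and a pipeline-sharded copy driven by `stage_manager`, optionally with a custom policy."""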
    ctx = LazyInitContext() if use_lazy_init else nullcontext()
    with ctx:
        # create new model
        org_model = model_fn()
        model_copy = copy.deepcopy(org_model)
    if use_lazy_init:
        ctx.materialize(org_model)

    # shard model
    shard_config = ShardConfig(
        enable_fused_normalization=enable_fused_normalization,
        enable_tensor_parallelism=enable_tensor_parallelism,
        pipeline_stage_manager=stage_manager,
    )

    shard_former = ShardFormer(shard_config=shard_config)
    sharded_model, shared_params = shard_former.optimize(model_copy, policy=policy)
    return org_model.cuda(), sharded_model.cuda()


def run_forward(original_model, sharded_model, data_gen_fn, output_transform_fn, loss_fn):
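    """Run a single forward pass through both models and return their transformed outputs and losses."""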
    # prepare input
    data = data_gen_fn()
    data = {k: v.cuda() for k, v in data.items()}

    # switch to train mode
    original_model.train()
    sharded_model.train()

    # run forward
    org_output = original_model(**data)
    org_output = output_transform_fn(org_output)
    org_loss = loss_fn(org_output)

    shard_output = sharded_model(**data)
    shard_output = output_transform_fn(shard_output)
    shard_loss = loss_fn(shard_output)
    return org_output, org_loss, shard_output, shard_loss


def check_state_dict(org_model: Module, sharded_model: Module, name: str = ""):
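    """Assert that the sharded model's state dict matches the original in keys, shapes, dtypes, and values."""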
    org_sd = org_model.state_dict()
    shard_sd = sharded_model.state_dict()
    for k, v in org_sd.items():
        assert k in shard_sd, f"{name} {k} not in sharded model"
        shard_v = shard_sd[k]
        assert v.shape == shard_v.shape, f"{name} {k} shape mismatch, {v.shape} vs {shard_v.shape}"
        assert v.dtype == shard_v.dtype, f"{name} {k} dtype mismatch, {v.dtype} vs {shard_v.dtype}"
        assert torch.equal(v, shard_v), f"{name} {k} value mismatch"


def build_model_from_hybrid_plugin(
    model_fn: Callable,
    loss_fn: Callable,
    test_config: Dict[str, Any],
    optim_class=Adam,
    sharded_optim_class=Adam,
    pluggin_cls: Type[HybridParallelPlugin] = HybridParallelPlugin,
):
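    """Build an original model/optimizer pair plus a deep copy boosted by `pluggin_cls(**test_config)`,
    returning both together with the criterion and the booster."""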
    use_lazy_init = False
    if "use_lazy_init" in test_config:
        use_lazy_init = test_config.pop("use_lazy_init")

    ctx = LazyInitContext() if use_lazy_init else nullcontext()
    with ctx:
        org_model = model_fn()
        sharded_model = copy.deepcopy(org_model)
    if use_lazy_init:
        ctx.materialize(org_model)
    org_model = org_model.cuda()
    if optim_class == GaLoreAdamW8bit:
        # Disable clipping and block-wise quantization
        org_optimizer = optim_class(
            get_galore_param_groups(org_model, weight_decay=0, rank=4),
            lr=1e-3,
            percentile_clipping=101,
            block_wise=False,
            min_8bit_size=1e10,
        )
        sharded_optimizer = sharded_optim_class(
            get_galore_param_groups(sharded_model, weight_decay=0, rank=4),
            lr=1e-3,
            percentile_clipping=101,
            block_wise=False,
            min_8bit_size=1e10,
        )
    else:
        org_optimizer = optim_class(org_model.parameters(), lr=1e-3)
        sharded_optimizer = sharded_optim_class(sharded_model.parameters(), lr=1e-3)

    criterion = loss_fn
    plugin = pluggin_cls(**test_config)
    booster = Booster(plugin=plugin)

    sharded_model, sharded_optimizer, criterion, _, _ = booster.boost(sharded_model, sharded_optimizer, criterion)
    return (
        org_model,
        org_optimizer,
        sharded_model,
        sharded_optimizer,
        criterion,
        booster,
    )


def build_model_from_low_level_zero_plugin(
    model_fn: Callable, loss_fn: Callable, test_config: Dict[str, Any], optim_class=Adam, sharded_optim_class=Adam
):
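    """Build an original model/optimizer pair plus a deep copy boosted by `LowLevelZeroPlugin(**test_config)`."""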
    use_lazy_init = False
    if "use_lazy_init" in test_config:
        use_lazy_init = test_config.pop("use_lazy_init")

    ctx = LazyInitContext() if use_lazy_init else nullcontext()
    with ctx:
        org_model = model_fn()
        sharded_model = copy.deepcopy(org_model)
    if use_lazy_init:
        ctx.materialize(org_model)

    org_model = org_model.cuda()
    org_optimizer = optim_class(org_model.parameters(), lr=1e-3)
    sharded_optimizer = sharded_optim_class(sharded_model.parameters(), lr=1e-3)
    criterion = loss_fn

    plugin = LowLevelZeroPlugin(**test_config)
    booster = Booster(plugin=plugin)

    sharded_model, sharded_optimizer, criterion, _, _ = booster.boost(sharded_model, sharded_optimizer, criterion)
    return org_model, org_optimizer, sharded_model, sharded_optimizer, criterion, booster


def run_forward_backward_with_hybrid_plugin(
    org_model: Module,
    sharded_model: Module,
    sharded_optimizer: Optimizer,
    data_gen_fn: Callable,
    output_transform_fn: Callable,
    criterion: Callable,
    booster: Booster,
):
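    """Run one forward/backward step on the original model and the boosted sharded model, using
    `booster.execute_pipeline` when the plugin has a stage manager, and return both losses and outputs."""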
    org_model.cuda()
    sharded_model.cuda()

    def _criterion(outputs, inputs):
        outputs = output_transform_fn(outputs)
        loss = criterion(outputs)
        return loss

    data = data_gen_fn()

    shard_test_data = {}
    for k, v in data.items():
        shard_test_data[k] = data[k].clone()
    unshard_test_data = {}
    for k, v in data.items():
        unshard_test_data[k] = data[k].clone()

    sharded_model.train()
    if booster.plugin.stage_manager is not None:
        for k, v in shard_test_data.items():
            if torch.is_tensor(v) or "Tensor" in v.__class__.__name__:
                new_shape = [1] * v.dim()
                new_shape[0] = 4
                shard_test_data[k] = v.to("cuda").repeat(*new_shape)

        data_iter = iter([shard_test_data])
        sharded_output = booster.execute_pipeline(
            data_iter,
            sharded_model,
            _criterion,
            sharded_optimizer,
            return_loss=True,
            return_outputs=True,
        )
        sharded_loss = sharded_output["loss"]
    else:
        shard_test_data = {k: v.cuda() for k, v in shard_test_data.items()}
        sharded_output = sharded_model(**shard_test_data)
        sharded_loss = criterion(sharded_output)
        sharded_optimizer.backward(sharded_loss)

    org_model.train()
    if booster.plugin.stage_manager is not None:
        for k, v in unshard_test_data.items():
            if torch.is_tensor(v) or "Tensor" in v.__class__.__name__:
                new_shape = [1] * v.dim()
                new_shape[0] = 4
                unshard_test_data[k] = v.to("cuda").repeat(*new_shape)
    unshard_test_data = {k: v.cuda() for k, v in unshard_test_data.items()}
    org_output = org_model(**unshard_test_data)
    org_loss = criterion(org_output)
    org_loss.backward()
    return org_loss, org_output, sharded_loss, sharded_output


def run_forward_backward_with_low_level_zero_plugin(
|
|
|
|
org_model: Module,
|
|
|
|
sharded_model: Module,
|
|
|
|
sharded_optimizer: Optimizer,
|
|
|
|
data_gen_fn: Callable,
|
|
|
|
output_transform_fn: Callable,
|
|
|
|
criterion: Callable,
|
|
|
|
booster: Booster,
|
|
|
|
):
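    """Run one forward/backward step on the original model and the ZeRO-boosted sharded model and return both losses and outputs."""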
    org_model.cuda()
    sharded_model.cuda()

    def _criterion(outputs, inputs):
        outputs = output_transform_fn(outputs)
        loss = criterion(outputs)
        return loss

    data = data_gen_fn()
    data = {k: v.cuda() for k, v in data.items()}

    sharded_model.train()
    sharded_output = sharded_model(**data)
    sharded_loss = criterion(sharded_output)
    sharded_optimizer.backward(sharded_loss)

    org_model.train()
    org_output = org_model(**data)
    org_loss = criterion(org_output)
    org_loss.backward()

    return org_loss, org_output, sharded_loss, sharded_output


def check_output_hidden_state(
    org_output: BaseModelOutputWithPast,
    sharded_output: BaseModelOutputWithPast,
    stage_manager: Optional[PipelineStageManager] = None,
    atol: float = 1e-5,
    rtol: float = 1e-3,
    shard_config: Optional[ShardConfig] = None,
):
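    """Compare `last_hidden_state` of the original and sharded outputs, accounting for pipeline stages
    (including ZBV schedules) and sequence parallelism."""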
    org_hidden_state = org_output.last_hidden_state
    if stage_manager:
        if stage_manager.use_zbv:
            if stage_manager.is_first_stage(ignore_chunk=True):
                sharded_hidden_state = sharded_output["outputs"]["last_hidden_state"]
            else:
                sharded_hidden_state = sharded_output.last_hidden_state
        elif stage_manager.is_last_stage(ignore_chunk=True):
            sharded_hidden_state = sharded_output["outputs"]["last_hidden_state"]
        else:
            sharded_hidden_state = sharded_output.last_hidden_state
    else:
        sharded_hidden_state = sharded_output.last_hidden_state

    # Check if the output sequence is gathered before cross entropy
    if shard_config is not None:
        seq_dim = 1
        sp_group = shard_config.sequence_parallel_process_group
        sp_size = shard_config.sequence_parallel_size
        if org_hidden_state.shape[seq_dim] == sharded_hidden_state.shape[seq_dim] * sp_size:
            org_hidden_state = org_hidden_state.chunk(sp_size, dim=seq_dim)[dist.get_rank(sp_group)]
    assert_close(org_hidden_state.float(), sharded_hidden_state.float(), atol=atol, rtol=rtol)


def check_loss(org_loss: Tensor, sharded_loss: Tensor, atol: float = 1e-5, rtol: float = 1e-3):
    assert_close(org_loss.float(), sharded_loss.float(), atol=atol, rtol=rtol)


def check_weight(
    org_model: Module,
    sharded_model: Module,
    layer_suffix: List[str],
    tp_group: Optional[ProcessGroup] = None,
    dim: int = 0,
    atol: float = 1e-5,
    rtol: float = 1e-3,
    verbose: bool = False,
):
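    """Compare the weights of the layers named in `layer_suffix` between the original and the sharded model."""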
    for suffix in layer_suffix:
        org_weight = getattr_(org_model, suffix).weight
        sharded_weight = getattr_(sharded_model, suffix).weight

        # skip if layer is not held by this process
        if sharded_weight is None:
            continue

        if is_distributed_tensor(sharded_weight) or is_customized_distributed_tensor(sharded_weight):
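            # A minimal completion sketch (an assumption, not the verbatim original): gather the
            # TP-sharded weight with the checkpoint-io helper imported above, then strip any
            # vocabulary padding before comparing against the unsharded weight.
            sharded_weight = gather_distributed_param(sharded_weight, keep_vars=False)
        if is_padded_tensor(sharded_weight):
            sharded_weight = to_unpadded_tensor(sharded_weight)

        if verbose and dist.get_rank() == 0:
            print(f"'{suffix}' weight: {org_weight}, {sharded_weight}")
        assert_close(org_weight.float(), sharded_weight.float(), atol=atol, rtol=rtol)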
[shardformer] refactor embedding resize (#5603)
* [branch rebase] rebase main to Feature/resize_embedding (#5554)
* fix
* [release] update version (#5411)
* [hotfix] fix typo s/keywrods/keywords etc. (#5429)
* [devops] fix compatibility (#5444)
* [devops] fix compatibility
* [hotfix] update compatibility test on pr
* [devops] fix compatibility
* [devops] record duration during comp test
* [test] decrease test duration
* fix falcon
* [shardformer] fix gathering output when using tensor parallelism (#5431)
* fix
* padding vocab_size when using pipeline parallellism
padding vocab_size when using pipeline parallellism
fix
fix
* fix
* fix
fix
fix
* fix gather output
* fix
* fix
* fix
fix resize embedding
fix resize embedding
* fix resize embedding
fix
* revert
* revert
* revert
* [doc] release Open-Sora 1.0 with model weights (#5468)
* [doc] release Open-Sora 1.0 with model weights
* [doc] release Open-Sora 1.0 with model weights
* [doc] release Open-Sora 1.0 with model weights
* [doc] update open-sora demo (#5479)
* [doc] update open-sora demo
* [doc] update open-sora demo
* [doc] update open-sora demo
* [example] add grok-1 inference (#5485)
* [misc] add submodule
* remove submodule
* [example] support grok-1 tp inference
* [example] add grok-1 inference script
* [example] refactor code
* [example] add grok-1 readme
* [exmaple] add test ci
* [exmaple] update readme
---------
Co-authored-by: Hongxin Liu <lhx0217@gmail.com>
Co-authored-by: digger yu <digger-yu@outlook.com>
Co-authored-by: binmakeswell <binmakeswell@gmail.com>
* [CI] run pre-commit (#5577)
* fix
* [release] update version (#5411)
* [hotfix] fix typo s/keywrods/keywords etc. (#5429)
* [devops] fix compatibility (#5444)
* [devops] fix compatibility
* [hotfix] update compatibility test on pr
* [devops] fix compatibility
* [devops] record duration during comp test
* [test] decrease test duration
* fix falcon
* [shardformer] fix gathering output when using tensor parallelism (#5431)
* fix
* padding vocab_size when using pipeline parallellism
padding vocab_size when using pipeline parallellism
fix
fix
* fix
* fix
fix
fix
* fix gather output
* fix
* fix
* fix
fix resize embedding
fix resize embedding
* fix resize embedding
fix
* revert
* revert
* revert
* [doc] release Open-Sora 1.0 with model weights (#5468)
* [doc] release Open-Sora 1.0 with model weights
* [doc] release Open-Sora 1.0 with model weights
* [doc] release Open-Sora 1.0 with model weights
* [doc] update open-sora demo (#5479)
* [doc] update open-sora demo
* [doc] update open-sora demo
* [doc] update open-sora demo
* [example] add grok-1 inference (#5485)
* [misc] add submodule
* remove submodule
* [example] support grok-1 tp inference
* [example] add grok-1 inference script
* [example] refactor code
* [example] add grok-1 readme
* [exmaple] add test ci
* [exmaple] update readme
* run pre-commit
---------
Co-authored-by: Hongxin Liu <lhx0217@gmail.com>
Co-authored-by: digger yu <digger-yu@outlook.com>
Co-authored-by: binmakeswell <binmakeswell@gmail.com>
* [rebase] rebase main to resize-embedding (#5581)
* [release] grok-1 314b inference (#5490)
* [release] grok-1 inference
* [release] grok-1 inference
* [release] grok-1 inference
* [example] update Grok-1 inference (#5495)
* revise grok-1 example
* remove unused arg in scripts
* prevent re-installing torch
* update readme
* revert modifying colossalai requirements
* add perf
* trivial
* add tokenizer url
* [hotfix] set return_outputs=False in examples and polish code (#5404)
* fix: simplify merge_batch
* fix: use return_outputs=False to eliminate extra memory consumption
* feat: add return_outputs warning
* style: remove `return_outputs=False` as it is the default value
* [release] grok-1 inference benchmark (#5500)
* [release] grok-1 inference benchmark
* [release] grok-1 inference benchmark
* [release] grok-1 inference benchmark
* [release] grok-1 inference benchmark
* [release] grok-1 inference benchmark
* [shardformer]Fix lm parallel. (#5480)
* fix
* padding vocab_size when using pipeline parallellism
padding vocab_size when using pipeline parallellism
fix
fix
* fix
* fix
fix
fix
* fix gather output
* fix
* fix
* fix
fix resize embedding
fix resize embedding
* fix resize embedding
fix
* revert
* revert
* revert
* fix lm forward distribution
* fix
* test ci
* fix
* [fix] fix grok-1 example typo (#5506)
* [devops] fix example test ci (#5504)
* Fix ColoTensorSpec for py11 (#5440)
* fixed layout converter caching and updated tester
* Empty-Commit
* [shardformer] update colo attention to support custom mask (#5510)
* [feature] refactor colo attention (#5462)
* [extension] update api
* [feature] add colo attention
* [feature] update sdpa
* [feature] update npu attention
* [feature] update flash-attn
* [test] add flash attn test
* [test] update flash attn test
* [shardformer] update modeling to fit colo attention (#5465)
* [misc] refactor folder structure
* [shardformer] update llama flash-attn
* [shardformer] fix llama policy
* [devops] update tensornvme install
* [test] update llama test
* [shardformer] update colo attn kernel dispatch
* [shardformer] update blip2
* [shardformer] update chatglm
* [shardformer] update gpt2
* [shardformer] update gptj
* [shardformer] update opt
* [shardformer] update vit
* [shardformer] update colo attention mask prep
* [shardformer] update whisper
* [test] fix shardformer tests (#5514)
* [test] fix shardformer tests
* [test] fix shardformer tests
* [format] applied code formatting on changed files in pull request 5510 (#5517)
Co-authored-by: github-actions <github-actions@github.com>
* [shardformer] fix pipeline forward error if custom layer distribution is used (#5189)
* Use self.[distribute_layers|get_stage_index] to exploit custom layer distribution
* Change static methods for t5 layer distribution to member functions
* Change static methods for whisper layer distribution to member functions
* Replace whisper policy usage with self one
* Fix test case to use non-static layer distribution methods
* fix: fix typo
---------
Co-authored-by: Wenhao Chen <cwher@outlook.com>
* [Fix] Grok-1 use tokenizer from the same pretrained path (#5532)
* [fix] use tokenizer from the same pretrained path
* trust remote code
* [ColossalChat] Update RLHF V2 (#5286)
* Add dpo. Fix sft, ppo, lora. Refactor all
* fix and tested ppo
* 2 nd round refactor
* add ci tests
* fix ci
* fix ci
* fix readme, style
* fix readme style
* fix style, fix benchmark
* reproduce benchmark result, remove useless files
* rename to ColossalChat
* use new image
* fix ci workflow
* fix ci
* use local model/tokenizer for ci tests
* fix ci
* fix ci
* fix ci
* fix ci timeout
* fix rm progress bar. fix ci timeout
* fix ci
* fix ci typo
* remove 3d plugin from ci temporary
* test environment
* cannot save optimizer
* support chat template
* fix readme
* fix path
* test ci locally
* restore build_or_pr
* fix ci data path
* fix benchmark
* fix ci, move ci tests to 3080, disable fast tokenizer
* move ci to 85
* support flash attention 2
* add all-in-one data preparation script. Fix colossal-llama2-chat chat template
* add hardware requirements
* move ci test data
* fix save_model, add unwrap
* fix missing bos
* fix missing bos; support grad accumulation with gemini
* fix ci
* fix ci
* fix ci
* fix llama2 chat template config
* debug sft
* debug sft
* fix colossalai version requirement
* fix ci
* add sanity check to prevent NaN loss
* fix requirements
* add dummy data generation script
* add dummy data generation script
* add dummy data generation script
* add dummy data generation script
* update readme
* update readme
* update readme and ignore
* fix logger bug
* support parallel_output
* modify data preparation logic
* fix tokenization
* update lr
* fix inference
* run pre-commit
---------
Co-authored-by: Tong Li <tong.li352711588@gmail.com>
* [shardformer, pipeline] add `gradient_checkpointing_ratio` and heterogeneous shard policy for llama (#5508)
* feat: add `GradientCheckpointConfig` and `PipelineGradientCheckpointConfig`
* feat: apply `GradientCheckpointConfig` to policy and llama_forward
* feat: move `distribute_layer` and `get_stage_index` to PipelineStageManager
* fix: add optional args for `distribute_layer` and `get_stage_index`
* fix: fix changed API calls
* test: update llama tests
* style: polish `GradientCheckpointConfig`
* fix: fix pipeline utils tests
* fix incorrect sharding without zero (#5545)
Co-authored-by: Edenzzzz <wtan45@wisc.edu>
* [shardformer] Sequence Parallelism Optimization (#5533)
* sequence parallel optimization
* validate sequence parallel in llama (code to be polished)
* shardformer api writing
* integrate sequence parallel in ShardFormer
* fix pp bugs and sp bugs for LlaMa model
* integrating ring-based sequence parallelism into ShardFormer
* [sequence parallelism]: Add fused megatron function
* integrating ring-based sequence parallelism into ShardFormer
---------
Co-authored-by: linsj20 <linsj20@mails.tsinghua.edu.cn>
* fix bugs when using sp and flashattention together
* fix operation function name
* support flash attention for ulysses-style sp
* clarify sp process group
* fix compatibility bugs in moe plugin
* fix fused linear bugs
* fix linear layer test
* support gpt model all-to-all sp
* modify shard data dimension (meant to be dim=-1)
* support megatron-style sp and distributed attn for llama model
* [shardformer] add megatron sp to llama
* support llama7B 128k with distributed attention
* [shardformer] robustness enhancement
* add block attn
* sp mode 1: keep input as a complete sequence
* fix sp compatibility
* finish sp mode 3 support for gpt
* using all_to_all_single when batch size is 1
* support mode 2 sp in gpt2 (#5)
* [shardformer] add megatron sp to llama
* support llama7B 128k with distributed attention
* [shardformer] robustness enhancement
* add block attn
* sp mode 1: keep input as a complete sequence
* fix sp compatibility
* refactor ring implementation
* support mode 2 sp in gpt2
* polish code
* enable distributed attn mask when using sp mode 2 and 3 in llama
* automatically enable flash attn when using sp mode 2 and 3 in llama
* inplace attn mask
* add zero2 support for sequence parallel
* polish code
* fix bugs
* fix gemini checkpoint io
* loosen tensor checking atol and rtol
* add comment
* fix llama layernorm grad
* fix zero grad
* fix zero grad
* fix conflict
* update split and gather auto grad func
* sequence parallel: inside text split (#6)
* polish code (part 1)
* polish code (part 2)
* polish code (part 2.5)
* polish code (part 3)
* sequence parallel: inside text split
* miscellaneous minor fixes
* polish code
* fix ulysses style ZeRO
* sequence parallel: inside text split
* miscellaneous minor fixes
* disaggregate sp group and dp group for sp
* fix llama and gpt sp
* polish code
* move ulysses grad sync to ddp (#9)
* remove zero_stage and unbind the grad sync for alltoall sp
* add 2d group creation test
* move ulysses grad sync to ddp
* add 2d group creation test
* remove useless code
* change shard config not to enable sp when enable_all_optimizations
* add sp warnings for several models
* remove useless code
---------
Co-authored-by: linsj20 <linsj20@mails.tsinghua.edu.cn>
* [hotfix] quick fixes to make legacy tutorials runnable (#5559)
Co-authored-by: Edenzzzz <wtan45@wisc.edu>
* [fix] fix typo s/muiti-node /multi-node etc. (#5448)
* [hotfix] fix typo s/get_defualt_parser /get_default_parser (#5548)
* [devops] remove post commit ci (#5566)
* [devops] remove post commit ci
* [misc] run pre-commit on all files
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
---------
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---------
Co-authored-by: binmakeswell <binmakeswell@gmail.com>
Co-authored-by: Yuanheng Zhao <54058983+yuanheng-zhao@users.noreply.github.com>
Co-authored-by: Wenhao Chen <cwher@outlook.com>
Co-authored-by: Hongxin Liu <lhx0217@gmail.com>
Co-authored-by: Rocky Duan <dementrock@users.noreply.github.com>
Co-authored-by: Edenzzzz <wtan45@wisc.edu>
Co-authored-by: Edenzzzz <wenxuan.tan@wisc.edu>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: github-actions <github-actions@github.com>
Co-authored-by: Insu Jang <insujang@umich.edu>
Co-authored-by: YeAnbang <44796419+YeAnbang@users.noreply.github.com>
Co-authored-by: Tong Li <tong.li352711588@gmail.com>
Co-authored-by: Zhongkai Zhao <kanezz620@gmail.com>
Co-authored-by: linsj20 <linsj20@mails.tsinghua.edu.cn>
Co-authored-by: digger yu <digger-yu@outlook.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
* [shardformer]enable padding vocabulary size. (#5489)
* padding vocab_size when using pipeline parallelism
padding vocab_size when using pipeline parallelism
fix
fix
* fix
* fix
fix
fix
* fix gather output
* fix
* fix
* fix
fix resize embedding
fix resize embedding
* fix resize embedding
fix
* revert
* revert
* revert
* padding vocab
* padding vocab
* fix
* fix
* fix
* test ci
* fix
fix
fix
fix
* fix
fix
* fix
* fix
* Update hybrid_parallel_plugin.py
fix
fix
fix
* fix
fix
* fix
fix
* fix
* resolve super init
resolve super init
resolve super init
resolve super init
* resolve comments
* fix
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* vocab checkpointio
* padding vocab_size when using pipeline parallelism
padding vocab_size when using pipeline parallelism
fix
fix
* fix
fix
fix
* fix
* fix
fix resize embedding
fix resize embedding
* fix resize embedding
fix
* revert
* revert
* padding vocab
* fix
* fix
fix
* fix
fix
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* fix ci
* fix
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* fix
* cherry-pick
* revert moe modify
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* fix
fix
fix
fix
fix
fix
fix
fix
* resolve comments
resolve comments
resolve comments
resolve comments
resolve comments
* ptensor
ptensor
resolve comments
fix
fix
fix
fix
fix
resolve comments
resolve comments
resolve comments
resolve comments
resolve comments
---------
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Hongxin Liu <lhx0217@gmail.com>
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
* fix rebase
* fix rebase
---------
Co-authored-by: Hongxin Liu <lhx0217@gmail.com>
Co-authored-by: digger yu <digger-yu@outlook.com>
Co-authored-by: binmakeswell <binmakeswell@gmail.com>
Co-authored-by: Yuanheng Zhao <54058983+yuanheng-zhao@users.noreply.github.com>
Co-authored-by: Wenhao Chen <cwher@outlook.com>
Co-authored-by: Rocky Duan <dementrock@users.noreply.github.com>
Co-authored-by: Edenzzzz <wtan45@wisc.edu>
Co-authored-by: Edenzzzz <wenxuan.tan@wisc.edu>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: github-actions <github-actions@github.com>
Co-authored-by: Insu Jang <insujang@umich.edu>
Co-authored-by: YeAnbang <44796419+YeAnbang@users.noreply.github.com>
Co-authored-by: Tong Li <tong.li352711588@gmail.com>
Co-authored-by: Zhongkai Zhao <kanezz620@gmail.com>
Co-authored-by: linsj20 <linsj20@mails.tsinghua.edu.cn>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
            sharded_weight = gather_distributed_param(sharded_weight, keep_vars=False)

        if is_padded_tensor(sharded_weight):
            sharded_weight = to_unpadded_tensor(sharded_weight)

        if verbose and dist.get_rank() == 0:
            print(f"'{suffix}' weight: {org_weight}, {sharded_weight}")

        assert_close(org_weight.float(), sharded_weight.float(), atol=atol, rtol=rtol)
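# --- illustrative sketch (not part of the original helpers) --------------------
# A plain-torch approximation of the gather-and-unpad step above for a weight that
# is sharded row-wise across the tensor-parallel group and padded along dim 0.
# The helper name and the explicit `orig_rows` argument are hypothetical; the real
# code relies on the metadata carried by ColossalAI's distributed/padded tensors.
def _gather_and_unpad_sketch(local_weight: Tensor, tp_group: ProcessGroup, orig_rows: int) -> Tensor:
    world_size = dist.get_world_size(tp_group)
    pieces = [torch.empty_like(local_weight) for _ in range(world_size)]
    dist.all_gather(pieces, local_weight.contiguous(), group=tp_group)
    full = torch.cat(pieces, dim=0)  # reassemble the row shards
    return full[:orig_rows]  # drop rows that exist only because of vocab padding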


def get_grad_tensors_for_check(
    org_model: Module,
    sharded_model: Module,
    layer_suffix: List[str],
    tp_group: Optional[ProcessGroup] = None,
    dim: int = 0,
    atol: float = 1e-5,
    rtol: float = 1e-3,
    verbose: bool = False,
    name: Optional[str] = None,
):
    """Gather each sharded layer's gradient and pair it with the original model's for later comparison."""
    grad_to_check = {}
    for suffix in layer_suffix:
        org_grad = getattr_(org_model, suffix).weight.grad
        shard_grad = getattr_(sharded_model, suffix).weight.grad
        shard_weight = getattr_(sharded_model, suffix).weight
        if is_distributed_tensor(shard_weight) or is_customized_distributed_tensor(shard_weight):
            shard_grad_list = [torch.zeros_like(shard_grad).to("cuda") for _ in range(dist.get_world_size(tp_group))]
            dist.all_gather(shard_grad_list, shard_grad, tp_group)
            shard_grad = torch.cat(shard_grad_list, dim=dim)

        # the embedding may be resized (padded) under tensor parallelism, so trim the
        # gathered gradient back to the original vocabulary size before comparing
        if org_grad is not None and shard_grad.dim() > 1 and shard_grad.shape[0] > org_grad.shape[0]:
            shard_grad = shard_grad[: org_grad.shape[0], :]

        if verbose and dist.get_rank() == 0:
            print(f"'{suffix}' grad: {org_grad}, {shard_grad}")

        grad_to_check[suffix] = {
            "org_grad": org_grad.float(),
            "shard_grad": shard_grad.float(),
            "rtol": rtol,
            "atol": atol,
        }

    return grad_to_check
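# --- illustrative sketch (not part of the original helpers) --------------------
# How the gradient helpers are typically combined in a test: collect the gathered
# gradients per layer (deferring the assertion) and verify them all at once with
# `check_all_grad_tensors` defined further below. The layer suffixes and `dim`
# values are illustrative and depend on the model/policy under test.
def _example_deferred_grad_check(org_model: Module, sharded_model: Module, tp_group: ProcessGroup):
    grads_to_check = {}
    # a column-parallel weight is sharded along dim 0, a row-parallel one along dim 1
    grads_to_check.update(
        get_grad_tensors_for_check(org_model, sharded_model, ["layers[0].self_attn.q_proj"], tp_group, dim=0)
    )
    grads_to_check.update(
        get_grad_tensors_for_check(org_model, sharded_model, ["layers[0].self_attn.o_proj"], tp_group, dim=1)
    )
    check_all_grad_tensors(grads_to_check)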


# used by sam/blip2
def check_grad(
    org_model: Module,
    sharded_model: Module,
    layer_suffix: List[str],
    tp_group: Optional[ProcessGroup] = None,
    dim: int = 0,
    atol: float = 1e-5,
    rtol: float = 1e-3,
    verbose: bool = False,
):
    """Gather each sharded layer's gradient and compare it with the original model's immediately."""
    for suffix in layer_suffix:
        org_grad = getattr_(org_model, suffix).weight.grad
        shard_grad = getattr_(sharded_model, suffix).weight.grad
        shard_weight = getattr_(sharded_model, suffix).weight
        if is_distributed_tensor(shard_weight) or is_customized_distributed_tensor(shard_weight):
            shard_grad_list = [torch.zeros_like(shard_grad).to("cuda") for _ in range(dist.get_world_size(tp_group))]
            dist.all_gather(shard_grad_list, shard_grad, tp_group)
            shard_grad = torch.cat(shard_grad_list, dim=dim)

        # the embedding may be resized (padded) when using tensor parallel
        if shard_grad.shape[0] > org_grad.shape[0]:
            shard_grad = shard_grad[: org_grad.shape[0], :]

        if verbose and dist.get_rank() == 0:
            print(f"'{suffix}' grad: {org_grad}, {shard_grad}")

        assert_close(org_grad.float(), shard_grad.float(), rtol=rtol, atol=atol)
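# --- illustrative sketch (not part of the original helpers) --------------------
# `check_grad` asserts per layer on the spot, which is convenient when gradients
# must be validated right after backward and before the optimizer step clears
# them (e.g. for the SAM/BLIP-2 tests noted above). Layer suffixes are illustrative.
def _example_immediate_grad_check(org_model: Module, sharded_model: Module, tp_group: ProcessGroup):
    check_grad(org_model, sharded_model, ["vision_model.encoder.layers[0].mlp.fc1"], tp_group, dim=0)
    check_grad(org_model, sharded_model, ["vision_model.encoder.layers[0].mlp.fc2"], tp_group, dim=1)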


def unwrap_model(
    module: Module,
    base_model_class_name: Optional[str] = None,
    base_model_attribute_name: Optional[str] = None,
):
    """Strip the HybridParallelModule wrapper and optionally descend to the named backbone module."""
    if isinstance(module, HybridParallelModule):
        module = module.unwrap()
    if base_model_class_name is None:
        return module
    if module.__class__.__name__ == base_model_class_name:
        return module
    return getattr(module, base_model_attribute_name, None)
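# --- illustrative sketch (not part of the original helpers) --------------------
# Typical call: peel off the booster's HybridParallelModule wrapper and, if the
# unwrapped object is e.g. a causal-LM head, descend to its backbone so that layer
# suffixes line up between the original and the sharded model. The class and
# attribute names below are illustrative.
def _example_unwrap(wrapped_model: Module) -> Module:
    return unwrap_model(wrapped_model, base_model_class_name="LlamaModel", base_model_attribute_name="model")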


def check_all_grad_tensors(check_tensors):
    """
    Compare every gradient pair collected by `get_grad_tensors_for_check`.

    "org_grad": tensor to be compared from the original model
    "shard_grad": tensor to be compared from the sharded model
    """
    for suffix, check_info in check_tensors.items():
        org_grad = check_info["org_grad"]
        shard_grad = check_info["shard_grad"]
        rtol = check_info["rtol"]
        atol = check_info["atol"]
        assert_close(org_grad, shard_grad, atol=atol, rtol=rtol, msg=lambda m: f"gradient mismatch for '{suffix}': {m}")
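# --- illustrative sketch (not part of the original helpers) --------------------
# A self-contained illustration of the dict layout `check_all_grad_tensors`
# expects; in the real tests the dict is produced by `get_grad_tensors_for_check`.
def _example_check_all_grad_tensors():
    grad = torch.randn(8, 8)
    check_tensors = {
        "layers[0].mlp.gate_proj": {
            "org_grad": grad.clone(),
            "shard_grad": grad.clone(),
            "rtol": 1e-3,
            "atol": 1e-5,
        }
    }
    check_all_grad_tensors(check_tensors)  # passes: the two gradients are identical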