ColossalAI/examples/images/vit/train.py
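"""Train a timm ViT (vit_small_patch16_224) on ImageNet with Colossal-AI.

The script builds DALI ImageNet loaders (or synthetic loaders when --dummy_data is
passed), applies a 1D tensor-parallel shard spec to the Linear parameters according
to gpc.config.TP_TYPE ('row' or 'col'), wraps the model in ColoDDP, and trains with
HybridAdam plus gradient accumulation. Hyperparameters are read from the config file
given via --config; the script expects to be launched through torchrun or the
`colossalai run` CLI, since colossalai.launch_from_torch reads the torch distributed
environment variables.
"""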


import os
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from timm.models.vision_transformer import _create_vision_transformer
from titans.dataloader.imagenet import build_dali_imagenet
from tqdm import tqdm
from vit import DummyDataLoader
import colossalai
from colossalai.core import global_context as gpc
from colossalai.logging import disable_existing_loggers, get_dist_logger
from colossalai.nn import CrossEntropyLoss
from colossalai.nn._ops import *
from colossalai.nn.lr_scheduler import CosineAnnealingWarmupLR
from colossalai.nn.optimizer import HybridAdam
from colossalai.nn.parallel.data_parallel import ColoDDP
from colossalai.tensor import ComputePattern, ComputeSpec, DistSpecManager, ProcessGroup, ShardSpec
from colossalai.utils import get_current_device
from colossalai.utils.model.colo_init_context import ColoInitContext
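
# The two helpers below attach 1D tensor-parallel specs to the model's parameters:
# init_1d_row_for_linear_weight_spec shards eligible 'weight' tensors along their last
# dimension, while init_1d_col_for_linear_weight_bias_spec shards 'weight' and 'bias'
# tensors along dim 0. LayerNorm and patch_embed.proj parameters are excluded and stay
# replicated on every tensor-parallel rank.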
def init_1d_row_for_linear_weight_spec(model, world_size: int):
    pg = ProcessGroup(tp_degree=world_size)
    spec = (ShardSpec([-1], [pg.tp_world_size()]), ComputeSpec(ComputePattern.TP1D))
    with DistSpecManager.no_grad():
        for n, p in model.named_parameters():
            if 'weight' in n and 'norm' not in n and 'patch_embed.proj.weight' not in n:
                p.set_process_group(pg)
                p.set_tensor_spec(*spec)


# Similarly, it's col split for Linear but row split for others.
def init_1d_col_for_linear_weight_bias_spec(model, world_size: int):
    pg = ProcessGroup(tp_degree=world_size)
    spec = (ShardSpec([0], [pg.tp_world_size()]), ComputeSpec(ComputePattern.TP1D))
    with DistSpecManager.no_grad():
        for n, p in model.named_parameters():
            if ('weight' in n or 'bias' in n) and 'norm' not in n and ('patch_embed.proj.weight' not in n
                                                                       and 'patch_embed.proj.bias' not in n):
                p.set_process_group(pg)
                p.set_tensor_spec(*spec)


def init_spec_func(model, tp_type):
    world_size = torch.distributed.get_world_size()
    if tp_type == 'row':
        init_1d_row_for_linear_weight_spec(model, world_size)
    elif tp_type == 'col':
        init_1d_col_for_linear_weight_bias_spec(model, world_size)
    else:
        raise NotImplementedError(f"unsupported TP_TYPE: {tp_type}")


def train_imagenet():
    parser = colossalai.get_default_parser()
    # --resume_from takes a checkpoint path prefix; '<prefix>_model.pth' and
    # '<prefix>_optim_rank_<r>.pth' are loaded below when it is given.
    parser.add_argument('--resume_from', type=str, default=None, help='checkpoint path prefix to resume from')
    parser.add_argument('--dummy_data', default=False, action='store_true')
    args = parser.parse_args()
    colossalai.launch_from_torch(config=args.config)
    use_ddp = gpc.config.USE_DDP

    disable_existing_loggers()
    logger = get_dist_logger()
    if hasattr(gpc.config, 'LOG_PATH'):
        if gpc.get_global_rank() == 0:
            log_path = gpc.config.LOG_PATH
            if not os.path.exists(log_path):
                os.mkdir(log_path)
            logger.log_to_file(log_path)

    logger.info('Build data loader', ranks=[0])
    if not args.dummy_data:
        root = os.environ['DATA']
        train_dataloader, test_dataloader = build_dali_imagenet(root,
                                                                train_batch_size=gpc.config.BATCH_SIZE,
                                                                test_batch_size=gpc.config.BATCH_SIZE)
    else:
        train_dataloader = DummyDataLoader(length=10,
                                           batch_size=gpc.config.BATCH_SIZE,
                                           category=gpc.config.NUM_CLASSES,
                                           image_size=gpc.config.IMG_SIZE,
                                           return_dict=False)
        test_dataloader = DummyDataLoader(length=5,
                                          batch_size=gpc.config.BATCH_SIZE,
                                          category=gpc.config.NUM_CLASSES,
                                          image_size=gpc.config.IMG_SIZE,
                                          return_dict=False)

    logger.info('Build model', ranks=[0])
    model_kwargs = dict(img_size=gpc.config.IMG_SIZE,
                        patch_size=gpc.config.PATCH_SIZE,
                        embed_dim=gpc.config.HIDDEN_SIZE,
                        depth=gpc.config.DEPTH,
                        num_heads=gpc.config.NUM_HEADS,
                        mlp_ratio=gpc.config.MLP_RATIO,
                        num_classes=gpc.config.NUM_CLASSES,
                        drop_rate=0.1,
                        attn_drop_rate=0.1,
                        weight_init='jax')

    with ColoInitContext(device=get_current_device()):
        model = _create_vision_transformer('vit_small_patch16_224', pretrained=False, **model_kwargs)
    init_spec_func(model, gpc.config.TP_TYPE)

    world_size = torch.distributed.get_world_size()
    model = ColoDDP(module=model, process_group=ProcessGroup(tp_degree=world_size))
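    # ColoDDP takes over gradient handling: when gpc.config.USE_DDP is set, the training
    # loop below calls model.backward(loss) and model.zero_grad(); otherwise it falls back
    # to plain loss.backward() and optimizer.zero_grad().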

    logger.info('Build criterion, optimizer, lr_scheduler', ranks=[0])
    optimizer = HybridAdam(model.parameters(), lr=gpc.config.LEARNING_RATE, weight_decay=gpc.config.WEIGHT_DECAY)
    criterion = CrossEntropyLoss()
    lr_scheduler = CosineAnnealingWarmupLR(optimizer=optimizer,
                                           total_steps=gpc.config.NUM_EPOCHS,
                                           warmup_steps=gpc.config.WARMUP_EPOCHS)
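    # lr_scheduler.step() is called once per epoch at the bottom of the training loop,
    # so total_steps and warmup_steps here are counted in epochs, not iterations.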

    start_epoch = 0
    if args.resume_from:
        load_model = torch.load(args.resume_from + '_model.pth')
        start_epoch = load_model['epoch']
        model.load_state_dict(load_model['model'])
        load_optim = torch.load(args.resume_from + '_optim_rank_{}.pth'.format(dist.get_rank()))
        optimizer.load_state_dict(load_optim['optim'])
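
    # Gradient accumulation: the loss is scaled by 1/gradient_accumulation and the optimizer
    # only steps (and gradients are only cleared) every gpc.config.gradient_accumulation
    # micro-batches, emulating a larger effective batch size.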
    for epoch in range(start_epoch, gpc.config.NUM_EPOCHS):
        model.train()
        for index, (x, y) in tqdm(enumerate(train_dataloader), total=len(train_dataloader), leave=False):
            x, y = x.cuda(), y.cuda()
            output = model(x)
            loss = criterion(output, y)
            loss = loss / gpc.config.gradient_accumulation
            if use_ddp:
                model.backward(loss)
            else:
                loss.backward()
            if (index + 1) % gpc.config.gradient_accumulation == 0:
                optimizer.step()
                if use_ddp:
                    model.zero_grad()
                else:
                    optimizer.zero_grad()

        logger.info(
            f"Finish Train Epoch [{epoch+1}/{gpc.config.NUM_EPOCHS}] loss: {loss.item():.3f} lr: {optimizer.state_dict()['param_groups'][0]['lr']}",
            ranks=[0])

        model.eval()
        test_loss = 0
        correct = 0
        test_sum = 0
        with torch.no_grad():
            for index, (x, y) in tqdm(enumerate(test_dataloader), total=len(test_dataloader), leave=False):
                x, y = x.cuda(), y.cuda()
                output = model(x)
                test_loss += F.cross_entropy(output, y, reduction='sum').item()
                pred = output.argmax(dim=1, keepdim=True)
                correct += pred.eq(y.view_as(pred)).sum().item()
                test_sum += y.size(0)

        test_loss /= test_sum
        logger.info(
            f"Finish Test Epoch [{epoch+1}/{gpc.config.NUM_EPOCHS}] loss: {test_loss:.3f} Accuracy: [{correct}/{test_sum}]({correct/test_sum:.3f})",
            ranks=[0])

        lr_scheduler.step()


if __name__ == '__main__':
    train_imagenet()