From 404ecbdcc61b17c7474806f5360c4fedf10cc37d Mon Sep 17 00:00:00 2001 From: zbian Date: Thu, 28 Oct 2021 18:21:23 +0200 Subject: [PATCH] Migrated project --- .gitignore | 144 + MANIFEST.in | 4 + README.md | 104 + colossalai/__init__.py | 4 + colossalai/builder/__init__.py | 2 + colossalai/builder/builder.py | 262 ++ colossalai/builder/pipeline.py | 226 ++ colossalai/checkpointing.py | 215 + colossalai/communication/__init__.py | 14 + colossalai/communication/collective.py | 84 + colossalai/communication/p2p.py | 333 ++ colossalai/communication/ring.py | 54 + colossalai/communication/utils.py | 73 + colossalai/constants.py | 31 + colossalai/context/__init__.py | 5 + colossalai/context/_utils.py | 70 + colossalai/context/config.py | 99 + colossalai/context/parallel_context.py | 454 +++ colossalai/context/parallel_mode.py | 44 + .../process_group_initializer/__init__.py | 15 + .../initializer_1d.py | 44 + .../initializer_2d.py | 123 + .../initializer_2p5d.py | 255 ++ .../initializer_3d.py | 172 + .../initializer_data.py | 41 + .../initializer_pipeline.py | 63 + .../initializer_sequence.py | 27 + .../initializer_tensor.py | 41 + .../process_group_initializer.py | 30 + colossalai/context/random/__init__.py | 8 + colossalai/context/random/_helper.py | 144 + colossalai/context/random/seed_manager.py | 74 + colossalai/core.py | 16 + colossalai/engine/__init__.py | 7 + colossalai/engine/_base_engine.py | 170 + colossalai/engine/amp_type.py | 10 + .../engine/gradient_handler/__init__.py | 5 + .../_base_gradient_handler.py | 25 + .../_data_parallel_gradient_handler.py | 48 + .../_zero_gradient_handler.py | 16 + colossalai/engine/schedule/__init__.py | 5 + colossalai/engine/schedule/_base_schedule.py | 129 + colossalai/engine/schedule/_no_pipeline.py | 185 + colossalai/engine/schedule/_pipeline.py | 316 ++ colossalai/engine/schedule/_utils.py | 16 + colossalai/initialize.py | 371 ++ colossalai/logging/__init__.py | 26 + colossalai/logging/logging.py | 97 + colossalai/nn/__init__.py | 6 + colossalai/nn/data/__init__.py | 3 + colossalai/nn/data/_utils.py | 14 + colossalai/nn/data/base_dataset.py | 17 + colossalai/nn/data/caltech101_dataset.py | 43 + colossalai/nn/data/cifar10_dataset.py | 44 + colossalai/nn/data/sampler/__init__.py | 4 + colossalai/nn/data/sampler/base_sampler.py | 19 + .../nn/data/sampler/data_parallel_sampler.py | 102 + colossalai/nn/layer/__init__.py | 9 + colossalai/nn/layer/_common_utils.py | 63 + colossalai/nn/layer/_parallel_utilities.py | 138 + colossalai/nn/layer/base_layer.py | 27 + colossalai/nn/layer/parallel_1d/__init__.py | 5 + colossalai/nn/layer/parallel_1d/_utils.py | 15 + colossalai/nn/layer/parallel_1d/layers.py | 166 + colossalai/nn/layer/parallel_2d/__init__.py | 11 + colossalai/nn/layer/parallel_2d/_operation.py | 522 +++ .../nn/layer/parallel_2d/_transformer.py | 220 + colossalai/nn/layer/parallel_2d/_utils.py | 23 + colossalai/nn/layer/parallel_2d/_vit.py | 391 ++ colossalai/nn/layer/parallel_2d/layers.py | 258 ++ colossalai/nn/layer/parallel_2p5d/__init__.py | 13 + .../nn/layer/parallel_2p5d/_operation.py | 535 +++ .../nn/layer/parallel_2p5d/_transformer.py | 206 + colossalai/nn/layer/parallel_2p5d/_utils.py | 25 + colossalai/nn/layer/parallel_2p5d/_vit.py | 351 ++ colossalai/nn/layer/parallel_2p5d/layers.py | 266 ++ colossalai/nn/layer/parallel_3d/__init__.py | 9 + colossalai/nn/layer/parallel_3d/_operation.py | 349 ++ colossalai/nn/layer/parallel_3d/_utils.py | 49 + colossalai/nn/layer/parallel_3d/_vit.py | 368 ++ colossalai/nn/layer/parallel_3d/layers.py | 
172 + .../nn/layer/parallel_sequence/__init__.py | 4 + .../nn/layer/parallel_sequence/_operation.py | 169 + .../nn/layer/parallel_sequence/_utils.py | 15 + .../nn/layer/parallel_sequence/layers.py | 188 + .../parallel_vision_transformer/__init__.py | 3 + .../parallel_vision_transformer/layers.py | 59 + .../nn/layer/vanilla_resnet/__init__.py | 5 + .../nn/layer/vanilla_resnet/basic_block.py | 64 + .../nn/layer/vanilla_resnet/bottleneck.py | 69 + colossalai/nn/layer/vanilla_resnet/conv.py | 15 + .../nn/layer/vanilla_resnet/reslayer.py | 63 + .../vanilla_vision_transformer/__init__.py | 7 + .../vanilla_vision_transformer/layers.py | 244 ++ colossalai/nn/layer/wrapper/__init__.py | 3 + colossalai/nn/layer/wrapper/lambda_wrapper.py | 37 + colossalai/nn/loss/__init__.py | 6 + colossalai/nn/loss/base_loss.py | 13 + colossalai/nn/loss/cross_entropy_1d.py | 120 + colossalai/nn/loss/cross_entropy_2d.py | 128 + colossalai/nn/loss/cross_entropy_2p5d.py | 124 + colossalai/nn/loss/cross_entropy_3d.py | 193 + colossalai/nn/lr_scheduler/__init__.py | 13 + colossalai/nn/lr_scheduler/cosine.py | 129 + colossalai/nn/lr_scheduler/delayed.py | 149 + colossalai/nn/lr_scheduler/linear.py | 45 + colossalai/nn/lr_scheduler/multistep.py | 70 + colossalai/nn/lr_scheduler/onecycle.py | 97 + colossalai/nn/lr_scheduler/poly.py | 65 + colossalai/nn/lr_scheduler/torch.py | 122 + colossalai/nn/model/__init__.py | 3 + colossalai/nn/model/base_model.py | 38 + .../nn/model/vanilla_resnet/__init__.py | 3 + colossalai/nn/model/vanilla_resnet/resnet.py | 163 + .../nn/model/vision_transformer/__init__.py | 3 + .../vision_transformer/vision_transformer.py | 87 + colossalai/nn/multi_tensor_apply/__init__.py | 3 + .../multi_tensor_apply/multi_tensor_apply.py | 31 + colossalai/nn/optimizer/__init__.py | 14 + colossalai/nn/optimizer/_utils.py | 168 + colossalai/nn/optimizer/fp16_optimizer.py | 507 +++ colossalai/nn/optimizer/fused_adam.py | 163 + colossalai/nn/optimizer/fused_lamb.py | 212 + colossalai/nn/optimizer/fused_sgd.py | 227 ++ colossalai/nn/optimizer/lamb.py | 114 + colossalai/nn/optimizer/lars.py | 99 + colossalai/nn/optimizer/loss_scaler.py | 166 + .../zero_redundancy_optimizer_level_1.py | 707 ++++ .../zero_redundancy_optimizer_level_2.py | 2343 +++++++++++ .../zero_redundancy_optimizer_level_3.py | 3598 +++++++++++++++++ colossalai/registry/__init__.py | 22 + colossalai/registry/registry.py | 82 + colossalai/trainer/__init__.py | 5 + colossalai/trainer/_trainer.py | 333 ++ colossalai/trainer/hooks/__init__.py | 11 + colossalai/trainer/hooks/_base_hook.py | 107 + colossalai/trainer/hooks/_checkpoint_hook.py | 110 + colossalai/trainer/hooks/_log_hook.py | 247 ++ colossalai/trainer/hooks/_metric_hook.py | 185 + colossalai/trainer/metric.py | 307 ++ colossalai/utils/__init__.py | 22 + colossalai/utils/activation_checkpoint.py | 117 + colossalai/utils/common.py | 42 + colossalai/utils/cuda.py | 48 + colossalai/utils/memory.py | 49 + colossalai/utils/timer.py | 143 + configs/resnet/resnet50.py | 77 + configs/sample_config.py | 23 + configs/vit/vit_2d.py | 161 + configs/vit/vit_3d.py | 109 + csrc/colossal_C_frontend.cpp | 71 + csrc/compat.h | 10 + csrc/multi_tensor_adam.cu | 177 + csrc/multi_tensor_apply.cuh | 133 + csrc/multi_tensor_l2norm_kernel.cu | 455 +++ csrc/multi_tensor_lamb.cu | 427 ++ csrc/multi_tensor_scale_kernel.cu | 136 + csrc/multi_tensor_sgd_kernel.cu | 282 ++ csrc/type_shim.h | 202 + docs/Makefile | 26 + docs/_static/css/rtd_theme.css | 3 + docs/_templates/apidoc/module.rst_t | 9 + 
docs/_templates/apidoc/package.rst_t | 52 + docs/_templates/apidoc/toc.rst_t | 8 + docs/add_your_parallel.md | 120 + docs/amp.md | 85 + .../colossalai/colossalai.builder.builder.rst | 5 + .../colossalai.builder.pipeline.rst | 5 + docs/colossalai/colossalai.builder.rst | 12 + docs/colossalai/colossalai.checkpointing.rst | 5 + .../colossalai.communication.collective.rst | 5 + .../colossalai.communication.p2p.rst | 5 + .../colossalai.communication.ring.rst | 5 + docs/colossalai/colossalai.communication.rst | 14 + .../colossalai.communication.utils.rst | 5 + docs/colossalai/colossalai.constants.rst | 5 + docs/colossalai/colossalai.context.config.rst | 5 + .../colossalai.context.parallel_context.rst | 5 + .../colossalai.context.parallel_mode.rst | 5 + ...ocess_group_initializer.initializer_1d.rst | 5 + ...ocess_group_initializer.initializer_2d.rst | 5 + ...ess_group_initializer.initializer_2p5d.rst | 5 + ...ocess_group_initializer.initializer_3d.rst | 5 + ...ess_group_initializer.initializer_data.rst | 5 + ...group_initializer.initializer_pipeline.rst | 5 + ...group_initializer.initializer_sequence.rst | 5 + ...s_group_initializer.initializer_tensor.rst | 5 + ..._initializer.process_group_initializer.rst | 5 + ...alai.context.process_group_initializer.rst | 19 + docs/colossalai/colossalai.context.random.rst | 11 + ...colossalai.context.random.seed_manager.rst | 5 + docs/colossalai/colossalai.context.rst | 19 + docs/colossalai/colossalai.core.rst | 5 + .../colossalai/colossalai.engine.amp_type.rst | 5 + .../colossalai.engine.gradient_handler.rst | 5 + docs/colossalai/colossalai.engine.rst | 17 + .../colossalai/colossalai.engine.schedule.rst | 5 + docs/colossalai/colossalai.initialize.rst | 5 + .../colossalai/colossalai.logging.logging.rst | 5 + docs/colossalai/colossalai.logging.rst | 11 + .../colossalai.nn.data.base_dataset.rst | 5 + .../colossalai.nn.data.caltech101_dataset.rst | 5 + .../colossalai.nn.data.cifar10_dataset.rst | 5 + .../colossalai.nn.data.prefetcher.rst | 5 + docs/colossalai/colossalai.nn.data.rst | 20 + ...olossalai.nn.data.sampler.base_sampler.rst | 5 + ....nn.data.sampler.data_parallel_sampler.rst | 5 + .../colossalai/colossalai.nn.data.sampler.rst | 12 + .../colossalai.nn.data.wiki_dataset.rst | 5 + .../colossalai.nn.layer.base_layer.rst | 5 + ...colossalai.nn.layer.parallel_1d.layers.rst | 5 + .../colossalai.nn.layer.parallel_1d.rst | 11 + ...colossalai.nn.layer.parallel_2d.layers.rst | 5 + .../colossalai.nn.layer.parallel_2d.rst | 11 + ...lossalai.nn.layer.parallel_2p5d.layers.rst | 5 + .../colossalai.nn.layer.parallel_2p5d.rst | 11 + ...colossalai.nn.layer.parallel_3d.layers.rst | 5 + .../colossalai.nn.layer.parallel_3d.rst | 11 + ...alai.nn.layer.parallel_sequence.layers.rst | 5 + .../colossalai.nn.layer.parallel_sequence.rst | 11 + ...yer.parallel_vision_transformer.layers.rst | 5 + ...i.nn.layer.parallel_vision_transformer.rst | 11 + docs/colossalai/colossalai.nn.layer.rst | 24 + ...ai.nn.layer.vanilla_resnet.basic_block.rst | 5 + ...lai.nn.layer.vanilla_resnet.bottleneck.rst | 5 + ...olossalai.nn.layer.vanilla_resnet.conv.rst | 5 + ...salai.nn.layer.vanilla_resnet.reslayer.rst | 5 + .../colossalai.nn.layer.vanilla_resnet.rst | 14 + ...ayer.vanilla_vision_transformer.layers.rst | 5 + ...ai.nn.layer.vanilla_vision_transformer.rst | 11 + ...ssalai.nn.layer.wrapper.lambda_wrapper.rst | 5 + .../colossalai.nn.layer.wrapper.rst | 11 + .../colossalai.nn.loss.base_loss.rst | 5 + .../colossalai.nn.loss.cross_entropy_1d.rst | 5 + .../colossalai.nn.loss.cross_entropy_2d.rst | 5 
+ .../colossalai.nn.loss.cross_entropy_2p5d.rst | 5 + .../colossalai.nn.loss.cross_entropy_3d.rst | 5 + docs/colossalai/colossalai.nn.loss.rst | 15 + .../colossalai.nn.lr_scheduler.cosine.rst | 5 + .../colossalai.nn.lr_scheduler.delayed.rst | 5 + .../colossalai.nn.lr_scheduler.linear.rst | 5 + .../colossalai.nn.lr_scheduler.multistep.rst | 5 + .../colossalai.nn.lr_scheduler.onecycle.rst | 5 + .../colossalai.nn.lr_scheduler.poly.rst | 5 + .../colossalai/colossalai.nn.lr_scheduler.rst | 17 + .../colossalai.nn.lr_scheduler.torch.rst | 5 + .../colossalai.nn.model.base_model.rst | 5 + .../colossalai.nn.model.bert.bert.rst | 5 + docs/colossalai/colossalai.nn.model.bert.rst | 11 + docs/colossalai/colossalai.nn.model.rst | 18 + ...ossalai.nn.model.vanilla_resnet.resnet.rst | 5 + .../colossalai.nn.model.vanilla_resnet.rst | 11 + ...colossalai.nn.model.vision_transformer.rst | 11 + ....vision_transformer.vision_transformer.rst | 5 + ....multi_tensor_apply.multi_tensor_apply.rst | 5 + .../colossalai.nn.multi_tensor_apply.rst | 11 + ...colossalai.nn.optimizer.fp16_optimizer.rst | 5 + .../colossalai.nn.optimizer.fused_adam.rst | 5 + .../colossalai.nn.optimizer.fused_lamb.rst | 5 + .../colossalai.nn.optimizer.fused_sgd.rst | 5 + .../colossalai.nn.optimizer.lamb.rst | 5 + .../colossalai.nn.optimizer.loss_scaler.rst | 5 + docs/colossalai/colossalai.nn.optimizer.rst | 19 + ...izer.zero_redundancy_optimizer_level_1.rst | 5 + ...izer.zero_redundancy_optimizer_level_2.rst | 5 + ...izer.zero_redundancy_optimizer_level_3.rst | 5 + docs/colossalai/colossalai.nn.rst | 16 + .../colossalai.registry.registry.rst | 5 + docs/colossalai/colossalai.registry.rst | 11 + docs/colossalai/colossalai.rst | 27 + docs/colossalai/colossalai.trainer.hooks.rst | 5 + docs/colossalai/colossalai.trainer.metric.rst | 5 + docs/colossalai/colossalai.trainer.rst | 16 + ...colossalai.utils.activation_checkpoint.rst | 5 + docs/colossalai/colossalai.utils.common.rst | 5 + docs/colossalai/colossalai.utils.cuda.rst | 5 + docs/colossalai/colossalai.utils.memory.rst | 5 + docs/colossalai/colossalai.utils.rst | 15 + docs/colossalai/colossalai.utils.timer.rst | 5 + docs/conf.py | 87 + docs/config.md | 187 + docs/index.rst | 40 + docs/installation.md | 25 + docs/make.bat | 35 + docs/model.md | 28 + docs/parallelization.md | 216 + docs/run_demo.md | 74 + docs/trainer_engine.md | 90 + docs/zero.md | 81 + examples/colossal_cifar_demo.ipynb | 370 ++ examples/run_trainer.py | 40 + model_zoo/__init__.py | 2 + model_zoo/bert/parallel_1d/.init | 0 model_zoo/bert/parallel_2d/.init | 0 model_zoo/bert/parallel_2p5d/.init | 0 model_zoo/bert/parallel_3d/.init | 0 model_zoo/mlp_mixer/__init__.py | 1 + model_zoo/mlp_mixer/parallel_1d/.init | 0 model_zoo/mlp_mixer/parallel_2d/.init | 0 model_zoo/mlp_mixer/parallel_2p5d/.init | 0 model_zoo/mlp_mixer/parallel_3d/__init__.py | 1 + model_zoo/mlp_mixer/parallel_3d/mlp_mixer.py | 63 + model_zoo/vit/__init__.py | 2 + model_zoo/vit/parallel_1d/.init | 0 model_zoo/vit/parallel_2d/__init__.py | 1 + model_zoo/vit/parallel_2d/vit.py | 219 + model_zoo/vit/parallel_2p5d/.init | 0 model_zoo/vit/parallel_3d/__init__.py | 1 + model_zoo/vit/parallel_3d/vit.py | 209 + pytest.ini | 6 + requirements/requirements-test.txt | 3 + requirements/requirements.txt | 7 + scripts/slurm_dist_train.sh | 11 + setup.py | 175 + tests/test_config/sample_config.py | 27 + tests/test_config/test_load_config.py | 19 + .../test_context/configs/parallel_2d_init.py | 10 + .../configs/parallel_2p5d_init.py | 11 + .../test_context/configs/parallel_3d_init.py 
| 10 + tests/test_context/test_2d_init.py | 96 + tests/test_context/test_2p5d_init.py | 118 + tests/test_context/test_3d_init.py | 111 + tests/test_data/test_cifar10_dataset.py | 43 + tests/test_data/test_data_parallel_sampler.py | 86 + .../test_deterministic_dataloader.py | 87 + .../configs/vit_2d.py | 159 + .../configs/vit_2p5d.py | 137 + .../test.sh | 4 + .../test_vit_2d/test_vit_2d.py | 93 + .../test_vit_2p5d/test_vit_2p5d.py | 94 + .../configs/non_pipeline_resnet.py | 42 + .../configs/non_pipeline_resnet_apex_amp.py | 45 + .../configs/non_pipeline_resnet_torch_amp.py | 45 + .../configs/pipeline_vanilla_resnet.py | 48 + tests/test_engine/test.sh | 4 + .../test_engine_apex_amp.py | 54 + .../test_engine_no_amp.py | 57 + .../test_engine_torch_amp.py | 57 + .../test_pipeline/debug_schedule.py | 232 ++ tests/test_engine/test_pipeline/test_p2p.py | 149 + .../test_pipeline/test_partition.py | 37 + .../test_pipeline/test_schedule.py | 38 + .../test_pipeline_engine/test_engine.py | 54 + tests/test_fp16_optimizer/configs/vit_2d.py | 140 + tests/test_fp16_optimizer/test.sh | 4 + .../test_vit_2d/test_vit_2d.py | 88 + tests/test_layers/test.sh | 4 + tests/test_layers/test_1d/common.py | 13 + tests/test_layers/test_1d/test_1d.py | 38 + tests/test_layers/test_1d/test_layer.py | 211 + tests/test_layers/test_2d/common.py | 13 + tests/test_layers/test_2d/test_2d.py | 47 + tests/test_layers/test_2d/test_layer.py | 248 ++ tests/test_layers/test_2d/test_operation.py | 240 ++ tests/test_layers/test_2p5d/common.py | 11 + tests/test_layers/test_2p5d/test.sh | 3 + tests/test_layers/test_2p5d/test_2p5d.py | 41 + tests/test_layers/test_2p5d/test_layer.py | 265 ++ tests/test_layers/test_2p5d/test_operation.py | 239 ++ tests/test_layers/test_3d/common.py | 15 + tests/test_layers/test_3d/test.sh | 22 + tests/test_layers/test_3d/test_3d.py | 58 + tests/test_layers/test_3d/test_conn.py | 19 + tests/test_layers/test_3d/test_layer.py | 640 +++ tests/test_layers/test_3d/test_operation.py | 465 +++ tests/test_layers/test_sequence/test_layer.py | 26 + .../test_sequence/test_sequence.py | 34 + tests/test_lr_scheduler/test_lr_scheduler.py | 69 + .../test_vanilla_resnet.py | 98 + .../test_vision_transformer/configs/vit_2d.py | 107 + .../configs/vit_2p5d.py | 137 + .../test_vision_transformer/configs/vit_3d.py | 138 + .../configs/vit_vanilla.py | 56 + .../test_vision_transformer/test.sh | 4 + .../2d-nproc4-lr1e-3/acc-2D-lr1e-3.jpg | Bin 0 -> 29576 bytes .../2d-nproc4-lr1e-3/alignment.o3475503 | 177 + .../2d-nproc4-lr1e-3/loss-2D-lr1e-3.jpg | Bin 0 -> 37591 bytes .../2d-nproc4-lr1e-4/acc-2D-lr1e-4.jpg | Bin 0 -> 29143 bytes .../2d-nproc4-lr1e-4/alignment.o3472937 | 177 + .../2d-nproc4-lr1e-4/loss-2D-lr1e-4.jpg | Bin 0 -> 36154 bytes .../acc-vanilla-lr1e-3.jpg | Bin 0 -> 30033 bytes .../vanilla-nproc1-lr1e-3/alignment.o3476018 | 165 + .../loss-vanilla-lr1e-3.jpg | Bin 0 -> 37624 bytes .../test_vit_2d/test_vit_2d.py | 87 + .../test_vit_2p5d/2p5d.py | 88 + .../test_vit_2p5d/log/111log1e-3.txt | 103 + .../test_vit_2p5d/log/111log1e-3hxmodel.txt | 196 + .../test_vit_2p5d/log/111log1e-4.txt | 103 + .../test_vit_2p5d/log/111log1e-4hxmodel.txt | 195 + .../test_vit_2p5d/log/421log1e-3.txt | 115 + .../test_vit_2p5d/log/421log1e-4.txt | 115 + .../test_vit_2p5d/log/822log1e-3.txt | 131 + .../test_vit_2p5d/log/822log1e-4.txt | 131 + .../test_vit_2p5d/test_vit_2p5d.py | 88 + .../test_vit_3d/test_vit_3d.py | 114 + .../test_vit_vanilla.py | 28 + .../configs/test_trainer_resnet.py | 94 + .../configs/test_trainer_vit_2d.py | 135 + 
tests/test_trainer/test.sh | 5 + tests/test_trainer/test_trainer.py | 37 + .../test_activation_checkpointing.py | 60 + tests/test_zero_data_parallel/config.py | 91 + tests/test_zero_data_parallel/test_zero.py | 153 + tests/test_zero_data_parallel/test_zero.sh | 4 + .../configs/vit_2d_zero1.py | 159 + .../configs/vit_2d_zero2.py | 149 + .../configs/vit_2d_zero3.py | 149 + tests/test_zero_tensor_parallel/test.sh | 4 + .../test_vit_2d/test_vit_2d.py | 94 + 409 files changed, 35853 insertions(+) create mode 100644 .gitignore create mode 100644 MANIFEST.in create mode 100644 README.md create mode 100644 colossalai/__init__.py create mode 100644 colossalai/builder/__init__.py create mode 100644 colossalai/builder/builder.py create mode 100644 colossalai/builder/pipeline.py create mode 100644 colossalai/checkpointing.py create mode 100644 colossalai/communication/__init__.py create mode 100644 colossalai/communication/collective.py create mode 100644 colossalai/communication/p2p.py create mode 100644 colossalai/communication/ring.py create mode 100644 colossalai/communication/utils.py create mode 100644 colossalai/constants.py create mode 100644 colossalai/context/__init__.py create mode 100644 colossalai/context/_utils.py create mode 100644 colossalai/context/config.py create mode 100644 colossalai/context/parallel_context.py create mode 100644 colossalai/context/parallel_mode.py create mode 100644 colossalai/context/process_group_initializer/__init__.py create mode 100644 colossalai/context/process_group_initializer/initializer_1d.py create mode 100644 colossalai/context/process_group_initializer/initializer_2d.py create mode 100644 colossalai/context/process_group_initializer/initializer_2p5d.py create mode 100644 colossalai/context/process_group_initializer/initializer_3d.py create mode 100644 colossalai/context/process_group_initializer/initializer_data.py create mode 100644 colossalai/context/process_group_initializer/initializer_pipeline.py create mode 100644 colossalai/context/process_group_initializer/initializer_sequence.py create mode 100644 colossalai/context/process_group_initializer/initializer_tensor.py create mode 100644 colossalai/context/process_group_initializer/process_group_initializer.py create mode 100644 colossalai/context/random/__init__.py create mode 100644 colossalai/context/random/_helper.py create mode 100644 colossalai/context/random/seed_manager.py create mode 100644 colossalai/core.py create mode 100644 colossalai/engine/__init__.py create mode 100644 colossalai/engine/_base_engine.py create mode 100644 colossalai/engine/amp_type.py create mode 100644 colossalai/engine/gradient_handler/__init__.py create mode 100644 colossalai/engine/gradient_handler/_base_gradient_handler.py create mode 100644 colossalai/engine/gradient_handler/_data_parallel_gradient_handler.py create mode 100644 colossalai/engine/gradient_handler/_zero_gradient_handler.py create mode 100644 colossalai/engine/schedule/__init__.py create mode 100644 colossalai/engine/schedule/_base_schedule.py create mode 100644 colossalai/engine/schedule/_no_pipeline.py create mode 100644 colossalai/engine/schedule/_pipeline.py create mode 100644 colossalai/engine/schedule/_utils.py create mode 100644 colossalai/initialize.py create mode 100644 colossalai/logging/__init__.py create mode 100644 colossalai/logging/logging.py create mode 100644 colossalai/nn/__init__.py create mode 100644 colossalai/nn/data/__init__.py create mode 100644 colossalai/nn/data/_utils.py create mode 100644 
colossalai/nn/data/base_dataset.py create mode 100644 colossalai/nn/data/caltech101_dataset.py create mode 100644 colossalai/nn/data/cifar10_dataset.py create mode 100644 colossalai/nn/data/sampler/__init__.py create mode 100644 colossalai/nn/data/sampler/base_sampler.py create mode 100644 colossalai/nn/data/sampler/data_parallel_sampler.py create mode 100644 colossalai/nn/layer/__init__.py create mode 100644 colossalai/nn/layer/_common_utils.py create mode 100644 colossalai/nn/layer/_parallel_utilities.py create mode 100644 colossalai/nn/layer/base_layer.py create mode 100644 colossalai/nn/layer/parallel_1d/__init__.py create mode 100644 colossalai/nn/layer/parallel_1d/_utils.py create mode 100644 colossalai/nn/layer/parallel_1d/layers.py create mode 100644 colossalai/nn/layer/parallel_2d/__init__.py create mode 100644 colossalai/nn/layer/parallel_2d/_operation.py create mode 100644 colossalai/nn/layer/parallel_2d/_transformer.py create mode 100644 colossalai/nn/layer/parallel_2d/_utils.py create mode 100644 colossalai/nn/layer/parallel_2d/_vit.py create mode 100644 colossalai/nn/layer/parallel_2d/layers.py create mode 100644 colossalai/nn/layer/parallel_2p5d/__init__.py create mode 100644 colossalai/nn/layer/parallel_2p5d/_operation.py create mode 100644 colossalai/nn/layer/parallel_2p5d/_transformer.py create mode 100644 colossalai/nn/layer/parallel_2p5d/_utils.py create mode 100644 colossalai/nn/layer/parallel_2p5d/_vit.py create mode 100644 colossalai/nn/layer/parallel_2p5d/layers.py create mode 100644 colossalai/nn/layer/parallel_3d/__init__.py create mode 100644 colossalai/nn/layer/parallel_3d/_operation.py create mode 100644 colossalai/nn/layer/parallel_3d/_utils.py create mode 100644 colossalai/nn/layer/parallel_3d/_vit.py create mode 100644 colossalai/nn/layer/parallel_3d/layers.py create mode 100644 colossalai/nn/layer/parallel_sequence/__init__.py create mode 100644 colossalai/nn/layer/parallel_sequence/_operation.py create mode 100644 colossalai/nn/layer/parallel_sequence/_utils.py create mode 100644 colossalai/nn/layer/parallel_sequence/layers.py create mode 100644 colossalai/nn/layer/parallel_vision_transformer/__init__.py create mode 100644 colossalai/nn/layer/parallel_vision_transformer/layers.py create mode 100644 colossalai/nn/layer/vanilla_resnet/__init__.py create mode 100644 colossalai/nn/layer/vanilla_resnet/basic_block.py create mode 100644 colossalai/nn/layer/vanilla_resnet/bottleneck.py create mode 100644 colossalai/nn/layer/vanilla_resnet/conv.py create mode 100644 colossalai/nn/layer/vanilla_resnet/reslayer.py create mode 100644 colossalai/nn/layer/vanilla_vision_transformer/__init__.py create mode 100644 colossalai/nn/layer/vanilla_vision_transformer/layers.py create mode 100644 colossalai/nn/layer/wrapper/__init__.py create mode 100644 colossalai/nn/layer/wrapper/lambda_wrapper.py create mode 100644 colossalai/nn/loss/__init__.py create mode 100644 colossalai/nn/loss/base_loss.py create mode 100644 colossalai/nn/loss/cross_entropy_1d.py create mode 100644 colossalai/nn/loss/cross_entropy_2d.py create mode 100644 colossalai/nn/loss/cross_entropy_2p5d.py create mode 100644 colossalai/nn/loss/cross_entropy_3d.py create mode 100644 colossalai/nn/lr_scheduler/__init__.py create mode 100644 colossalai/nn/lr_scheduler/cosine.py create mode 100644 colossalai/nn/lr_scheduler/delayed.py create mode 100644 colossalai/nn/lr_scheduler/linear.py create mode 100644 colossalai/nn/lr_scheduler/multistep.py create mode 100644 colossalai/nn/lr_scheduler/onecycle.py create mode 
100644 colossalai/nn/lr_scheduler/poly.py create mode 100644 colossalai/nn/lr_scheduler/torch.py create mode 100644 colossalai/nn/model/__init__.py create mode 100644 colossalai/nn/model/base_model.py create mode 100644 colossalai/nn/model/vanilla_resnet/__init__.py create mode 100644 colossalai/nn/model/vanilla_resnet/resnet.py create mode 100644 colossalai/nn/model/vision_transformer/__init__.py create mode 100644 colossalai/nn/model/vision_transformer/vision_transformer.py create mode 100644 colossalai/nn/multi_tensor_apply/__init__.py create mode 100644 colossalai/nn/multi_tensor_apply/multi_tensor_apply.py create mode 100644 colossalai/nn/optimizer/__init__.py create mode 100644 colossalai/nn/optimizer/_utils.py create mode 100644 colossalai/nn/optimizer/fp16_optimizer.py create mode 100644 colossalai/nn/optimizer/fused_adam.py create mode 100644 colossalai/nn/optimizer/fused_lamb.py create mode 100644 colossalai/nn/optimizer/fused_sgd.py create mode 100644 colossalai/nn/optimizer/lamb.py create mode 100644 colossalai/nn/optimizer/lars.py create mode 100644 colossalai/nn/optimizer/loss_scaler.py create mode 100644 colossalai/nn/optimizer/zero_redundancy_optimizer_level_1.py create mode 100644 colossalai/nn/optimizer/zero_redundancy_optimizer_level_2.py create mode 100644 colossalai/nn/optimizer/zero_redundancy_optimizer_level_3.py create mode 100644 colossalai/registry/__init__.py create mode 100644 colossalai/registry/registry.py create mode 100644 colossalai/trainer/__init__.py create mode 100644 colossalai/trainer/_trainer.py create mode 100644 colossalai/trainer/hooks/__init__.py create mode 100644 colossalai/trainer/hooks/_base_hook.py create mode 100644 colossalai/trainer/hooks/_checkpoint_hook.py create mode 100644 colossalai/trainer/hooks/_log_hook.py create mode 100644 colossalai/trainer/hooks/_metric_hook.py create mode 100644 colossalai/trainer/metric.py create mode 100644 colossalai/utils/__init__.py create mode 100644 colossalai/utils/activation_checkpoint.py create mode 100644 colossalai/utils/common.py create mode 100644 colossalai/utils/cuda.py create mode 100644 colossalai/utils/memory.py create mode 100644 colossalai/utils/timer.py create mode 100644 configs/resnet/resnet50.py create mode 100644 configs/sample_config.py create mode 100644 configs/vit/vit_2d.py create mode 100644 configs/vit/vit_3d.py create mode 100644 csrc/colossal_C_frontend.cpp create mode 100644 csrc/compat.h create mode 100644 csrc/multi_tensor_adam.cu create mode 100644 csrc/multi_tensor_apply.cuh create mode 100644 csrc/multi_tensor_l2norm_kernel.cu create mode 100644 csrc/multi_tensor_lamb.cu create mode 100644 csrc/multi_tensor_scale_kernel.cu create mode 100644 csrc/multi_tensor_sgd_kernel.cu create mode 100644 csrc/type_shim.h create mode 100644 docs/Makefile create mode 100644 docs/_static/css/rtd_theme.css create mode 100644 docs/_templates/apidoc/module.rst_t create mode 100644 docs/_templates/apidoc/package.rst_t create mode 100644 docs/_templates/apidoc/toc.rst_t create mode 100644 docs/add_your_parallel.md create mode 100644 docs/amp.md create mode 100644 docs/colossalai/colossalai.builder.builder.rst create mode 100644 docs/colossalai/colossalai.builder.pipeline.rst create mode 100644 docs/colossalai/colossalai.builder.rst create mode 100644 docs/colossalai/colossalai.checkpointing.rst create mode 100644 docs/colossalai/colossalai.communication.collective.rst create mode 100644 docs/colossalai/colossalai.communication.p2p.rst create mode 100644 
docs/colossalai/colossalai.communication.ring.rst create mode 100644 docs/colossalai/colossalai.communication.rst create mode 100644 docs/colossalai/colossalai.communication.utils.rst create mode 100644 docs/colossalai/colossalai.constants.rst create mode 100644 docs/colossalai/colossalai.context.config.rst create mode 100644 docs/colossalai/colossalai.context.parallel_context.rst create mode 100644 docs/colossalai/colossalai.context.parallel_mode.rst create mode 100644 docs/colossalai/colossalai.context.process_group_initializer.initializer_1d.rst create mode 100644 docs/colossalai/colossalai.context.process_group_initializer.initializer_2d.rst create mode 100644 docs/colossalai/colossalai.context.process_group_initializer.initializer_2p5d.rst create mode 100644 docs/colossalai/colossalai.context.process_group_initializer.initializer_3d.rst create mode 100644 docs/colossalai/colossalai.context.process_group_initializer.initializer_data.rst create mode 100644 docs/colossalai/colossalai.context.process_group_initializer.initializer_pipeline.rst create mode 100644 docs/colossalai/colossalai.context.process_group_initializer.initializer_sequence.rst create mode 100644 docs/colossalai/colossalai.context.process_group_initializer.initializer_tensor.rst create mode 100644 docs/colossalai/colossalai.context.process_group_initializer.process_group_initializer.rst create mode 100644 docs/colossalai/colossalai.context.process_group_initializer.rst create mode 100644 docs/colossalai/colossalai.context.random.rst create mode 100644 docs/colossalai/colossalai.context.random.seed_manager.rst create mode 100644 docs/colossalai/colossalai.context.rst create mode 100644 docs/colossalai/colossalai.core.rst create mode 100644 docs/colossalai/colossalai.engine.amp_type.rst create mode 100644 docs/colossalai/colossalai.engine.gradient_handler.rst create mode 100644 docs/colossalai/colossalai.engine.rst create mode 100644 docs/colossalai/colossalai.engine.schedule.rst create mode 100644 docs/colossalai/colossalai.initialize.rst create mode 100644 docs/colossalai/colossalai.logging.logging.rst create mode 100644 docs/colossalai/colossalai.logging.rst create mode 100644 docs/colossalai/colossalai.nn.data.base_dataset.rst create mode 100644 docs/colossalai/colossalai.nn.data.caltech101_dataset.rst create mode 100644 docs/colossalai/colossalai.nn.data.cifar10_dataset.rst create mode 100644 docs/colossalai/colossalai.nn.data.prefetcher.rst create mode 100644 docs/colossalai/colossalai.nn.data.rst create mode 100644 docs/colossalai/colossalai.nn.data.sampler.base_sampler.rst create mode 100644 docs/colossalai/colossalai.nn.data.sampler.data_parallel_sampler.rst create mode 100644 docs/colossalai/colossalai.nn.data.sampler.rst create mode 100644 docs/colossalai/colossalai.nn.data.wiki_dataset.rst create mode 100644 docs/colossalai/colossalai.nn.layer.base_layer.rst create mode 100644 docs/colossalai/colossalai.nn.layer.parallel_1d.layers.rst create mode 100644 docs/colossalai/colossalai.nn.layer.parallel_1d.rst create mode 100644 docs/colossalai/colossalai.nn.layer.parallel_2d.layers.rst create mode 100644 docs/colossalai/colossalai.nn.layer.parallel_2d.rst create mode 100644 docs/colossalai/colossalai.nn.layer.parallel_2p5d.layers.rst create mode 100644 docs/colossalai/colossalai.nn.layer.parallel_2p5d.rst create mode 100644 docs/colossalai/colossalai.nn.layer.parallel_3d.layers.rst create mode 100644 docs/colossalai/colossalai.nn.layer.parallel_3d.rst create mode 100644 
docs/colossalai/colossalai.nn.layer.parallel_sequence.layers.rst create mode 100644 docs/colossalai/colossalai.nn.layer.parallel_sequence.rst create mode 100644 docs/colossalai/colossalai.nn.layer.parallel_vision_transformer.layers.rst create mode 100644 docs/colossalai/colossalai.nn.layer.parallel_vision_transformer.rst create mode 100644 docs/colossalai/colossalai.nn.layer.rst create mode 100644 docs/colossalai/colossalai.nn.layer.vanilla_resnet.basic_block.rst create mode 100644 docs/colossalai/colossalai.nn.layer.vanilla_resnet.bottleneck.rst create mode 100644 docs/colossalai/colossalai.nn.layer.vanilla_resnet.conv.rst create mode 100644 docs/colossalai/colossalai.nn.layer.vanilla_resnet.reslayer.rst create mode 100644 docs/colossalai/colossalai.nn.layer.vanilla_resnet.rst create mode 100644 docs/colossalai/colossalai.nn.layer.vanilla_vision_transformer.layers.rst create mode 100644 docs/colossalai/colossalai.nn.layer.vanilla_vision_transformer.rst create mode 100644 docs/colossalai/colossalai.nn.layer.wrapper.lambda_wrapper.rst create mode 100644 docs/colossalai/colossalai.nn.layer.wrapper.rst create mode 100644 docs/colossalai/colossalai.nn.loss.base_loss.rst create mode 100644 docs/colossalai/colossalai.nn.loss.cross_entropy_1d.rst create mode 100644 docs/colossalai/colossalai.nn.loss.cross_entropy_2d.rst create mode 100644 docs/colossalai/colossalai.nn.loss.cross_entropy_2p5d.rst create mode 100644 docs/colossalai/colossalai.nn.loss.cross_entropy_3d.rst create mode 100644 docs/colossalai/colossalai.nn.loss.rst create mode 100644 docs/colossalai/colossalai.nn.lr_scheduler.cosine.rst create mode 100644 docs/colossalai/colossalai.nn.lr_scheduler.delayed.rst create mode 100644 docs/colossalai/colossalai.nn.lr_scheduler.linear.rst create mode 100644 docs/colossalai/colossalai.nn.lr_scheduler.multistep.rst create mode 100644 docs/colossalai/colossalai.nn.lr_scheduler.onecycle.rst create mode 100644 docs/colossalai/colossalai.nn.lr_scheduler.poly.rst create mode 100644 docs/colossalai/colossalai.nn.lr_scheduler.rst create mode 100644 docs/colossalai/colossalai.nn.lr_scheduler.torch.rst create mode 100644 docs/colossalai/colossalai.nn.model.base_model.rst create mode 100644 docs/colossalai/colossalai.nn.model.bert.bert.rst create mode 100644 docs/colossalai/colossalai.nn.model.bert.rst create mode 100644 docs/colossalai/colossalai.nn.model.rst create mode 100644 docs/colossalai/colossalai.nn.model.vanilla_resnet.resnet.rst create mode 100644 docs/colossalai/colossalai.nn.model.vanilla_resnet.rst create mode 100644 docs/colossalai/colossalai.nn.model.vision_transformer.rst create mode 100644 docs/colossalai/colossalai.nn.model.vision_transformer.vision_transformer.rst create mode 100644 docs/colossalai/colossalai.nn.multi_tensor_apply.multi_tensor_apply.rst create mode 100644 docs/colossalai/colossalai.nn.multi_tensor_apply.rst create mode 100644 docs/colossalai/colossalai.nn.optimizer.fp16_optimizer.rst create mode 100644 docs/colossalai/colossalai.nn.optimizer.fused_adam.rst create mode 100644 docs/colossalai/colossalai.nn.optimizer.fused_lamb.rst create mode 100644 docs/colossalai/colossalai.nn.optimizer.fused_sgd.rst create mode 100644 docs/colossalai/colossalai.nn.optimizer.lamb.rst create mode 100644 docs/colossalai/colossalai.nn.optimizer.loss_scaler.rst create mode 100644 docs/colossalai/colossalai.nn.optimizer.rst create mode 100644 docs/colossalai/colossalai.nn.optimizer.zero_redundancy_optimizer_level_1.rst create mode 100644 
docs/colossalai/colossalai.nn.optimizer.zero_redundancy_optimizer_level_2.rst create mode 100644 docs/colossalai/colossalai.nn.optimizer.zero_redundancy_optimizer_level_3.rst create mode 100644 docs/colossalai/colossalai.nn.rst create mode 100644 docs/colossalai/colossalai.registry.registry.rst create mode 100644 docs/colossalai/colossalai.registry.rst create mode 100644 docs/colossalai/colossalai.rst create mode 100644 docs/colossalai/colossalai.trainer.hooks.rst create mode 100644 docs/colossalai/colossalai.trainer.metric.rst create mode 100644 docs/colossalai/colossalai.trainer.rst create mode 100644 docs/colossalai/colossalai.utils.activation_checkpoint.rst create mode 100644 docs/colossalai/colossalai.utils.common.rst create mode 100644 docs/colossalai/colossalai.utils.cuda.rst create mode 100644 docs/colossalai/colossalai.utils.memory.rst create mode 100644 docs/colossalai/colossalai.utils.rst create mode 100644 docs/colossalai/colossalai.utils.timer.rst create mode 100644 docs/conf.py create mode 100644 docs/config.md create mode 100644 docs/index.rst create mode 100644 docs/installation.md create mode 100644 docs/make.bat create mode 100644 docs/model.md create mode 100644 docs/parallelization.md create mode 100644 docs/run_demo.md create mode 100644 docs/trainer_engine.md create mode 100644 docs/zero.md create mode 100644 examples/colossal_cifar_demo.ipynb create mode 100644 examples/run_trainer.py create mode 100644 model_zoo/__init__.py create mode 100644 model_zoo/bert/parallel_1d/.init create mode 100644 model_zoo/bert/parallel_2d/.init create mode 100644 model_zoo/bert/parallel_2p5d/.init create mode 100644 model_zoo/bert/parallel_3d/.init create mode 100644 model_zoo/mlp_mixer/__init__.py create mode 100644 model_zoo/mlp_mixer/parallel_1d/.init create mode 100644 model_zoo/mlp_mixer/parallel_2d/.init create mode 100644 model_zoo/mlp_mixer/parallel_2p5d/.init create mode 100644 model_zoo/mlp_mixer/parallel_3d/__init__.py create mode 100644 model_zoo/mlp_mixer/parallel_3d/mlp_mixer.py create mode 100644 model_zoo/vit/__init__.py create mode 100644 model_zoo/vit/parallel_1d/.init create mode 100644 model_zoo/vit/parallel_2d/__init__.py create mode 100644 model_zoo/vit/parallel_2d/vit.py create mode 100644 model_zoo/vit/parallel_2p5d/.init create mode 100644 model_zoo/vit/parallel_3d/__init__.py create mode 100644 model_zoo/vit/parallel_3d/vit.py create mode 100644 pytest.ini create mode 100644 requirements/requirements-test.txt create mode 100644 requirements/requirements.txt create mode 100644 scripts/slurm_dist_train.sh create mode 100644 setup.py create mode 100644 tests/test_config/sample_config.py create mode 100644 tests/test_config/test_load_config.py create mode 100644 tests/test_context/configs/parallel_2d_init.py create mode 100644 tests/test_context/configs/parallel_2p5d_init.py create mode 100644 tests/test_context/configs/parallel_3d_init.py create mode 100644 tests/test_context/test_2d_init.py create mode 100644 tests/test_context/test_2p5d_init.py create mode 100644 tests/test_context/test_3d_init.py create mode 100644 tests/test_data/test_cifar10_dataset.py create mode 100644 tests/test_data/test_data_parallel_sampler.py create mode 100644 tests/test_data/test_deterministic_dataloader.py create mode 100644 tests/test_data_pipeline_tensor_parallel/configs/vit_2d.py create mode 100644 tests/test_data_pipeline_tensor_parallel/configs/vit_2p5d.py create mode 100644 tests/test_data_pipeline_tensor_parallel/test.sh create mode 100644 
tests/test_data_pipeline_tensor_parallel/test_vit_2d/test_vit_2d.py create mode 100644 tests/test_data_pipeline_tensor_parallel/test_vit_2p5d/test_vit_2p5d.py create mode 100644 tests/test_engine/configs/non_pipeline_resnet.py create mode 100644 tests/test_engine/configs/non_pipeline_resnet_apex_amp.py create mode 100644 tests/test_engine/configs/non_pipeline_resnet_torch_amp.py create mode 100644 tests/test_engine/configs/pipeline_vanilla_resnet.py create mode 100644 tests/test_engine/test.sh create mode 100644 tests/test_engine/test_non_pipeline_engine/test_engine_apex_amp.py create mode 100644 tests/test_engine/test_non_pipeline_engine/test_engine_no_amp.py create mode 100644 tests/test_engine/test_non_pipeline_engine/test_engine_torch_amp.py create mode 100644 tests/test_engine/test_pipeline/debug_schedule.py create mode 100644 tests/test_engine/test_pipeline/test_p2p.py create mode 100644 tests/test_engine/test_pipeline/test_partition.py create mode 100644 tests/test_engine/test_pipeline/test_schedule.py create mode 100644 tests/test_engine/test_pipeline_engine/test_engine.py create mode 100644 tests/test_fp16_optimizer/configs/vit_2d.py create mode 100644 tests/test_fp16_optimizer/test.sh create mode 100644 tests/test_fp16_optimizer/test_vit_2d/test_vit_2d.py create mode 100644 tests/test_layers/test.sh create mode 100644 tests/test_layers/test_1d/common.py create mode 100644 tests/test_layers/test_1d/test_1d.py create mode 100644 tests/test_layers/test_1d/test_layer.py create mode 100644 tests/test_layers/test_2d/common.py create mode 100644 tests/test_layers/test_2d/test_2d.py create mode 100644 tests/test_layers/test_2d/test_layer.py create mode 100644 tests/test_layers/test_2d/test_operation.py create mode 100644 tests/test_layers/test_2p5d/common.py create mode 100644 tests/test_layers/test_2p5d/test.sh create mode 100644 tests/test_layers/test_2p5d/test_2p5d.py create mode 100644 tests/test_layers/test_2p5d/test_layer.py create mode 100644 tests/test_layers/test_2p5d/test_operation.py create mode 100644 tests/test_layers/test_3d/common.py create mode 100644 tests/test_layers/test_3d/test.sh create mode 100644 tests/test_layers/test_3d/test_3d.py create mode 100644 tests/test_layers/test_3d/test_conn.py create mode 100644 tests/test_layers/test_3d/test_layer.py create mode 100644 tests/test_layers/test_3d/test_operation.py create mode 100644 tests/test_layers/test_sequence/test_layer.py create mode 100644 tests/test_layers/test_sequence/test_sequence.py create mode 100644 tests/test_lr_scheduler/test_lr_scheduler.py create mode 100644 tests/test_models/test_vanilla_resnet/test_vanilla_resnet.py create mode 100644 tests/test_models/test_vision_transformer/configs/vit_2d.py create mode 100644 tests/test_models/test_vision_transformer/configs/vit_2p5d.py create mode 100644 tests/test_models/test_vision_transformer/configs/vit_3d.py create mode 100644 tests/test_models/test_vision_transformer/configs/vit_vanilla.py create mode 100644 tests/test_models/test_vision_transformer/test.sh create mode 100644 tests/test_models/test_vision_transformer/test_vit_2d/exp_logs/2d-nproc4-lr1e-3/acc-2D-lr1e-3.jpg create mode 100644 tests/test_models/test_vision_transformer/test_vit_2d/exp_logs/2d-nproc4-lr1e-3/alignment.o3475503 create mode 100644 tests/test_models/test_vision_transformer/test_vit_2d/exp_logs/2d-nproc4-lr1e-3/loss-2D-lr1e-3.jpg create mode 100644 tests/test_models/test_vision_transformer/test_vit_2d/exp_logs/2d-nproc4-lr1e-4/acc-2D-lr1e-4.jpg create mode 100644 
tests/test_models/test_vision_transformer/test_vit_2d/exp_logs/2d-nproc4-lr1e-4/alignment.o3472937 create mode 100644 tests/test_models/test_vision_transformer/test_vit_2d/exp_logs/2d-nproc4-lr1e-4/loss-2D-lr1e-4.jpg create mode 100644 tests/test_models/test_vision_transformer/test_vit_2d/exp_logs/vanilla-nproc1-lr1e-3/acc-vanilla-lr1e-3.jpg create mode 100644 tests/test_models/test_vision_transformer/test_vit_2d/exp_logs/vanilla-nproc1-lr1e-3/alignment.o3476018 create mode 100644 tests/test_models/test_vision_transformer/test_vit_2d/exp_logs/vanilla-nproc1-lr1e-3/loss-vanilla-lr1e-3.jpg create mode 100644 tests/test_models/test_vision_transformer/test_vit_2d/test_vit_2d.py create mode 100644 tests/test_models/test_vision_transformer/test_vit_2p5d/2p5d.py create mode 100644 tests/test_models/test_vision_transformer/test_vit_2p5d/log/111log1e-3.txt create mode 100644 tests/test_models/test_vision_transformer/test_vit_2p5d/log/111log1e-3hxmodel.txt create mode 100644 tests/test_models/test_vision_transformer/test_vit_2p5d/log/111log1e-4.txt create mode 100644 tests/test_models/test_vision_transformer/test_vit_2p5d/log/111log1e-4hxmodel.txt create mode 100644 tests/test_models/test_vision_transformer/test_vit_2p5d/log/421log1e-3.txt create mode 100644 tests/test_models/test_vision_transformer/test_vit_2p5d/log/421log1e-4.txt create mode 100644 tests/test_models/test_vision_transformer/test_vit_2p5d/log/822log1e-3.txt create mode 100644 tests/test_models/test_vision_transformer/test_vit_2p5d/log/822log1e-4.txt create mode 100644 tests/test_models/test_vision_transformer/test_vit_2p5d/test_vit_2p5d.py create mode 100644 tests/test_models/test_vision_transformer/test_vit_3d/test_vit_3d.py create mode 100644 tests/test_models/test_vision_transformer/test_vit_vanilla.py create mode 100644 tests/test_trainer/configs/test_trainer_resnet.py create mode 100644 tests/test_trainer/configs/test_trainer_vit_2d.py create mode 100644 tests/test_trainer/test.sh create mode 100644 tests/test_trainer/test_trainer.py create mode 100644 tests/test_utils/test_activation_checkpointing.py create mode 100644 tests/test_zero_data_parallel/config.py create mode 100644 tests/test_zero_data_parallel/test_zero.py create mode 100644 tests/test_zero_data_parallel/test_zero.sh create mode 100644 tests/test_zero_tensor_parallel/configs/vit_2d_zero1.py create mode 100644 tests/test_zero_tensor_parallel/configs/vit_2d_zero2.py create mode 100644 tests/test_zero_tensor_parallel/configs/vit_2d_zero3.py create mode 100644 tests/test_zero_tensor_parallel/test.sh create mode 100644 tests/test_zero_tensor_parallel/test_vit_2d/test_vit_2d.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..162eb26a9 --- /dev/null +++ b/.gitignore @@ -0,0 +1,144 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ +docs/.build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# IDE +.idea/ +.vscode/ + +# macos +.DS_Store +#data/ + +# launcher setting +tests/launcher/log +tests/launcher/personal + +docs/.build diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 000000000..a406adf97 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,4 @@ +include *.txt README.md +recursive-include requirements *.txt +recursive-include colossalai *.cpp *.h *.cu *.tr *.cuh *.cc +recursive-include csrc *.cpp *.h *.cu *.tr *.cuh *.cc \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 000000000..ef06b336d --- /dev/null +++ b/README.md @@ -0,0 +1,104 @@ +# ColossalAI + +An integrated large-scale model training framework with efficient parallelization techniques + +## Installation + +### PyPI + +```bash +pip install colossalai +``` + +### Install From Source + +```shell +git clone git@github.com:hpcaitech/ColossalAI.git +cd ColossalAI +# install dependency +pip install -r requirements/requirements.txt + +# install colossalai +pip install . +``` + +Install and enable CUDA kernel fusion (compulsory installation when using fused optimizer) + +```shell +pip install -v --no-cache-dir --global-option="--cuda_ext" . 
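+# Note (assumption): compiling the fused CUDA kernels requires the NVCC compiler from
+# a CUDA toolkit compatible with the CUDA version of your PyTorch build.
+# Quick sanity check that the package is importable after installation:
+python -c "import colossalai"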
+```
+
+## Documentation
+
+- [Documentation](https://www.colossalai.org/)
+
+## Quick View
+
+### Start Distributed Training in a Few Lines
+
+```python
+import colossalai
+from colossalai.engine import Engine
+from colossalai.trainer import Trainer
+from colossalai.core import global_context as gpc
+
+model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = colossalai.initialize()
+engine = Engine(
+    model=model,
+    criterion=criterion,
+    optimizer=optimizer,
+    lr_scheduler=lr_scheduler,
+    schedule=schedule
+)
+
+trainer = Trainer(engine=engine,
+                  hooks_cfg=gpc.config.hooks,
+                  verbose=True)
+trainer.fit(
+    train_dataloader=train_dataloader,
+    test_dataloader=test_dataloader,
+    max_epochs=gpc.config.num_epochs,
+    display_progress=True,
+    test_interval=5
+)
+```
+
+### Write a Simple 2D Parallel Model
+
+Suppose we have a huge MLP model whose very large hidden size makes it difficult to fit into a single GPU. We can
+then distribute the model weights across GPUs in a 2D mesh while still writing the model in a familiar way.
+
+```python
+from colossalai.nn import Linear2D
+import torch.nn as nn
+
+
+class MLP_2D(nn.Module):
+
+    def __init__(self):
+        super().__init__()
+        self.linear_1 = Linear2D(in_features=1024, out_features=16384)
+        self.linear_2 = Linear2D(in_features=16384, out_features=1024)
+
+    def forward(self, x):
+        x = self.linear_1(x)
+        x = self.linear_2(x)
+        return x
+
+```
+
+## Features
+
+ColossalAI provides a collection of parallel training components. Our goal is to let you write distributed
+deep learning models the same way you write a single-GPU model, with friendly tools that kick-start
+distributed training in a few lines.
+
+- [Data Parallelism](./docs/parallelization.md)
+- [Pipeline Parallelism](./docs/parallelization.md)
+- [1D, 2D, 2.5D, 3D and sequence parallelism](./docs/parallelization.md)
+- [Friendly trainer and engine](./docs/trainer_engine.md)
+- [Extensible for new parallelism](./docs/add_your_parallel.md)
+- [Mixed Precision Training](./docs/amp.md)
+- [Zero Redundancy Optimizer (ZeRO)](./docs/zero.md)
+
+
diff --git a/colossalai/__init__.py b/colossalai/__init__.py
new file mode 100644
index 000000000..854d941bc
--- /dev/null
+++ b/colossalai/__init__.py
@@ -0,0 +1,4 @@
+from .initialize import init_dist, initialize
+from .nn import *
+
+__version__ = '0.0.1'
diff --git a/colossalai/builder/__init__.py b/colossalai/builder/__init__.py
new file mode 100644
index 000000000..17d643285
--- /dev/null
+++ b/colossalai/builder/__init__.py
@@ -0,0 +1,2 @@
+from .builder import *
+from .pipeline import ModelInitializer
diff --git a/colossalai/builder/builder.py b/colossalai/builder/builder.py
new file mode 100644
index 000000000..f88dc1cbf
--- /dev/null
+++ b/colossalai/builder/builder.py
@@ -0,0 +1,262 @@
+#!/usr/bin/env python
+# -*- encoding: utf-8 -*-
+
+import inspect
+from collections.abc import Iterable
+
+from colossalai.registry import *
+
+
+def build_from_config(module, config: dict):
+    """Returns an object of :class:`module` constructed from `config`.
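+
+    A minimal usage sketch (any class whose constructor accepts the given
+    keyword arguments works the same way; ``torch.nn.Linear`` is used here
+    purely as an illustration)::
+
+        import torch.nn as nn
+        layer = build_from_config(nn.Linear, {'in_features': 16, 'out_features': 8})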
+
+    :param module: A python or user-defined class
+    :type module: class
+    :param config: A python dict containing information used in the construction
+        of the return object
+    :type config: dict
+    :raises AssertionError: Raises an AssertionError if `module` is not a class
+    :return: An object of :class:`module`
+    :rtype: :class:`module`
+    """
+    assert inspect.isclass(module), 'module must be a class'
+    return module(**config)
+
+
+def build_from_registry(config, registry: Registry):
+    """Returns an object constructed from `config`, the type of the object
+    is specified by `registry`.
+
+    :param config: A python dict or a :class:`colossalai.context.Config` object
+        containing information used in the construction of the return object
+    :type config: dict or :class:`colossalai.context.Config`
+    :param registry: A registry specifying the type of the return object
+    :type registry: :class:`Registry`
+    :raises AssertionError: Raises an AssertionError if `registry` is not an object
+        of :class:`Registry` or `mod_type` in `config` is not found in `registry`
+    :raises Exception: Raises an Exception if an error occurred when building
+        from the registry
+    :return: An object specified by `registry`
+    :rtype: Python object specified by `registry`
+    """
+    config_ = config.copy()  # keep the original config untouched
+    assert isinstance(
+        registry, Registry), f'Expected type Registry but got {type(registry)}'
+
+    mod_type = config_.pop('type')
+    assert registry.has(
+        mod_type), f'{mod_type} is not found in registry {registry.name}'
+    try:
+        obj = registry.get_module(mod_type)(**config_)
+    except Exception as e:
+        print(
+            f'An error occurred when building {mod_type} from registry {registry.name}', flush=True)
+        raise e
+
+    return obj
+
+
+def build_layer(config):
+    """Returns a layer object of :class:`nn.Module` constructed from `config`.
+
+    :param config: A python dict or a :class:`colossalai.context.Config` object
+        containing information used in the construction of the return object
+    :type config: dict or :class:`colossalai.context.Config`
+    :return: An object of :class:`nn.Module`
+    :rtype: :class:`nn.Module`
+    """
+    return build_from_registry(config, LAYERS)
+
+
+def build_loss(config):
+    """Returns a loss function object of :class:`torch.autograd.Function` constructed
+    from `config`.
+
+    :param config: A python dict or a :class:`colossalai.context.Config` object
+        containing information used in the construction of the return object
+    :type config: dict or :class:`colossalai.context.Config`
+    :return: An object of :class:`torch.autograd.Function`
+    :rtype: :class:`torch.autograd.Function`
+    """
+    return build_from_registry(config, LOSSES)
+
+
+def build_model(config):
+    """Returns a model object of :class:`nn.Module` constructed from `config`.
+
+    :param config: A python dict or a :class:`colossalai.context.Config` object
+        containing information used in the construction of the return object
+    :type config: dict or :class:`colossalai.context.Config`
+    :return: An object of :class:`nn.Module`
+    :rtype: :class:`nn.Module`
+    """
+    return build_from_registry(config, MODELS)
+
+
+def build_dataset(config):
+    """Returns a dataset object of :class:`torch.utils.data.Dataset` constructed
+    from `config`.
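+
+    A hypothetical example (assuming a dataset class ``MyDataset(root)`` has been
+    registered in the ``DATASETS`` registry)::
+
+        dataset = build_dataset(dict(type='MyDataset', root='./data'))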
+
+    :param config: A python dict or a :class:`colossalai.context.Config` object
+        containing information used in the construction of the return object
+    :type config: dict or :class:`colossalai.context.Config`
+    :return: An object of :class:`torch.utils.data.Dataset`
+    :rtype: :class:`torch.utils.data.Dataset`
+    """
+    return build_from_registry(config, DATASETS)
+
+
+def build_optimizer(config, model, params: Iterable = None, need_module=False):
+    """Returns an optimizer object of :class:`torch.optim.Optimizer` constructed from `config`,
+    `model` and `params`.
+
+    :param config: A python dict or a :class:`colossalai.context.Config` object
+        containing information used in the construction of the return object
+    :type config: dict or :class:`colossalai.context.Config`
+    :param model: A model containing parameters for the optimizer
+    :type model: :class:`nn.Module`
+    :param params: An iterable of parameters for the optimizer
+    :type params: Iterable, optional
+    :param need_module: Indicates whether the optimizer needs the module itself
+        rather than its parameters
+    :type need_module: bool, optional
+    :raises AssertionError: Raises an AssertionError if both `model` and `params` are None
+    :return: An object of :class:`torch.optim.Optimizer`
+    :rtype: :class:`torch.optim.Optimizer`
+    """
+    assert model is not None or params is not None, 'arguments model and params cannot both be None'
+    if need_module:
+        config['module'] = model
+    elif model is not None:
+        config['params'] = model.parameters()
+    elif params is not None:
+        config['params'] = params
+
+    return build_from_registry(config, OPTIMIZERS)
+
+
+def build_gradient_handler(config, model, optimizer):
+    """Returns a gradient handler object of :class:`BaseGradientHandler` constructed from `config`,
+    `model` and `optimizer`.
+
+    :param config: A python dict or a :class:`colossalai.context.Config` object
+        containing information used in the construction of the return object
+    :type config: dict or :class:`colossalai.context.Config`
+    :param model: A model containing parameters for the gradient handler
+    :type model: :class:`nn.Module`
+    :param optimizer: An optimizer object containing parameters for the gradient handler
+    :type optimizer: :class:`torch.optim.Optimizer`
+    :return: An object of :class:`BaseGradientHandler`
+    :rtype: :class:`BaseGradientHandler`
+    """
+    config_ = config.copy()
+    mod_type = config_.pop('type')
+    return GRADIENT_HANDLER.get_module(mod_type)(model, optimizer, **config_)
+
+
+def build_hooks(config, trainer):
+    """Returns a hook object of :class:`BaseHook` constructed from `config` and `trainer`.
+
+    :param config: A python dict or a :class:`colossalai.context.Config` object
+        containing information used in the construction of the return object
+    :type config: dict or :class:`colossalai.context.Config`
+    :param trainer: A :class:`Trainer` object containing parameters for the hook
+    :type trainer: :class:`Trainer`
+    :return: An object of :class:`BaseHook`
+    :rtype: :class:`BaseHook`
+    """
+    config['trainer'] = trainer
+    return build_from_registry(config, HOOKS)
+
+
+def build_transform(config):
+    """Returns a transformation object of :class:`torchvision.transforms` constructed
+    from `config`.
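+
+    A minimal sketch (assuming the torchvision transform of the same name has been
+    registered in the ``TRANSFORMS`` registry)::
+
+        transform = build_transform(dict(type='RandomCrop', size=32))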
+ + :param config: A python dict or a :class:`colossalai.context.Config` object + containing information used in the construction of the return object + :type config: dict or :class:`colossalai.context.Config` + :return: An object of :class:`torchvision.transforms` + :rtype: :class:`torchvision.transforms` + """ + return build_from_registry(config, TRANSFORMS) + + +def build_pipe_alloc_policy(config): + """Returns a pipeline allocation policy object constructed from `config`. + + :param config: A python dict or a :class:`colossalai.context.Config` object + containing information used in the construction of the return object + :type config: dict or :class:`colossalai.context.Config` + :return: A pipeline allocation policy object + :rtype: + """ + return build_from_registry(config, PIPE_ALLOC_POLICY) + + +def build_data_sampler(config, dataset): + """Returns a data sampler object of :class:`colossalai.nn.data.sampler.BaseSampler` + constructed from `config`. + + :param config: A python dict or a :class:`colossalai.context.Config` object + containing information used in the construction of the return object + :type config: dict or :class:`colossalai.context.Config` + :param dataset: An object of :class:`torch.utils.data.Dataset` containing information + used in the construction of the return object + :type dataset: :class:`torch.utils.data.Dataset` + :return: An object of :class:`colossalai.nn.data.sampler.BaseSampler` + :rtype: :class:`colossalai.nn.data.sampler.BaseSampler` + """ + config_ = config.copy() + mod_type = config_.pop('type') + return SAMPLERS.get_module(mod_type)(dataset, **config_) + + +def build_optimizer_wrapper(config, optimizer, model=None): + """Returns an optimizer wrapper object of :class:`torch.optim.Optimizer` constructed + from `config`, `model` and `optimizer`. + + :param config: A python dict or a :class:`colossalai.context.Config` object + containing information used in the construction of the return object + :type config: dict or :class:`colossalai.context.Config` + :param optimizer: An optimizer object containing parameters for the gradient handler + :type optimizer: :class:`torch.optim.Optimizer` + :param model: A model containing parameters for the gradient handler + :type model: :class:`nn.Module`, optional + :return: An object of :class:`torch.optim.Optimizer` + :rtype: :class:`torch.optim.Optimizer` + """ + config_ = config.copy() + mod_type = config_.pop('type') + + # LSG: special treatment for zeor level 3 + if mod_type == 'ZeroRedundancyOptimizer_Level_3': + return OPTIMIZER_WRAPPERS.get_module(mod_type)(model, optimizer, **config_) + else: + return OPTIMIZER_WRAPPERS.get_module(mod_type)(optimizer, **config_) + + +def build_lr_scheduler(config, optimizer, total_steps, num_steps_per_epoch): + """Returns a learning rate scheduler object of :class:`torch.optim.lr_scheduler` + constructed from `config`, `optimizer`, `total_steps` and `num_steps_per_epoch`. 
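The ZeRO level-3 branch above is the one wrapper that needs the model as well as the optimizer, since it partitions parameters. A short sketch of that dispatch, with a generic `wrappers` dict standing in for the `OPTIMIZER_WRAPPERS` registry.

```python
# Sketch of the dispatch in build_optimizer_wrapper: ZeRO level 3 also needs the model,
# every other wrapper only wraps the optimizer. `wrappers` is an illustrative stand-in
# for the OPTIMIZER_WRAPPERS registry.
def build_optimizer_wrapper_demo(config, optimizer, model=None, wrappers=None):
    cfg = dict(config)
    mod_type = cfg.pop('type')
    wrapper_cls = wrappers[mod_type]
    if mod_type == 'ZeroRedundancyOptimizer_Level_3':
        return wrapper_cls(model, optimizer, **cfg)    # level-3 ZeRO sees the model too
    return wrapper_cls(optimizer, **cfg)
```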
+ + :param config: A python dict or a :class:`colossalai.context.Config` object + containing information used in the construction of the return object + :type config: dict or :class:`colossalai.context.Config` + :param optimizer: An optimizer object containing parameters for the learning rate + scheduler + :type optimizer: :class:`torch.optim.Optimizer` + :param total_steps: Number of total steps of the learning rate scheduler + :type total_steps: int + :param num_steps_per_epoch: number of steps per epoch of the learning rate scheduler + :type num_steps_per_epoch: int + :return: An object of :class:`torch.optim.lr_scheduler` + :rtype: :class:`torch.optim.lr_scheduler` + """ + config_ = config.copy() + mod_type = config_.pop('type') + # warmup epochs will overwrite warmup steps + if 'warmup_epochs' in config_: + warmup_epochs = config_.pop('warmup_epochs') + config_['warmup_steps'] = int(num_steps_per_epoch * warmup_epochs) + return LR_SCHEDULERS.get_module(mod_type)(optimizer, total_steps, num_steps_per_epoch=num_steps_per_epoch, + **config_) diff --git a/colossalai/builder/pipeline.py b/colossalai/builder/pipeline.py new file mode 100644 index 000000000..caf5c8472 --- /dev/null +++ b/colossalai/builder/pipeline.py @@ -0,0 +1,226 @@ +import copy +import heapq + +from colossalai.builder import build_model, build_layer +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.logging import get_global_dist_logger +from colossalai.utils import set_to_cuda + + +def _binary_partition(weights, st, ed): + """Returns the binary partition position of `weights`, given the start + position `st` and the end position `ed`. + + :param weights: A python list to be binary partitioned + :type weights: list + :param st: the start position of the binary partition + :type st: int + :param ed: the end postition of the binary partition + :type ed: int + :return: the binary partition position of `weights` + :rtype: int + """ + w_sum = weights[ed - 1] + prefix = 0 + if st > 0: + w_sum -= weights[st - 1] + prefix = weights[st - 1] + minimum = float("inf") + for idx in range(st + 1, ed): + front = weights[idx - 1] - prefix + diff = abs(w_sum - 2 * front) + if diff < minimum: + pos = idx + minimum = diff + + return st, pos, ed + + +def _heap_addition(weights, intervals, add_cnt): + """ + """ + def _heap_push(heap, st, ed): + value = weights[ed - 1] + if st > 0: + value -= weights[st - 1] + heapq.heappush(heap, (-value, st, ed)) + + ret_intervals = [] + heap = [] + + for st, ed in intervals: + _heap_push(heap, st, ed) + + while add_cnt > 0: + _, st, ed = heapq.heappop(heap) + if ed - st == 1: + ret_intervals.append((st, ed)) + else: + l, m, r = _binary_partition(weights, st, ed) + _heap_push(heap, l, m) + _heap_push(heap, m, r) + add_cnt -= 1 + + while heap: + _, st, ed = heapq.heappop(heap) + ret_intervals.append((st, ed)) + + ret_intervals.sort() + return ret_intervals + + +def _calc_partitions(weights, value): + prev = 0 + prefix = 0 + num_block = 0 + intervals = [] + + for idx, w in enumerate(weights): + if weights[idx] - prefix > value: + intervals.append((prev, idx)) + prev = idx + prefix = weights[idx - 1] + num_block += 1 + + intervals.append((prev, len(weights))) + return num_block + 1, intervals + + +def _binary_search(weights, num): + length = len(weights) + prefix = [1 if w == 0 else w for w in weights] + for i in range(1, length): + prefix[i] += prefix[i - 1] + + lower_bound = max(weights) + upper_bound = prefix[length - 1] + + while 
upper_bound > lower_bound: + mid = (upper_bound + lower_bound) // 2 + number, _ = _calc_partitions(prefix, mid) + if number <= num: + upper_bound = mid + else: + lower_bound = mid + 1 + + num_block, intervals = _calc_partitions(prefix, upper_bound) + if num_block < num: + intervals = _heap_addition(prefix, intervals, num - num_block) + + return intervals + + +def _partition_uniform(num_items, num_parts, num_chunks): + assert num_items % num_chunks == 0, \ + "Layer length should be divided by the number of chunks, otherwise parameter method is recomended" + + logger = get_global_dist_logger() + parts = [[] for _ in range(num_parts)] + partition_items = num_items // num_chunks + for idx in range(num_chunks): + base_idx = idx * partition_items + chunk_size = partition_items // num_parts + left = num_parts - partition_items % num_parts + if chunk_size == 0: + logger.warning("Some nodes in Pipeline have no requests") + + for p in range(num_parts): + st = base_idx + base_idx += chunk_size + (p >= left) + parts[p].append((st, base_idx)) + + return parts + + +def _partition_balanced(weights, num_parts, num_chunks): + num_total = num_parts * num_chunks + num_items = len(weights) + if num_items <= num_total: + return _partition_uniform(num_items, num_parts, num_chunks) + + intervals = _binary_search(weights, num_total) + + current = 0 + parts = [[] for _ in range(num_parts)] + for inter in intervals: + parts[current].append(inter) + current = (current + 1) % num_parts + + return parts + + +class ModelInitializer(): + def __init__(self, config, num_chunks, verbose=False): + self.num_chunks = num_chunks + self.ori_model = build_model(config) + self.layers = self.ori_model.layers_cfg + layer_length = len(self.layers) + self.verbose = verbose + self._logger = get_global_dist_logger() + self._logger.info(f"The total length of layers is {layer_length}", ranks=[0]) + + def model_initialize(self, partition_method='parameter'): + # Some space for initializing comunication groups + self._interval = None + self._partition_layers(method=partition_method) + models = self._build() + model = set_to_cuda(models) + + return model + + def _partition_layers(self, method): + pipeline_parallel_size = gpc.get_world_size(ParallelMode.PIPELINE) + pipeline_rank = gpc.get_local_rank(ParallelMode.PIPELINE) + + method = method.lower() + # Make a partition + if method == 'layer': + num_layers = len(self.layers) + self.parts = _partition_uniform(num_layers, pipeline_parallel_size, self.num_chunks) + elif method == 'parameter': + param_counts = self._count_layer_params() + # print_rank_0(param_counts) + self.parts = _partition_balanced(param_counts, pipeline_parallel_size, self.num_chunks) + else: + assert method == 'layer', "Method should be a pre-set string" + + # Display the partition + if gpc.get_global_rank() == 0 and self.verbose: + log_str = 'Layer allocation after partitioning: \n' + for stage in range(pipeline_parallel_size): + + num_layers = 0 + for st, ed in self.parts[stage]: + num_layers += ed - st + + log_str += f'\n===== stage={stage}, layers={num_layers} =====\n' + for st, ed in self.parts[stage]: + for idx, layer in enumerate(self.layers[st: ed]): + log_str += f'\t{idx + st:2d}: {layer}\n' + self._logger.info(log_str) + + # Save the partition + self._interval = self.parts[pipeline_rank] + + def _build(self): + """Build model from the layer cfg according to the partition + """ + models = [] + for st, ed in self._interval: + model = copy.copy(self.ori_model) + model.build_from_cfg(st, ed) + models.append(model) + + 
return models + + def _count_layer_params(self): + """Count the number of parameters in each layer + """ + param_counts = [0] * len(self.layers) + for idx, cfg in enumerate(self.layers): + layer = build_layer(cfg) + params = filter(lambda p: p.requires_grad, layer.parameters()) + param_counts[idx] = sum(p.numel() for p in params) + + return param_counts diff --git a/colossalai/checkpointing.py b/colossalai/checkpointing.py new file mode 100644 index 000000000..17db1a1a5 --- /dev/null +++ b/colossalai/checkpointing.py @@ -0,0 +1,215 @@ +import os +import os.path as osp +import re +from typing import Tuple + +import torch + +from .context import Config +from .context.parallel_mode import ParallelMode +from .core import global_context as gpc + +__all__ = [ + 'get_checkpoint_path', + 'get_latest_checkpoint_path', + 'get_latest_checkpoint_pattern', + 'save_checkpoint', + 'load_checkpoint' +] + + +def unwrap_config(config: Config): + ''' + unwrap Config objects to normal dicts + ''' + config_dict = dict() + for k, v in config.items(): + if isinstance(v, dict): + config_dict[k] = unwrap_config(v) + else: + config_dict[k] = v + + return config_dict + + +def _get_ranks_name(): + # tensor parallel + tp_local_rank = 0 + if gpc.is_initialized(ParallelMode.TENSOR): + tp_local_rank = gpc.get_local_rank(ParallelMode.TENSOR) + + # pipeline parallel + pp_local_rank = 0 + if gpc.is_initialized(ParallelMode.PIPELINE): + pp_local_rank = gpc.get_local_rank(ParallelMode.PIPELINE) + + ranks_name = f'tp{tp_local_rank}-pp{pp_local_rank}' + return ranks_name + + +def _get_standard_checkpoint_filename(epoch: int, suffix: str = ''): + ranks_name = _get_ranks_name() + return f'epoch{epoch}-{ranks_name}{suffix}.pt' + + +def get_checkpoint_path(checkpoint_dir: str, epoch: int, suffix: str = ''): + '''This is a function to generate the checkpoint path from the (checkpoint_dir, epoch, suffix, gpu_parallel_rank) tuple. + This is useful during generation and recuperation of the checkpoint. + + :param checkpoint_dir: set up a directory for saving checkpoints + :type checkpoint_dir: str + :param epoch: epoch number (indicate how many epochs have you trained this model) + :type epoch: int + :param suffix: additional notation to specify the model or checkpoint, defaults to '' + :type suffix: str, optional + :return: checkpoint path to be generated + :rtype: path + ''' + ckpt_filename = _get_standard_checkpoint_filename(epoch, suffix) + return os.path.join(checkpoint_dir, ckpt_filename) + + +def _ensure_directory_exists(filename: str): + # ensure the directory exists + dir = os.path.dirname(filename) + if not os.path.exists(dir): + os.makedirs(dir) + + +def get_latest_checkpoint_pattern(suffix: str = ''): + '''Generate Regular expression of latest checkpoint's pattern + + :param suffix: additional notation to specify the model or checkpoint, defaults to '' + :type suffix: str, optional + :return: checkpoint pattern + :rtype: regular expression + ''' + ranks_name = _get_ranks_name() + ckpt_pattern = re.compile(f'epoch(\d+)-{ranks_name}{suffix}\.pt') + return ckpt_pattern + + +def get_latest_checkpoint_path(checkpoint_dir: str, suffix: str = ''): + '''This is a function to retrieve the latest checkpoint path from the (checkpoint_dir, suffix, gpu_parallel_rank) tuple. + This is useful during recuperation of the checkpoint, especially when you do not know the epoch number. 
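The checkpoint files produced and parsed by this module follow the naming scheme `epoch{E}-tp{T}-pp{P}{suffix}.pt`, and `get_latest_checkpoint_pattern` reverses it with a regular expression. A standalone illustration; the ranks and suffix below are made-up values.

```python
# Standalone check of the epoch{E}-tp{T}-pp{P}{suffix}.pt naming convention and the regex
# that parses it back; the tp/pp ranks and suffix are illustrative values.
import re

tp_rank, pp_rank, suffix = 0, 1, '-vit'
filename = f'epoch10-tp{tp_rank}-pp{pp_rank}{suffix}.pt'

pattern = re.compile(rf'epoch(\d+)-tp{tp_rank}-pp{pp_rank}{suffix}\.pt')
match = pattern.match(filename)
assert match is not None and int(match.group(1)) == 10
```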
+ + :param checkpoint_dir: directory for saving checkpoints + :type checkpoint_dir: str + :param suffix: additional notation to specify the model or checkpoint, defaults to '' + :type suffix: str, optional + :raises FileNotFoundError: raise error when we cannot find the latest checkpoint file with inputs given + :return: the latest checkpoint path to be retrieved + :rtype: path + ''' + CKPT_NAME_PAT = get_latest_checkpoint_pattern(suffix=suffix) + + last_epoch = -1 + assert osp.isdir(checkpoint_dir), f'{checkpoint_dir} is not a directory' + + for filename in os.listdir(checkpoint_dir): + ret = CKPT_NAME_PAT.match(filename) + if ret: + epoch = int(ret[0].split('-')[0].lstrip('epoch')) + if epoch > last_epoch: + last_epoch = epoch + + if last_epoch == -1: + ranks_name = _get_ranks_name() + raise FileNotFoundError(f"Cannot find the latest checkpoint file for {ranks_name} in {checkpoint_dir}") + else: + target_file = _get_standard_checkpoint_filename(last_epoch, suffix=suffix) + path = osp.join(checkpoint_dir, target_file) + return path + + +def save_checkpoint(checkpoint_path: str, + epoch: int, + model: torch.nn.Module, + optimizer: torch.optim.Optimizer, + lr_scheduler: torch.optim.lr_scheduler._LRScheduler = None, + **kwargs): + '''Given a directory to store the checkpoints, saves all the training components' parameters or buffers, such as model, optimizer, lr_scheduler and etc. into a checkpoint dictionary. + + This method can be used for both colosalai nn.BaseModel and normal pytorch nn.Module. + + + :param checkpoint_path: set up a directory for saving checkpoints + :type checkpoint_path: str + :param epoch: epoch number (indicate how many epochs have you trained this model) + :type epoch: int + :param model: model to be registered + :type model: torch.nn.Module + :param optimizer: optimizer to be registered + :type optimizer: torch.optim.Optimizer + :param lr_scheduler: lr_scheduler to be registered, defaults to None + :type lr_scheduler: torch.optim.lr_scheduler._LRScheduler, optional + ''' + # for compatibility with normal pytorch nn.Module + if hasattr(model, 'state_dict_for_save_checkpoint'): + model_sd = model.state_dict_for_save_checkpoint() + else: + model_sd = model.state_dict() + + # ckpt container + checkpoint = { + 'epoch': epoch, + 'model': model_sd, + 'optimizer': optimizer.state_dict(), + **kwargs + } + if lr_scheduler is not None: + checkpoint['lr_scheduler'] = lr_scheduler.state_dict() + + _ensure_directory_exists(checkpoint_path) + torch.save(checkpoint, checkpoint_path) + + +def load_checkpoint(checkpoint_path: str, + model: torch.nn.Module, + optimizer: torch.optim.Optimizer, + lr_scheduler: torch.optim.lr_scheduler._LRScheduler = None, + finetune: bool = False, + strict: bool = True) -> Tuple: + '''Loads the checkpoint file. + If finetune is False, then we intend to continue/resume the training process from the checkpoint given. + So we copy parameters and buffers from state_dict into these modules(model, optimizer,lr_scheduler) and its descendants. + If finetune is True, then only the weights and buffers of model should be reload. + If strict is True, then the keys of state_dict must exactly match the keys returned by this module’s state_dict() function. 
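Taken together, `save_checkpoint` and `load_checkpoint` amount to a `torch.save`/`torch.load` round trip over a dict keyed by `epoch`, `model` and `optimizer`. A minimal sketch with plain PyTorch objects; the path, model and learning rate are illustrative.

```python
# Minimal sketch of the checkpoint round trip implemented above, using plain PyTorch
# objects; the file path, model and hyper-parameters are illustrative.
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

checkpoint = {'epoch': 3, 'model': model.state_dict(), 'optimizer': optimizer.state_dict()}
torch.save(checkpoint, '/tmp/epoch3-tp0-pp0.pt')

state = torch.load('/tmp/epoch3-tp0-pp0.pt', map_location='cpu')
model.load_state_dict(state.pop('model'), strict=True)
optimizer.load_state_dict(state.pop('optimizer'))
last_epoch = state.pop('epoch')   # returned alongside the leftover keys, as above
assert last_epoch == 3
```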
+ + :param checkpoint_path: the exact and matched checkpoint_path directory to retrieve appropriate state_dict + :type checkpoint_path: str + :param model: model to reload parameters and buffers + :type model: torch.nn.Module + :param optimizer: optimizer to recuperate + :type optimizer: torch.optim.Optimizer + :param lr_scheduler: lr_scheduler to recuperate, defaults to None + :type lr_scheduler: torch.optim.lr_scheduler._LRScheduler, optional + :param finetune: whether to finetune the model with new dataset or continue the pre-training, defaults to False + :type finetune: bool, optional + :param strict: whether to strictly enforce that the keys in + :attr:`state_dict` of the checkpoint match the names of + parameters and buffers in model., defaults to True + :type strict: bool, optional + :raises ValueError: raise error if the model/optimizer cannot successfully be recuperated + :return: (the epoch number of the checkpoint retrieved, the checkpoint retrieved) + :rtype: Tuple + + ''' + # Load the checkpoint. + checkpoint = torch.load(checkpoint_path, map_location='cpu') + try: + last_epoch = checkpoint.pop('epoch') if not finetune else 0 + model.load_state_dict(checkpoint.pop('model'), strict=strict) + except KeyError: + raise ValueError('Checkpoint is corrupted') + + if not finetune: + try: + optimizer.load_state_dict(checkpoint.pop('optimizer')) + except KeyError: + raise ValueError('Checkpoint is corrupted') + + if lr_scheduler is not None and 'lr_scheduler' in checkpoint: + lr_scheduler.load_state_dict(checkpoint.pop('lr_scheduler')) + + return last_epoch, checkpoint diff --git a/colossalai/communication/__init__.py b/colossalai/communication/__init__.py new file mode 100644 index 000000000..4241bff4b --- /dev/null +++ b/colossalai/communication/__init__.py @@ -0,0 +1,14 @@ +from .collective import all_gather, reduce_scatter, scatter +from .p2p import (send_forward, send_forward_recv_forward, send_backward_recv_forward, + send_backward, send_backward_recv_backward, send_forward_recv_backward, + send_forward_backward_recv_forward_backward, recv_forward, recv_backward) +from .ring import ring_forward +from .utils import send_tensor_meta, recv_tensor_meta + +__all__ = [ + 'all_gather', 'reduce_scatter', 'scatter', + 'send_forward', 'send_forward_recv_forward', 'send_forward_backward_recv_forward_backward', + 'send_backward', 'send_backward_recv_backward', 'send_backward_recv_forward', + 'send_forward_recv_backward', 'recv_backward', 'recv_forward', + 'ring_forward', 'send_tensor_meta', 'recv_tensor_meta' +] \ No newline at end of file diff --git a/colossalai/communication/collective.py b/colossalai/communication/collective.py new file mode 100644 index 000000000..6db799c99 --- /dev/null +++ b/colossalai/communication/collective.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch +import torch.distributed as dist +from torch import Tensor + +from colossalai.context import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.utils import get_current_device + + +def all_gather(tensor: Tensor, dim: int, + parallel_mode: ParallelMode) -> Tensor: + """Gathers all tensors from the parallel group and concatenates them in a + specific dimension. 
+ + :param tensor: Tensor to be gathered + :param dim: The dimension concatenating in + :param parallel_mode: Parallel group mode used in this communication + :type tensor: Tensor + :type dim: int + :type parallel_mode: ParallelMode + :return: The tensor generated by all-gather + :rtype: Tensor + """ + depth = gpc.get_world_size(parallel_mode) + temp = tensor.clone() + shape = list(temp.shape) + shape[dim] *= depth + out = torch.empty(shape, dtype=temp.dtype, device=get_current_device()) + out = list(torch.chunk(out, depth, dim=dim)) + out = [val.contiguous() for val in out] + dist.all_gather(out, temp, group=gpc.get_group(parallel_mode)) + out = torch.cat(out, dim=dim) + return out + + +def reduce_scatter(tensor: Tensor, dim: int, + parallel_mode: ParallelMode) -> Tensor: + """Reduces all tensors then scatters it in a specific dimension to all + members in the parallel group. + + :param tensor: Tensor to be reduced and scattered + :param dim: The dimension scattering in + :param parallel_mode: Parallel group mode used in this communication + :type tensor: Tensor + :type dim: int + :type parallel_mode: ParallelMode + :return: The tensor generated by reduce-scatter + :rtype: Tensor + """ + depth = gpc.get_world_size(parallel_mode) + temp = list(torch.chunk(tensor, depth, dim=dim)) + temp = [val.contiguous() for val in temp] + out = torch.empty(temp[0].shape, + dtype=temp[0].dtype, + device=get_current_device()) + dist.reduce_scatter(output=out, + input_list=temp, + group=gpc.get_group(parallel_mode)) + return out + + +def scatter(tensor: Tensor, src: int, dim: int, + parallel_mode: ParallelMode) -> Tensor: + """Scatters in a specific dimension from source rank to all ranks in + the parallel group. + + :param tensor: Tensor to be scattered + :param dim: The dimension scattering in + :param parallel_mode: Parallel group mode used in this communication + :type tensor: Tensor + :type dim: int + :type parallel_mode: ParallelMode + :return: The tensor generated by scatter + :rtype: Tensor + """ + depth = gpc.get_world_size(parallel_mode) + temp = tensor.clone() + dist.broadcast(temp, src=src, group=gpc.get_group(parallel_mode)) + rank = gpc.get_local_rank(parallel_mode) + out = torch.chunk(temp, depth, dim=dim)[rank].contiguous() + return out diff --git a/colossalai/communication/p2p.py b/colossalai/communication/p2p.py new file mode 100644 index 000000000..7e761e180 --- /dev/null +++ b/colossalai/communication/p2p.py @@ -0,0 +1,333 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch +import torch.distributed as dist + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.utils import get_current_device + + +def _communicate(tensor_send_next=None, + tensor_send_prev=None, + recv_prev=False, + recv_next=False, + recv_prev_shape=None, + recv_next_shape=None, + prev_rank=None, + next_rank=None, + up_group=None, + down_group=None, + dtype=None): + """ + Adapted from megatron.p2p_communication. + Communicate tensors between stages. Used as helper method in other + communication methods that are used in pipeline schedule. + Takes the following arguments: + tensor_send_next: tensor to send to next rank (no tensor sent if + set to None). + tensor_send_prev: tensor to send to prev rank (no tensor sent if + set to None). + recv_prev: boolean for whether tensor should be received from + previous rank. + recv_next: boolean for whether tensor should be received from + next rank. 
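The collectives above only reshape along a single dimension: `all_gather` multiplies the concatenation dimension by the group size, while `reduce_scatter` and `scatter` divide it. The shape arithmetic can be checked without any process group; the sizes below are illustrative.

```python
# Shape bookkeeping for all_gather / reduce_scatter / scatter, checked without a process
# group; world size, dim and tensor sizes are illustrative.
import torch

world_size, dim = 4, 0
x = torch.randn(8, 16)

gathered = list(x.shape)
gathered[dim] *= world_size        # all_gather output shape
assert gathered == [32, 16]

scattered = list(x.shape)
scattered[dim] //= world_size      # reduce_scatter / scatter output shape per rank
assert scattered == [2, 16]
```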
+ Returns: + (tensor_recv_prev, tensor_recv_next) + """ + + # Create placeholder tensors for receive in forward and backward directions + # if needed. + tensor_recv_prev = None + tensor_recv_next = None + + if recv_prev: + assert recv_prev_shape is not None + tensor_recv_prev = torch.empty(recv_prev_shape, + requires_grad=True, + device=get_current_device(), + dtype=dtype) + if recv_next: + assert recv_next_shape is not None + tensor_recv_next = torch.empty(recv_next_shape, + requires_grad=True, + device=get_current_device(), + dtype=dtype) + + if tensor_send_prev is not None or recv_prev: + if prev_rank is None: + prev_rank = gpc.get_prev_global_rank( + ParallelMode.PIPELINE) + if up_group is None: + up_group = gpc.get_group(ParallelMode.PIPELINE_PREV) + + if tensor_send_next is not None or recv_next: + if next_rank is None: + next_rank = gpc.get_next_global_rank( + ParallelMode.PIPELINE) + if down_group is None: + down_group = gpc.get_group(ParallelMode.PIPELINE_NEXT) + + # rank = dist.get_rank() + rank = gpc.get_global_rank() + + ops = [] + if tensor_send_prev is not None: + send_prev_op = dist.broadcast(tensor_send_prev, + src=rank, + group=up_group, + async_op=True) + ops.append(send_prev_op) + if tensor_recv_prev is not None: + recv_prev_op = dist.broadcast(tensor_recv_prev, + src=prev_rank, + group=up_group, + async_op=True) + ops.append(recv_prev_op) + if tensor_recv_next is not None: + recv_next_op = dist.broadcast(tensor_recv_next, + src=next_rank, + group=down_group, + async_op=True) + ops.append(recv_next_op) + if tensor_send_next is not None: + send_next_op = dist.broadcast(tensor_send_next, + src=rank, + group=down_group, + async_op=True) + ops.append(send_next_op) + for req in ops: + req.wait() + # To protect against race condition when using batch_isend_irecv(). + torch.cuda.synchronize() + return tensor_recv_prev, tensor_recv_next + + +def recv_forward(input_tensor_shape, prev_rank=None, up_group=None): + """Receives the input tensor from the previous member in pipeline. + + :param input_tensor_shape: The shape of the tensor to be recieved + :param prev_rank: The rank of the source of the tensor + :param up_group: Communication group including the previous member in pipeline parallel group + :type input_tensor_shape: torch.Size + :type prev_rank: int, optional + :type up_group: ProcessGroup, optional + :return: The input tensor in forward step + :rtype: Tensor + """ + if gpc.is_first_rank(ParallelMode.PIPELINE): + input_tensor = None + else: + input_tensor, _ = _communicate(recv_prev=True, + recv_prev_shape=input_tensor_shape, + prev_rank=prev_rank, + up_group=up_group) + return input_tensor + + +def recv_backward(output_grad_shape, next_rank=None, down_group=None): + """Receives the grad tensor from the next member in pipeline. 
+ + :param output_grad_shape: The shape of the tensor to be recieved + :param next_rank: The rank of the source of the tensor + :param down_group: Communication group including the next member in pipeline parallel group + :type output_grad_shape: torch.Size + :type next_rank: int, optional + :type down_group: ProcessGroup, optional + :return: The grad of output tensor in forward step + :rtype: Tensor + """ + if gpc.is_last_rank(ParallelMode.PIPELINE): + output_tensor_grad = None + else: + _, output_tensor_grad = _communicate(recv_next=True, + recv_next_shape=output_grad_shape, + next_rank=next_rank, + down_group=down_group) + return output_tensor_grad + + +def send_forward(output_tensor, + next_rank=None, + down_group=None): + """Sends the input tensor to the next member in pipeline. + + :param output_tensor: Tensor to be sent + :param next_rank: The rank of the recipient of the tensor + :param down_group: Communication group including the next member in pipeline parallel group + :type output_tensor: Tensor + :type next_rank: int, optional + :type down_group: ProcessGroup, optional + """ + if not gpc.is_last_rank(ParallelMode.PIPELINE): + _communicate(tensor_send_next=output_tensor, + next_rank=next_rank, + down_group=down_group) + + +def send_backward(input_tensor_grad, + prev_rank=None, + up_group=None): + """Sends the grad tensor to the previous member in pipeline. + + :param input_tensor_grad: Tensor to be sent + :param prev_rank: The rank of the recipient of the tensor + :param up_group: Communication group including the previous member in pipeline parallel group + :type input_tensor_grad: Tensor + :type prev_rank: int, optional + :type up_group: ProcessGroup, optional + """ + if not gpc.is_first_rank(ParallelMode.PIPELINE): + _communicate(tensor_send_prev=input_tensor_grad, + prev_rank=prev_rank, + up_group=up_group) + + +def send_forward_recv_backward(output_tensor, + output_grad_shape, + recv_next=True, + next_rank=None, + down_group=None): + """Batched communication operation. Sends the input tensor to the + next member in pipeline, while recieves the grad tensor from the + next member in pipeline. + + :param output_tensor: Tensor to be sent + :param output_grad_shape: The shape of the tensor to be recieved + :type output_tensor: Tensor + :type output_grad_shape: torch.Size + :return: The grad of output tensor in forward step + :rtype: Tensor + """ + if gpc.is_last_rank(ParallelMode.PIPELINE): + output_tensor_grad = None + else: + _, output_tensor_grad = _communicate(tensor_send_next=output_tensor, + recv_next=recv_next, + recv_next_shape=output_grad_shape, + next_rank=next_rank, + down_group=down_group) + return output_tensor_grad + + +def send_backward_recv_forward(input_tensor_grad, + input_tensor_shape, + recv_prev=True, + prev_rank=None, + up_group=None): + """Batched communication operation. Sends the grad tensor to the + previous member in pipeline, while recieves the input tensor from the + previous member in pipeline. 
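Together, the four basic helpers above give a pipeline stage what it needs for one microbatch. A sketch of how a middle stage could combine them; the model, tensor shapes and process-group wiring are assumed to be set up elsewhere, so this is illustrative rather than the schedule actually used in the engine.

```python
# Illustrative use of recv_forward / send_forward / recv_backward / send_backward for one
# microbatch on a middle pipeline stage; model, shapes and groups are assumed to exist.
def run_one_microbatch(model, input_tensor_shape, output_grad_shape):
    input_tensor = recv_forward(input_tensor_shape)    # activation from the previous stage
    output_tensor = model(input_tensor)
    send_forward(output_tensor)                        # activation to the next stage

    output_grad = recv_backward(output_grad_shape)     # gradient from the next stage
    output_tensor.backward(output_grad)
    send_backward(input_tensor.grad)                   # gradient to the previous stage
```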
+ + :param input_tensor_grad: Tensor to be sent + :param input_tensor_shape: The shape of the tensor to be recieved + :type input_tensor_grad: Tensor + :type input_tensor_shape: torch.Size + :return: The input tensor in forward step + :rtype: Tensor + """ + if gpc.is_first_rank(ParallelMode.PIPELINE): + input_tensor = None + else: + input_tensor, _ = _communicate(tensor_send_prev=input_tensor_grad, + recv_prev=recv_prev, + recv_prev_shape=input_tensor_shape, + prev_rank=prev_rank, + up_group=up_group) + return input_tensor + + +def send_forward_recv_forward(output_tensor, + input_tensor_shape, + recv_prev=True, + prev_rank=None, + next_rank=None, + up_group=None, + down_group=None): + """Batched communication operation. Sends the input tensor to the + next member in pipeline, while recieves the input tensor from the + previous member in pipeline. + + :param output_tensor: Tensor to be sent + :param input_tensor_shape: The shape of the tensor to be recieved + :type output_tensor: Tensor + :type input_tensor_shape: torch.Size + :return: The input tensor in forward step + :rtype: Tensor + """ + input_tensor, _ = _communicate(tensor_send_next=output_tensor, + recv_prev=recv_prev, + recv_prev_shape=input_tensor_shape, + prev_rank=prev_rank, + next_rank=next_rank, + up_group=up_group, + down_group=down_group) + return input_tensor + + +def send_backward_recv_backward(input_tensor_grad, + output_grad_shape, + recv_next=True, + prev_rank=None, + next_rank=None, + up_group=None, + down_group=None): + """Batched communication operation. Sends the grad tensor to the + previous member in pipeline, while recieves the grad tensor from the + next member in pipeline. + + :param input_tensor_grad: Tensor to be sent + :param output_grad_shape: The shape of the tensor to be recieved + :type input_tensor_grad: Tensor + :type output_grad_shape: torch.Size + :return: The grad of output tensor in forward step + :rtype: Tensor + """ + _, output_tensor_grad = _communicate(tensor_send_prev=input_tensor_grad, + recv_next=recv_next, + recv_next_shape=output_grad_shape, + prev_rank=prev_rank, + next_rank=next_rank, + up_group=up_group, + down_group=down_group) + return output_tensor_grad + + +def send_forward_backward_recv_forward_backward(output_tensor, + input_tensor_grad, + input_tensor_shape, + output_grad_shape, + recv_prev=True, + recv_next=True, + prev_rank=None, + next_rank=None, + up_group=None, + down_group=None): + """Batched communication operation. Sends the input tensor to the next and + the grad tensor to the previous, while recieves the grad tensor from the + next and the input tensor from the previous. 
+ + :param output_tensor: Tensor sent to the next + :param input_tensor_grad: Tensor sent to the previous + :param input_tensor_shape: The shape of the tensor recieved from the previous + :param output_grad_shape: The shape of the tensor recieved from the next + :type output_tensor: Tensor + :type input_tensor_grad: Tensor + :type input_tensor_shape: torch.Size + :type output_grad_shape: torch.Size + :return: (the input tensor in forward step, the grad of output tensor in forward step) + :rtype: (Tensor, Tensor) + """ + input_tensor, output_tensor_grad = _communicate( + tensor_send_next=output_tensor, + tensor_send_prev=input_tensor_grad, + recv_prev=recv_prev, + recv_next=recv_next, + recv_prev_shape=input_tensor_shape, + recv_next_shape=output_grad_shape, + prev_rank=prev_rank, + next_rank=next_rank, + up_group=up_group, + down_group=down_group) + return input_tensor, output_tensor_grad diff --git a/colossalai/communication/ring.py b/colossalai/communication/ring.py new file mode 100644 index 000000000..d1b4266a6 --- /dev/null +++ b/colossalai/communication/ring.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.utils import get_current_device, synchronize + + +def ring_forward(tensor_send_next: torch.Tensor, parallel_mode: ParallelMode): + """Sends a tensor to the next member and recieves a tensor from the previous member. + This function returns the recieved tensor from the previous member. + + :param tensor_send_next: Tensor sent to next member + :param parallel_mode: Parallel group mode used in this communication + :type tensor_send_next: Tensor + :type parallel_mode: ParallelMode + :return: The tensor recieved from the previous + :rtype: Tensor + """ + buffer_shape = tensor_send_next.size() + + ops = [] + current_rank = gpc.get_global_rank() + + tensor_recv_prev = torch.empty(buffer_shape, + requires_grad=True, + device=get_current_device(), + dtype=tensor_send_next.dtype) + + # send to next rank + send_next_op = torch.distributed.P2POp( + torch.distributed.isend, tensor_send_next, + gpc.get_next_global_rank(parallel_mode)) + ops.append(send_next_op) + + # receive from prev rank + recv_prev_op = torch.distributed.P2POp( + torch.distributed.irecv, tensor_recv_prev, + gpc.get_prev_global_rank(parallel_mode)) + ops.append(recv_prev_op) + + if current_rank % 2 == 0: + ops = ops[::-1] + + reqs = torch.distributed.batch_isend_irecv(ops) + for req in reqs: + req.wait() + + # To protect against race condition when using batch_isend_irecv(). + synchronize() + + return tensor_recv_prev diff --git a/colossalai/communication/utils.py b/colossalai/communication/utils.py new file mode 100644 index 000000000..d6d7dc091 --- /dev/null +++ b/colossalai/communication/utils.py @@ -0,0 +1,73 @@ +import torch +import torch.distributed as dist + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.utils import get_current_device + + +def send_tensor_meta(tensor, need_meta=True, down_group=None): + """Sends tensor meta information before sending a specific tensor. + Since the recipient must know the shape of the tensor in p2p communications, + meta information of the tensor should be sent before communications. This function + synchronizes with :func:`recv_tensor_meta`. 
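`ring_forward` above pairs an `isend` to the next rank with an `irecv` from the previous one, and flips the order of the two ops on even ranks, presumably so that neighbouring ranks post matching operations first. The index bookkeeping in isolation; the world size is illustrative.

```python
# Ring-exchange bookkeeping mirroring ring_forward: send to (rank + 1) % n, receive from
# (rank - 1) % n, and reverse the op order on even ranks; the world size is illustrative.
world_size = 4
for rank in range(world_size):
    send_to = (rank + 1) % world_size
    recv_from = (rank - 1) % world_size
    ops = ['send', 'recv']
    if rank % 2 == 0:
        ops = ops[::-1]
    print(f'rank {rank}: {ops[0]} first (send -> {send_to}, recv <- {recv_from})')
```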
+ + :param tensor: Tensor to be sent + :param need_meta: If False, meta information won't be sent + :param down_group: Communication group including the next member in pipeline parallel group + :type tensor: Tensor + :type need_meta: bool, optional + :type down_group: ProcessGroup, optional + :return: False + :rtype: bool + """ + if need_meta: + rank = gpc.get_global_rank() + + if down_group is None: + down_group = gpc.get_group(ParallelMode.PIPELINE_NEXT) + + tensor_kwargs = {'dtype': torch.long, 'device': get_current_device()} + + send_shape = torch.tensor(tensor.size(), **tensor_kwargs) + send_ndims = torch.tensor(len(tensor.size()), **tensor_kwargs) + + dist.broadcast(send_ndims, src=rank, group=down_group) + dist.broadcast(send_shape, src=rank, group=down_group) + + return False + + +def recv_tensor_meta(tensor_shape, prev_rank=None, up_group=None): + """Recieves tensor meta information before recieving a specific tensor. + Since the recipient must know the shape of the tensor in p2p communications, + meta information of the tensor should be recieved before communications. This function + synchronizes with :func:`send_tensor_meta`. + + :param tensor_shape: The shape of the tensor to be recieved + :param prev_rank: The rank of the source of the tensor + :param up_group: Communication group including the previous member in pipeline parallel group + :type tensor_shape: torch.Size + :type prev_rank: int, optional + :type up_group: ProcessGroup, optional + :return: The shape of the tensor to be recieved + :rtype: torch.Size + """ + if tensor_shape is None: + if prev_rank is None: + prev_rank = gpc.get_prev_global_rank( + ParallelMode.PIPELINE) + if up_group is None: + up_group = gpc.get_group(ParallelMode.PIPELINE_PREV) + + tensor_kwargs = {'dtype': torch.long, 'device': get_current_device()} + + recv_ndims = torch.empty((), **tensor_kwargs) + dist.broadcast(recv_ndims, src=prev_rank, group=up_group) + + recv_shape = torch.empty(recv_ndims, **tensor_kwargs) + dist.broadcast(recv_shape, src=prev_rank, group=up_group) + + tensor_shape = torch.Size(recv_shape) + + return tensor_shape diff --git a/colossalai/constants.py b/colossalai/constants.py new file mode 100644 index 000000000..073dd2d2a --- /dev/null +++ b/colossalai/constants.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +ALLOWED_MODES = [None, '1d', '2d', '2.5d', '3d', 'sequence'] + +# intializer +INITIALIZER_MAPPING = { + 'data': 'Initializer_Data', + 'tensor': 'Initializer_Tensor', + 'pipeline': 'Initializer_Pipeline', + 'embedding': 'Initializer_Embedding', + '1d': 'Initializer_1D', + '2d': 'Initializer_2D', + '2.5d': 'Initializer_2p5D', + '3d': 'Initializer_3D', + 'sequence': 'Initializer_Sequence' +} + +# 2D paralllel +SUMMA_DIM = 'SUMMA_DIM' + +# 2.5D paralllel +TESSERACT_DIM = 'TESSERACT_DIM' +TESSERACT_DEP = 'TESSERACT_DEP' + +# 3D parallel +DEPTH_3D = 'DEPTH_3D' + +# Tensor parallel attributes +IS_TENSOR_PARALLEL = 'is_tensor_parallel' +TENSOR_PARALLEL_ATTRIBUTES = [IS_TENSOR_PARALLEL] diff --git a/colossalai/context/__init__.py b/colossalai/context/__init__.py new file mode 100644 index 000000000..3009779c8 --- /dev/null +++ b/colossalai/context/__init__.py @@ -0,0 +1,5 @@ +from .config import Config +from .parallel_context import ParallelContext +from .parallel_context import ParallelMode +from .process_group_initializer import * +from .random import * diff --git a/colossalai/context/_utils.py b/colossalai/context/_utils.py new file mode 100644 index 000000000..a770ea7b4 --- /dev/null +++ 
b/colossalai/context/_utils.py @@ -0,0 +1,70 @@ +import math + + +def set_parallel_size(obj, config: dict, key: str, attr_name: str): + if key in config: + ele = config[key] + if isinstance(ele, int): + setattr(obj, attr_name, ele) + elif isinstance(ele, dict): + setattr(obj, attr_name, ele['size']) + else: + raise NotImplementedError( + f"Parallel configuration does not support this kind of argument, please use int or dict" + ) + + +def add_tensor_pg(pg_init, mode, size, depth=None): + if mode == '1d': + pg_init.append(dict( + type='Initializer1D', + parallel_size=size + )) + elif mode == '2d': + dim = math.floor(math.sqrt(size)) + pg_init.append(dict( + type='Initializer2D_Col', + summa_dim=dim + )) + pg_init.append(dict( + type='Initializer2D_Row', + summa_dim=dim + )) + elif mode == '2.5d': + dim = math.floor(math.sqrt(size // depth)) + pg_init.append(dict( + type='Initializer_Tesseract_ROW', + tesseract_dim=dim, + tesseract_dep=depth + )) + pg_init.append(dict( + type='Initializer_Tesseract_COL', + tesseract_dim=dim, + tesseract_dep=depth + )) + pg_init.append(dict( + type='Initializer_Tesseract_DEP', + tesseract_dim=dim, + tesseract_dep=depth + )) + pg_init.append(dict( + type='Initializer_Tesseract_XZ', + tesseract_dim=dim, + tesseract_dep=depth + )) + elif mode == '3d': + dim = math.floor(math.pow(size, 1.0 / 3.0) + 0.5) + pg_init.append(dict( + type='ParallelInitializer3D_Input', + depth=dim + )) + pg_init.append(dict( + type='ParallelInitializer3D_Weight', + depth=dim + )) + pg_init.append(dict( + type='ParallelInitializer3D_Output', + depth=dim + )) + else: + raise NotImplementedError("This kind of tensor splitting has not been implemented yet") diff --git a/colossalai/context/config.py b/colossalai/context/config.py new file mode 100644 index 000000000..52a375aa1 --- /dev/null +++ b/colossalai/context/config.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import inspect +import sys +from importlib.machinery import SourceFileLoader +from pathlib import Path + + +class Config(dict): + """This is a wrapper class for dict objects so that values of which can be + accessed as attributes. + + :param config: The dict object to be wrapped + :type config: dict + """ + + def __init__(self, config: dict = None): + if config is not None: + for k, v in config.items(): + self._add_item(k, v) + + def __missing__(self, key): + raise KeyError(key) + + def __getattr__(self, key): + try: + value = super(Config, self).__getitem__(key) + return value + except KeyError: + raise AttributeError(key) + + def __setattr__(self, key, value): + super(Config, self).__setitem__(key, value) + + def _add_item(self, key, value): + if isinstance(value, dict): + self.__setattr__(key, Config(value)) + else: + self.__setattr__(key, value) + + def update(self, config): + assert isinstance(config, (Config, dict)), 'can only update dictionary or Config objects.' + for k, v in config.items(): + self._add_item(k, v) + return self + + @staticmethod + def from_file(filename: str): + """Reads a python file and constructs a corresponding :class:`Config` object. 
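The `Config` class above is an attribute-style dict that wraps nested dicts recursively. A stripped-down, standalone version of the same pattern; the real class additionally wraps nested dicts on assignment, supports `update`, and loads `.py` files via `from_file`.

```python
# Stripped-down sketch of the attribute-style dict pattern implemented by Config above;
# nested wrapping, update() and from_file() from the real class are omitted here.
class AttrDict(dict):
    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)

    def __setattr__(self, key, value):
        self[key] = value


cfg = AttrDict(parallel=AttrDict(pipeline=2, tensor=AttrDict(size=4, mode='2d')))
assert cfg.parallel.tensor.mode == '2d'
cfg.seed = 42
assert cfg['seed'] == 42
```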
+ + :param filename: Name of the file to construct the return object + :type filename: str + :raises AssertionError: Raises an AssertionError if the file does not exist, or the file + is not .py file + :return: A :class:`Config` object constructed with information in the file + :rtype: :class:`Config` + """ + + # check config path + if isinstance(filename, str): + filepath = Path(filename).absolute() + elif isinstance(filename, Path): + filepath = filename.absolute() + + assert filepath.exists(), f'{filename} is not found, please check your configuration path' + + # check extension + extension = filepath.suffix + assert extension == '.py', 'only .py files are supported' + + # import the config as module + remove_path = False + if filepath.parent not in sys.path: + sys.path.insert(0, (filepath)) + remove_path = True + + module_name = filepath.stem + source_file = SourceFileLoader(fullname=str(module_name), path=str(filepath)) + module = source_file.load_module() + + # load into config + config = Config() + + for k, v in module.__dict__.items(): + if k.startswith('__') or inspect.ismodule(v) or inspect.isclass(v): + continue + else: + config._add_item(k, v) + + # TODO: replace with logger warning here when logger is done + print('warning: variables which starts with __, is a module or class declaration are omitted') + + # remove module + del sys.modules[module_name] + if remove_path: + sys.path.pop(0) + + return config diff --git a/colossalai/context/parallel_context.py b/colossalai/context/parallel_context.py new file mode 100644 index 000000000..5a7a0bfb9 --- /dev/null +++ b/colossalai/context/parallel_context.py @@ -0,0 +1,454 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import os +import random +from typing import Union + +import numpy as np +import torch +import torch.distributed as dist + +from colossalai.constants import ALLOWED_MODES, INITIALIZER_MAPPING +from colossalai.context.config import Config +from colossalai.registry import DIST_GROUP_INITIALIZER +from ._utils import set_parallel_size +from .parallel_mode import ParallelMode +from .random import add_seed, get_seeds, set_mode + + +class ParallelContext: + """This class provides interface functions for users to get the parallel context, + such as the global rank, the local rank, the world size, etc. of each device. + + :param args: The distributed arguments in the system + :type args: dict + """ + + def __init__(self, args=None): + # distributed settings + self._global_ranks = dict() + self._local_ranks = dict() + self._world_sizes = dict() + self._groups = dict() + self._ranks_in_group = dict() + + # load config from file + self._dist_args = args + self._config = None + + # default 3D parallel args, will be overwritten during process group intialization + self.world_size = 1 + self.data_parallel_size = 1 + self.pipeline_parallel_size = 1 + self.tensor_parallel_size = 1 + + @property + def config(self): + return self._config + + def load_config(self, config: Union[dict, str]): + """Loads the configuration from either a dict or a file. 
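Since `Config.from_file` imports a plain Python module and keeps its module-level variables, a config file is just ordinary Python. A hypothetical example of such a file; the field names mirror the parallel settings read later in this patch, but the concrete values are illustrative.

```python
# Hypothetical config file for Config.from_file: module-level variables become Config
# entries, while dunder names, imported modules and class definitions are skipped.
# --- e.g. my_config.py ---
parallel = dict(
    pipeline=dict(size=2),
    tensor=dict(size=4, mode='2d'),
)
seed = 1024
num_epochs = 100
```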
+ + :param config: Either a dict containing the configuration information or the filename + of a file containing the configuration information + :type config: dict or str + :raises TypeError: Raises a TypeError if `config` is neither a dict or a str + """ + if isinstance(config, str): + self._config = Config.from_file(config) + elif isinstance(config, dict): + self._config = Config(config) + else: + raise TypeError("Invalid type for config, only dictionary or string is supported") + + def set_dist_args(self, args): + """Sets the distributed arguments. + + :param args: The distributed arguments in the system + :type args: dict + """ + self._dist_args = args + + @staticmethod + def _check_parallel_mode(parallel_mode: ParallelMode): + assert isinstance(parallel_mode, ParallelMode) + + def get_global_rank(self): + """Returns the global rank of the current device. + + :return: The global rank of the current device + :rtype: int + """ + return self._global_ranks[ParallelMode.GLOBAL] + + def add_global_rank(self, parallel_mode: ParallelMode, rank: int): + """Adds the global rank of the current device for `parallel_mode` to the context. + + :param parallel_mode: The parallel mode for the rank + :type parallel_mode: :class:`colossalai.context.ParallelMode` + :param rank: The rank to be added + :type rank: int + :raises AssertionError: Raises an AssertionError if `parallel_mode` is not an instance + of :class:`colossalai.context.ParallelMode` + """ + self._check_parallel_mode(parallel_mode) + self._global_ranks[parallel_mode] = rank + + def get_local_rank(self, parallel_mode: ParallelMode): + """Returns the local rank of the current device. + + :param parallel_mode: The chosen parallel mode + :type parallel_mode: :class:`colossalai.context.ParallelMode` + :raises AssertionError: Raises an AssertionError if `parallel_mode` is not an instance + of :class:`colossalai.context.ParallelMode` + :return: The local rank of the current device for `parallel_mode` + :rtype: int + """ + self._check_parallel_mode(parallel_mode) + return self._local_ranks[parallel_mode] + + def add_local_rank(self, parallel_mode: ParallelMode, rank: int): + """Adds the local rank of the current device for `parallel_mode` to the context. + + :param parallel_mode: The parallel mode for the rank + :type parallel_mode: :class:`colossalai.context.ParallelMode` + :param rank: The rank to be added + :type rank: int + :raises AssertionError: Raises an AssertionError if `parallel_mode` is not an instance + of :class:`colossalai.context.ParallelMode` + """ + self._check_parallel_mode(parallel_mode) + self._local_ranks[parallel_mode] = rank + + def get_next_global_rank(self, parallel_mode: ParallelMode): + """Returns the global rank of the next device. + + :param parallel_mode: The chosen parallel mode + :type parallel_mode: :class:`colossalai.context.ParallelMode` + :raises AssertionError: Raises an AssertionError if `parallel_mode` is not an instance + of :class:`colossalai.context.ParallelMode` + :return: The global rank of the next device for `parallel_mode` + :rtype: int + """ + self._check_parallel_mode(parallel_mode) + + # get rank and world size + local_rank = self.get_local_rank(parallel_mode) + world_size = self.get_world_size(parallel_mode) + ranks_in_group = self.get_ranks_in_group(parallel_mode) + + return ranks_in_group[(local_rank + 1) % world_size] + + def get_prev_global_rank(self, parallel_mode: ParallelMode): + """Returns the global rank of the previous device. 
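The next/prev lookups above are plain modular arithmetic over the list of global ranks in the group. A standalone check; the group and local rank below are made-up values.

```python
# Modular-arithmetic check of get_next_global_rank / get_prev_global_rank; the group of
# global ranks and the local rank are illustrative.
ranks_in_group = [0, 4, 8, 12]      # e.g. one pipeline group in a 16-GPU job
local_rank = 3
world_size = len(ranks_in_group)

next_rank = ranks_in_group[(local_rank + 1) % world_size]   # wraps around to 0
prev_rank = ranks_in_group[(local_rank - 1) % world_size]   # 8
assert (next_rank, prev_rank) == (0, 8)
```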
+ + :param parallel_mode: The chosen parallel mode + :type parallel_mode: :class:`colossalai.context.ParallelMode` + :raises AssertionError: Raises an AssertionError if `parallel_mode` is not an instance + of :class:`colossalai.context.ParallelMode` + :return: The global rank of the previous device for `parallel_mode` + :rtype: int + """ + self._check_parallel_mode(parallel_mode) + + # get rank and world size + local_rank = self.get_local_rank(parallel_mode) + world_size = self.get_world_size(parallel_mode) + ranks_in_group = self.get_ranks_in_group(parallel_mode) + + return ranks_in_group[(local_rank - 1) % world_size] + + def is_first_rank(self, parallel_mode: ParallelMode): + """Returns a boolean value indicating whether the current device is the first one + among its group for `parallel_mode`. + + :param parallel_mode: The chosen parallel mode + :type parallel_mode: :class:`colossalai.context.ParallelMode` + :raises AssertionError: Raises an AssertionError if `parallel_mode` is not an instance + of :class:`colossalai.context.ParallelMode` + :return: a boolean value indicating whether the current device is the first one + among its group for `parallel_mode` + :rtype: bool + """ + rank = self.get_local_rank(parallel_mode) + return rank == 0 + + def is_last_rank(self, parallel_mode: ParallelMode): + """Returns a boolean value indicating whether the current device is the last one + among its group for `parallel_mode`. + + :param parallel_mode: The chosen parallel mode + :type parallel_mode: :class:`colossalai.context.ParallelMode` + :raises AssertionError: Raises an AssertionError if `parallel_mode` is not an instance + of :class:`colossalai.context.ParallelMode` + :return: a boolean value indicating whether the current device is the last one + among its group for `parallel_mode` + :rtype: bool + """ + rank = self.get_local_rank(parallel_mode) + world_size = self.get_world_size(parallel_mode) + return rank == world_size - 1 + + def get_world_size(self, parallel_mode: ParallelMode): + """Returns the world size for `parallel_mode`. + + :param parallel_mode: The chosen parallel mode + :type parallel_mode: :class:`colossalai.context.ParallelMode` + :raises AssertionError: Raises an AssertionError if `parallel_mode` is not an instance + of :class:`colossalai.context.ParallelMode` + :return: The world size for `parallel_mode` + :rtype: int + """ + self._check_parallel_mode(parallel_mode) + return self._world_sizes[parallel_mode] + + def add_world_size(self, parallel_mode: ParallelMode, world_size: int): + """Adds world size for `parallel_mode`. + + :param parallel_mode: The chosen parallel mode + :type parallel_mode: :class:`colossalai.context.ParallelMode` + :param world_size: The world size to be added + :type world_size: int + :raises AssertionError: Raises an AssertionError if `parallel_mode` is not an instance + of :class:`colossalai.context.ParallelMode` + """ + self._check_parallel_mode(parallel_mode) + self._world_sizes[parallel_mode] = world_size + + def get_group(self, parallel_mode: ParallelMode): + """Returns the group of the current device for `parallel_mode`. 
+ + :param parallel_mode: The chosen parallel mode + :type parallel_mode: :class:`colossalai.context.ParallelMode` + :raises AssertionError: Raises an AssertionError if `parallel_mode` is not an instance + of :class:`colossalai.context.ParallelMode` + :return: The group of the current device for `parallel_mode` + :rtype: torch.distributed.ProcessGroup + """ + self._check_parallel_mode(parallel_mode) + return self._groups[parallel_mode] + + def add_group(self, parallel_mode: ParallelMode, group: dist.ProcessGroup): + """Adds the group of the current device for `parallel_mode`. + + :param parallel_mode: The chosen parallel mode + :type parallel_mode: :class:`colossalai.context.ParallelMode` + :param group: The group to be added + :type group: torch.distributed.ProcessGroup + :raises AssertionError: Raises an AssertionError if `parallel_mode` is not an instance + of :class:`colossalai.context.ParallelMode` + """ + self._check_parallel_mode(parallel_mode) + self._groups[parallel_mode] = group + + def get_ranks_in_group(self, parallel_mode: ParallelMode): + """Returns the rank of the current device for `parallel_mode` in the group. + + :param parallel_mode: The chosen parallel mode + :type parallel_mode: :class:`colossalai.context.ParallelMode` + :raises AssertionError: Raises an AssertionError if `parallel_mode` is not an instance + of :class:`colossalai.context.ParallelMode` + :return: the rank of the current device for `parallel_mode` in the group + :rtype: int + """ + self._check_parallel_mode(parallel_mode) + return self._ranks_in_group[parallel_mode] + + def add_ranks_in_group(self, parallel_mode: ParallelMode, ranks: list): + """Adds the ranks of the current device for `parallel_mode` in the group. + + :param parallel_mode: The chosen parallel mode + :type parallel_mode: :class:`colossalai.context.ParallelMode` + :param ranks: List of ranks to be added + :type ranks: list + :raises AssertionError: Raises an AssertionError if `parallel_mode` is not an instance + of :class:`colossalai.context.ParallelMode` + """ + self._check_parallel_mode(parallel_mode) + self._ranks_in_group[parallel_mode] = ranks + + def init_global_dist(self, addr=None, port=None): + """Initializes the global distributed environment. + + :param addr: The IP address of the current device + :type addr: str, optional + :param port: The port to be used in the system of the current device + :type port: int, optional + """ + # get config + rank = self._dist_args.local_rank + world_size = self._dist_args.world_size + # default env config, overwrite by exporting + # them in your bash script + addr = os.getenv('MASTER_ADDR', 'localhost') if addr is None else addr + port = os.getenv('MASTER_PORT', '8008') if port is None else port + init_method = f'tcp://{addr}:{port}' + + dist.init_process_group(backend=self._dist_args.backend, + rank=rank, + world_size=world_size, + init_method=init_method) + + # None will give the default global process group for pytorch dist operations + self._register_dist(rank, world_size, None, + list(range(world_size)), ParallelMode.GLOBAL) + self._global_ranks[ParallelMode.GLOBAL] = rank + + def _register_dist(self, local_rank, world_size, + process_group, ranks_in_group, mode): + self.add_local_rank(mode, local_rank) + self.add_world_size(mode, world_size) + self.add_group(mode, process_group) + self.add_ranks_in_group(mode, ranks_in_group) + + def check_sanity(self): + """Checks sanity of the parallel context. 
+ + :raises AssertionError: Raises an AssertionError if the world size does not equal to the product + of data paralle size, pipeline parallel size and tensor parallel size + """ + dps = self.data_parallel_size + pps = self.pipeline_parallel_size + tps = self.tensor_parallel_size + ws = self.world_size + assert ws == dps * pps * tps, f"Expected the world size {ws} to be equal to data parallel size ({dps}) * pipeline parallel size ({pps}) * tensor parallel size ({tps})" + + def init_parallel_groups(self): + """Initializes the parallel groups. + + :raises AssertionError: Raises an AssertionError if the field paralle is not present in the config file + """ + + # get rank and world size + rank = self.get_global_rank() + world_size = self.get_world_size(ParallelMode.GLOBAL) + self.world_size = world_size + + assert hasattr(self.config, 'parallel'), 'Expected the field parallel to be present in the config file' + + # set parallel size as attributes for global context + parallel_config = self.config.parallel + set_parallel_size(self, parallel_config, 'pipeline', + 'pipeline_parallel_size') + set_parallel_size(self, parallel_config, 'tensor', + 'tensor_parallel_size') + + # the user should not set the data parallel size manually + # instead, it should be calculated based on other parallel config + self.data_parallel_size = self.world_size // (self.pipeline_parallel_size * self.tensor_parallel_size) + + # get the tensor parallel mode and check + tensor_parallel_mode = parallel_config['tensor'].get('mode', None) + assert tensor_parallel_mode in ALLOWED_MODES, f"mode in the parallel config must be set to one of {ALLOWED_MODES}" + self.check_sanity() + + pg_init = [] + # LSG: init data parallel process group for compatibility with other parallel module such as zero + pg_init.append(dict(type=INITIALIZER_MAPPING['data'])) + + if self.pipeline_parallel_size > 1: + pg_init.append(dict(type=INITIALIZER_MAPPING['pipeline'])) + pg_init.append(dict(type=INITIALIZER_MAPPING['tensor'])) + + # init specific tensor parallel group + if tensor_parallel_mode is not None: + tensor_parallel_cfg = parallel_config['tensor'].copy() + + # remove duplicate parameters + tensor_parallel_cfg.pop('mode') + tensor_parallel_cfg.pop('size') + + # add this config to initialize later + pg_init.append(dict(type=INITIALIZER_MAPPING[tensor_parallel_mode.lower()], **tensor_parallel_cfg)) + + # run initialization of different process groups + for initializer_cfg in pg_init: + cfg = initializer_cfg.copy() + initializer_type = cfg.pop('type') + initializer = DIST_GROUP_INITIALIZER.get_module(initializer_type)( + rank, world_size, self.config, + self.data_parallel_size, + self.pipeline_parallel_size, + self.tensor_parallel_size, + **cfg) + parallel_setting = initializer.init_dist_group() + if isinstance(parallel_setting, list): + for args in parallel_setting: + self._register_dist(*args) + else: + self._register_dist(*parallel_setting) + + def is_initialized(self, parallel_mode: ParallelMode): + """Returns a boolean value indicating whether `parallel_mode` is initialized + in the current system. + + :param parallel_mode: The chosen parallel mode + :type parallel_mode: :class:`colossalai.context.ParallelMode` + :return: a boolean value indicating whether `parallel_mode` is initialized + in the current system + :rtype: bool + """ + return parallel_mode in self._groups + + def destroy(self): + """Destroys the current distributed parallel environment. 
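The sanity check and the derived data-parallel size above reduce to one factorization: the world size must equal the product of the data, pipeline and tensor parallel sizes, with the data-parallel size computed rather than configured. In numbers, with illustrative sizes:

```python
# The factorization enforced by check_sanity, with the data-parallel size derived the same
# way init_parallel_groups does; all sizes are illustrative.
world_size = 16
pipeline_parallel_size = 2
tensor_parallel_size = 4

data_parallel_size = world_size // (pipeline_parallel_size * tensor_parallel_size)   # 2
assert world_size == data_parallel_size * pipeline_parallel_size * tensor_parallel_size
```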
+ """ + for mode, group in self._groups.items(): + if mode is not ParallelMode.GLOBAL: + dist.destroy_process_group(group) + # destroy global process group + dist.destroy_process_group() + + def set_device(self): + """Sets distributed processes to be bound to devices. + """ + devices_per_node = torch.cuda.device_count() + global_rank = self.get_global_rank() + device = global_rank % devices_per_node + torch.cuda.set_device(device) + print(f'process rank {global_rank} is bound to device {device}') + + def set_seed(self): + """Sets seeds for all random libraries. + """ + if hasattr(self.config, 'seed'): + seed = getattr(self.config, 'seed') + else: + seed = 2 # default seed + + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + + global_rank = self.get_global_rank() + + if torch.cuda.is_available(): + # create random seed for different parallel modes + # data parallel seed are kept the same + parallel_seed = seed + add_seed(ParallelMode.DATA, parallel_seed) + + # model parallel seeds are different across ranks + pipeline_offset = self._local_ranks.get(ParallelMode.PIPELINE, 0) + + # add seed for data parallel and tensor parallel only + if self.is_initialized(ParallelMode.TENSOR): + tp_rank = self.get_local_rank(ParallelMode.TENSOR) + # 100 is only to increase the diff in seeds between pipeline stages + tp_rank_with_offset = tp_rank + pipeline_offset * 1024 + tp_seed = seed + tp_rank_with_offset + add_seed(ParallelMode.TENSOR, tp_seed) + + set_mode(ParallelMode.DATA) + seeds = get_seeds() + seed_str = ', '.join([f'{k}: {v}' for k, v in seeds.items()]) + + print(f"initialized seed on rank {global_rank}, " + f"numpy: {seed}, python random: {seed}, {seed_str}," + f"the default parallel seed is {ParallelMode.DATA}.", flush=True) + else: + print(f"initialized seed on rank {global_rank}, " + f"numpy: {seed}, python random: {seed}, pytorch: {seed}", flush=True) + print('WARNING: CUDA is not available, thus CUDA RNG cannot be used to track CUDA random number states', + flush=True) diff --git a/colossalai/context/parallel_mode.py b/colossalai/context/parallel_mode.py new file mode 100644 index 000000000..f51ed8ecf --- /dev/null +++ b/colossalai/context/parallel_mode.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from enum import Enum + + +# parallel modes +class ParallelMode(Enum): + """This is an enumeration class containing all possible parallel modes. 
+ """ + + GLOBAL = 'global' + + # common parallel + DATA = 'data' + + # pipeline parallel + PIPELINE = 'pipe' + PIPELINE_PREV = 'pipe_prev' + PIPELINE_NEXT = 'pipe_next' + + # containing all ranks in tensor parallel + TENSOR = 'tensor' + + # sequence parallel + SEQUENCE = 'sequence' + + # 1D Parallel + PARALLEL_1D = '1d' + + # 2D parallel + PARALLEL_2D_ROW = '2d_row' + PARALLEL_2D_COL = '2d_col' + + # 3D parallel + PARALLEL_3D_INPUT = '3d_input' + PARALLEL_3D_WEIGHT = '3d_weight' + PARALLEL_3D_OUTPUT = '3d_output' + + # 2.5D parallel + PARALLEL_2P5D_ROW = '2p5d_row' + PARALLEL_2P5D_COL = '2p5d_col' + PARALLEL_2P5D_DEP = '2p5d_dep' + PARALLEL_2P5D_XZ = '2p5d_xz' diff --git a/colossalai/context/process_group_initializer/__init__.py b/colossalai/context/process_group_initializer/__init__.py new file mode 100644 index 000000000..c7db5d39c --- /dev/null +++ b/colossalai/context/process_group_initializer/__init__.py @@ -0,0 +1,15 @@ +from .initializer_1d import Initializer_1D +from .initializer_2d import Initializer_2D +from .initializer_2p5d import Initializer_2p5D +from .initializer_3d import Initializer_3D +from .initializer_data import Initializer_Data +from .initializer_pipeline import Initializer_Pipeline +from .initializer_sequence import Initializer_Sequence +from .initializer_tensor import Initializer_Tensor +from .process_group_initializer import ProcessGroupInitializer + +__all__ = [ + 'Initializer_Tensor', 'Initializer_Sequence', 'Initializer_Pipeline', + 'Initializer_Data', 'Initializer_2p5D', 'Initializer_2D', 'Initializer_3D', + 'Initializer_1D', 'ProcessGroupInitializer' +] diff --git a/colossalai/context/process_group_initializer/initializer_1d.py b/colossalai/context/process_group_initializer/initializer_1d.py new file mode 100644 index 000000000..784480a72 --- /dev/null +++ b/colossalai/context/process_group_initializer/initializer_1d.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch.distributed as dist + +from colossalai.context import Config +from colossalai.core import global_context as gpc +from colossalai.registry import DIST_GROUP_INITIALIZER +from .process_group_initializer import ProcessGroupInitializer +from ..parallel_mode import ParallelMode + + +@DIST_GROUP_INITIALIZER.register_module +class Initializer_1D(ProcessGroupInitializer): + '''A ProcessGroupInitializer for 1d tensor parallelism. + ''' + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.num_group = self.world_size // self.tensor_parallel_size + + def init_dist_group(self): + '''Initialize 1D tensor parallel groups, and assign local_ranks and groups to each gpu. 
+ + :return: (local_rank, group_world_size, process_group, ranks_in_group, mode) + :rtype: tuple + ''' + local_rank = None + ranks_in_group = None + process_group = None + group_world_size = None + mode = ParallelMode.PARALLEL_1D + + for i in range(self.num_group): + ranks = [i * self.tensor_parallel_size + j for j in range(self.tensor_parallel_size)] + group = dist.new_group(ranks) + + if self.rank in ranks: + local_rank = ranks.index(self.rank) + group_world_size = len(ranks) + process_group = group + ranks_in_group = ranks + + return local_rank, group_world_size, process_group, ranks_in_group, mode diff --git a/colossalai/context/process_group_initializer/initializer_2d.py b/colossalai/context/process_group_initializer/initializer_2d.py new file mode 100644 index 000000000..aa06b5e65 --- /dev/null +++ b/colossalai/context/process_group_initializer/initializer_2d.py @@ -0,0 +1,123 @@ +import math +import os + +import torch.distributed as dist + +from colossalai.constants import SUMMA_DIM +from colossalai.registry import DIST_GROUP_INITIALIZER +from .process_group_initializer import ProcessGroupInitializer +from ..parallel_mode import ParallelMode + + +def _check_summa_env_var(summa_dim): + # check environment variable for SUMMA + env_summa_dim = os.environ.get(SUMMA_DIM, None) + + if env_summa_dim: + assert int(env_summa_dim) == summa_dim, \ + 'SUMMA_DIM has been set in the current environment and ' \ + 'does not match with the value passed to this initialized' + else: + os.environ[SUMMA_DIM] = str(summa_dim) + + +class Initializer_2D_Row(ProcessGroupInitializer): + '''2d tensor parallel initialization among rows. + ''' + + def __init__(self, num_group, summa_dim, *args, **kwargs): + super(Initializer_2D_Row, self).__init__(*args, **kwargs) + self.num_group = num_group + self.summa_dim = summa_dim + + def init_dist_group(self): + '''Initialize 2D tensor row parallel groups, and assign local_ranks and groups to each gpu. + + :return: 2D tensor row parallelism's information + :rtype: tuple(local_rank, group_world_size, process_group, ranks_in_group, mode) + ''' + local_rank = None + ranks_in_group = None + process_group = None + group_world_size = None + mode = ParallelMode.PARALLEL_2D_ROW + + for i in range(self.num_group): + for j in range(self.summa_dim): + ranks = [i * self.tensor_parallel_size + j * self.summa_dim + k + for k in range(self.summa_dim)] + group = dist.new_group(ranks) + + if self.rank in ranks: + local_rank = ranks.index(self.rank) + group_world_size = len(ranks) + process_group = group + ranks_in_group = ranks + + return local_rank, group_world_size, process_group, ranks_in_group, mode + + +class Initializer_2D_Col(ProcessGroupInitializer): + '''2d tensor parallel initialization among cols. + ''' + + def __init__(self, num_group, summa_dim, *args, **kwargs): + super(Initializer_2D_Col, self).__init__(*args, **kwargs) + self.num_group = num_group + self.summa_dim = summa_dim + + def init_dist_group(self): + '''Initialize 2D tensor row parallel groups, and assign local_ranks and groups to each gpu. 
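A worked example of the 1D grouping rule above (sizes are hypothetical): with a world size of 8 and a tensor parallel size of 4, consecutive ranks form two groups.

world_size, tensor_parallel_size = 8, 4
num_group = world_size // tensor_parallel_size

groups = [[i * tensor_parallel_size + j for j in range(tensor_parallel_size)]
          for i in range(num_group)]
# groups == [[0, 1, 2, 3], [4, 5, 6, 7]]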
+ + :return: 2D tensor col parallelism's information + :rtype: tuple(local_rank, group_world_size, process_group, ranks_in_group, mode) + ''' + local_rank = None + ranks_in_group = None + process_group = None + group_world_size = None + mode = ParallelMode.PARALLEL_2D_COL + + for i in range(self.num_group): + for j in range(self.summa_dim): + ranks = [i * self.tensor_parallel_size + j + k * self.summa_dim + for k in range(self.summa_dim)] + group = dist.new_group(ranks) + + if self.rank in ranks: + local_rank = ranks.index(self.rank) + group_world_size = len(ranks) + process_group = group + ranks_in_group = ranks + + return local_rank, group_world_size, process_group, ranks_in_group, mode + + +@DIST_GROUP_INITIALIZER.register_module +class Initializer_2D(ProcessGroupInitializer): + """ + Serve as the single entry point to 2D parallel initialization. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.num_group = self.world_size // self.tensor_parallel_size + self.summa_dim = int(math.sqrt(self.tensor_parallel_size)) + + assert self.tensor_parallel_size == self.summa_dim ** 2, \ + "2D summa dim should equal to tensor parallel size ^ 0.5" + _check_summa_env_var(self.summa_dim) + + self.col_initializer = Initializer_2D_Col(self.num_group, self.summa_dim, *args, **kwargs) + self.row_initializer = Initializer_2D_Row(self.num_group, self.summa_dim, *args, **kwargs) + + def init_dist_group(self): + '''Initialize 2D tensor row and col parallel groups, and assign local_ranks and groups to each gpu. + + :return: 2D tensor parallelism's information + :rtype: list of tuples (local_rank, group_world_size, process_group, ranks_in_group, mode) + ''' + parallel_setting = [] + parallel_setting.append(self.row_initializer.init_dist_group()) + parallel_setting.append(self.col_initializer.init_dist_group()) + return parallel_setting diff --git a/colossalai/context/process_group_initializer/initializer_2p5d.py b/colossalai/context/process_group_initializer/initializer_2p5d.py new file mode 100644 index 000000000..cacfdc590 --- /dev/null +++ b/colossalai/context/process_group_initializer/initializer_2p5d.py @@ -0,0 +1,255 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import math +import os + +import torch.distributed as dist + +from colossalai.constants import TESSERACT_DIM, TESSERACT_DEP +from colossalai.context import Config +from colossalai.core import global_context as gpc +from colossalai.registry import DIST_GROUP_INITIALIZER +from .process_group_initializer import ProcessGroupInitializer +from ..parallel_mode import ParallelMode + + +def _check_tesseract_env_var(tesseract_dim: int, + tesseract_dep: int): + # check environment variable for TESSERACT + env_tesseract_dim = os.environ.get(TESSERACT_DIM, None) + env_tesseract_dep = os.environ.get(TESSERACT_DEP, None) + + if env_tesseract_dim and env_tesseract_dep: + assert int(env_tesseract_dim) == tesseract_dim, \ + 'TESSERACT_DIM has been set in the current environment and ' \ + 'does not match with the value passed to this initialized' + assert int(env_tesseract_dep) == tesseract_dep, \ + 'TESSERACT_DEP has been set in the current environment and ' \ + 'does not match with the value passed to this initialized' + else: + os.environ[TESSERACT_DIM] = str(tesseract_dim) + os.environ[TESSERACT_DEP] = str(tesseract_dep) + + +# i row j col k dep +class Initializer_2p5D_ROW(ProcessGroupInitializer): + '''2p5d tensor parallel initialization among rows. 
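A worked example of the 2D (SUMMA) grouping above for a single tensor parallel group (hypothetical sizes): with a tensor parallel size of 4 the SUMMA dimension is 2, ranks sit on a 2x2 grid, each row group collects one grid row and each column group one grid column.

tensor_parallel_size, summa_dim, i = 4, 2, 0   # i indexes the tensor parallel group

row_groups = [[i * tensor_parallel_size + j * summa_dim + k for k in range(summa_dim)]
              for j in range(summa_dim)]
col_groups = [[i * tensor_parallel_size + j + k * summa_dim for k in range(summa_dim)]
              for j in range(summa_dim)]
# row_groups == [[0, 1], [2, 3]]
# col_groups == [[0, 2], [1, 3]]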
+ ''' + + def __init__(self, + tesseract_dim: int, + tesseract_dep: int, + *args): + super(Initializer_2p5D_ROW, self).__init__(*args) + + self.tensor_parallel_size = gpc.tensor_parallel_size + self.num_group = self.world_size // self.tensor_parallel_size + self.tesseract_dep = tesseract_dep + self.tesseract_dim = tesseract_dim + assert self.tensor_parallel_size == self.tesseract_dim ** 2 * self.tesseract_dep, \ + "Tensor parallel size should be depth * dim ** 2 in 2.5D parallel" + + def init_dist_group(self): + '''Initialize 2p5D tensor row parallel groups, and assign local_ranks and groups to each gpu. + + :return: 2p5D tensor row parallelism's information + :rtype: tuple(local_rank, group_world_size, process_group, ranks_in_group, mode) + ''' + local_rank = None + ranks_in_group = None + process_group = None + group_world_size = None + mode = ParallelMode.PARALLEL_2P5D_ROW + + for h in range(self.num_group): + for j in range(self.tesseract_dim): + for k in range(self.tesseract_dep): + ranks = [h * self.tensor_parallel_size + i + self.tesseract_dim * ( + j + self.tesseract_dim * k) for i in range(self.tesseract_dim)] + group = dist.new_group(ranks) + + if self.rank in ranks: + local_rank = ranks.index(self.rank) + group_world_size = len(ranks) + process_group = group + ranks_in_group = ranks + + return local_rank, group_world_size, process_group, ranks_in_group, mode + + +class Initializer_2p5D_Col(ProcessGroupInitializer): + '''2p5d tensor parallel initialization among cols. + ''' + def __init__(self, + tesseract_dim: int, + tesseract_dep: int, + *args): + super(Initializer_2p5D_Col, self).__init__(*args) + + self.tensor_parallel_size = gpc.tensor_parallel_size + self.num_group = self.world_size // self.tensor_parallel_size + self.tesseract_dep = tesseract_dep + self.tesseract_dim = tesseract_dim + assert self.tensor_parallel_size == self.tesseract_dim ** 2 * self.tesseract_dep, \ + "Tensor parallel size should be depth * dim ** 2 in 2.5D parallel" + + def init_dist_group(self): + '''Initialize 2p5D tensor col parallel groups, and assign local_ranks and groups to each gpu. + + :return: 2p5D tensor col parallelism's information + :rtype: tuple(local_rank, group_world_size, process_group, ranks_in_group, mode) + ''' + local_rank = None + ranks_in_group = None + process_group = None + group_world_size = None + mode = ParallelMode.PARALLEL_2P5D_COL + + for h in range(self.num_group): + for i in range(self.tesseract_dim): + for k in range(self.tesseract_dep): + ranks = [h * self.tensor_parallel_size + i + self.tesseract_dim * ( + j + self.tesseract_dim * k) for j in range(self.tesseract_dim)] + group = dist.new_group(ranks) + + if self.rank in ranks: + local_rank = ranks.index(self.rank) + group_world_size = len(ranks) + process_group = group + ranks_in_group = ranks + + return local_rank, group_world_size, process_group, ranks_in_group, mode + + +class Initializer_2p5D_Dep(ProcessGroupInitializer): + '''2p5D tensor parallel initialization among depths. 
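A worked example of the 2.5D rank layout used by the row and column initializers above (hypothetical sizes): with tesseract_dim=2 and tesseract_dep=2 the tensor parallel size is 8, and within one group rank(i, j, k) = i + dim * (j + dim * k).

dim, dep = 2, 2
rank = lambda i, j, k: i + dim * (j + dim * k)

row_groups = [[rank(i, j, k) for i in range(dim)] for k in range(dep) for j in range(dim)]
col_groups = [[rank(i, j, k) for j in range(dim)] for k in range(dep) for i in range(dim)]
# row_groups == [[0, 1], [2, 3], [4, 5], [6, 7]]   (vary i, fix j and k)
# col_groups == [[0, 2], [1, 3], [4, 6], [5, 7]]   (vary j, fix i and k)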
+ ''' + def __init__(self, + tesseract_dim: int, + tesseract_dep: int, + *args): + super(Initializer_2p5D_Dep, self).__init__(*args) + + self.tensor_parallel_size = gpc.tensor_parallel_size + self.num_group = self.world_size // self.tensor_parallel_size + self.tesseract_dep = tesseract_dep + self.tesseract_dim = tesseract_dim + assert self.tensor_parallel_size == self.tesseract_dim ** 2 * self.tesseract_dep, \ + "Tensor parallel size should be depth * dim ** 2 in 2.5D parallel" + + def init_dist_group(self): + '''Initialize 2p5D tensor depth parallel groups, and assign local_ranks and groups to each gpu. + + :return: 2p5D tensor depth parallelism's information + :rtype: tuple(local_rank, group_world_size, process_group, ranks_in_group, mode) + ''' + local_rank = None + ranks_in_group = None + process_group = None + group_world_size = None + mode = ParallelMode.PARALLEL_2P5D_DEP + + for h in range(self.num_group): + for i in range(self.tesseract_dim): + for j in range(self.tesseract_dim): + ranks = [h * self.tensor_parallel_size + i + self.tesseract_dim * ( + j + self.tesseract_dim * k) for k in range(self.tesseract_dep)] + group = dist.new_group(ranks) + + if self.rank in ranks: + local_rank = ranks.index(self.rank) + group_world_size = len(ranks) + process_group = group + ranks_in_group = ranks + + return local_rank, group_world_size, process_group, ranks_in_group, mode + + +# i row j col k dep +class Initializer_2p5D_XZ(ProcessGroupInitializer): + '''2p5d tensor parallel initialization among cols times dep. + ''' + def __init__(self, + tesseract_dim: int, + tesseract_dep: int, + *args): + super(Initializer_2p5D_XZ, self).__init__(*args) + + self.tensor_parallel_size = gpc.tensor_parallel_size + self.num_group = self.world_size // self.tensor_parallel_size + self.tesseract_dep = tesseract_dep + self.tesseract_dim = tesseract_dim + assert self.tensor_parallel_size == self.tesseract_dim ** 2 * self.tesseract_dep, \ + "Tensor parallel size should be depth * dim ** 2 in 2.5D parallel" + + def init_dist_group(self): + '''Initialize 2p5D tensor colXdepth parallel groups, and assign local_ranks and groups to each gpu. + + :return: 2p5D tensor colXdepth parallelism's information + :rtype: tuple(local_rank, group_world_size, process_group, ranks_in_group, mode) + ''' + local_rank = None + ranks_in_group = None + process_group = None + group_world_size = None + mode = ParallelMode.PARALLEL_2P5D_XZ + + for h in range(self.num_group): + for i in range(self.tesseract_dim): + ranks = [h * self.tensor_parallel_size + i + self.tesseract_dim * ( + j + self.tesseract_dim * k) for k in range(self.tesseract_dep) for j in + range(self.tesseract_dim)] + group = dist.new_group(ranks) + + if self.rank in ranks: + local_rank = ranks.index(self.rank) + group_world_size = len(ranks) + process_group = group + ranks_in_group = ranks + + return local_rank, group_world_size, process_group, ranks_in_group, mode + + +@DIST_GROUP_INITIALIZER.register_module +class Initializer_2p5D(ProcessGroupInitializer): + """ + Serve as the single entry point to Tesseract parallel initialization. 
+ """ + + def __init__(self, + rank: int, + world_size: int, + config: Config, + data_parallel_size: int, + pipeline_parlalel_size: int, + tensor_parallel_size: int, + depth: int + ): + args = (rank, world_size, config, data_parallel_size, pipeline_parlalel_size, tensor_parallel_size) + super().__init__(*args) + self.num_group = self.world_size // self.tensor_parallel_size + self.tesseract_dim = int(math.sqrt(self.tensor_parallel_size / depth)) + self.tesseract_dep = depth + + assert self.tensor_parallel_size == self.tesseract_dim ** 2 * self.tesseract_dep, \ + "2.5D tesseract dim should equal to (tensor parallel size / tesseract dep) ^ 0.5" + _check_tesseract_env_var(self.tesseract_dim, self.tesseract_dep) + + self.col_initializer = Initializer_2p5D_Col(self.tesseract_dim, self.tesseract_dep, *args) + self.row_initializer = Initializer_2p5D_ROW(self.tesseract_dim, self.tesseract_dep, *args) + self.dep_initializer = Initializer_2p5D_Dep(self.tesseract_dim, self.tesseract_dep, *args) + self.xz_initializer = Initializer_2p5D_XZ(self.tesseract_dim, self.tesseract_dep, *args) + + def init_dist_group(self): + '''Initialize 2p5D tensor row, col, depth, and colXdepth parallel groups, and assign local_ranks and groups to each gpu. + + :return: Whole 2p5D tensor parallelism's information + :rtype: list of tuples (local_rank, group_world_size, process_group, ranks_in_group, mode) + ''' + parallel_setting = [] + parallel_setting.append(self.col_initializer.init_dist_group()) + parallel_setting.append(self.row_initializer.init_dist_group()) + parallel_setting.append(self.dep_initializer.init_dist_group()) + parallel_setting.append(self.xz_initializer.init_dist_group()) + return parallel_setting diff --git a/colossalai/context/process_group_initializer/initializer_3d.py b/colossalai/context/process_group_initializer/initializer_3d.py new file mode 100644 index 000000000..391230767 --- /dev/null +++ b/colossalai/context/process_group_initializer/initializer_3d.py @@ -0,0 +1,172 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import math +import os + +import torch.distributed as dist +from colossalai.constants import DEPTH_3D +from colossalai.registry import DIST_GROUP_INITIALIZER + +from ..parallel_mode import ParallelMode +from .process_group_initializer import ProcessGroupInitializer + + +def _check_depth_env_var(depth): + # check environment variable for SUMMA + env_depth = os.environ.get(DEPTH_3D, None) + + if env_depth: + assert int(env_depth) == depth, \ + 'SUMMA_DIM has been set in the current environment and ' \ + 'does not match with the value passed to this initialized' + else: + os.environ[DEPTH_3D] = str(depth) + + +class Initializer_3D_Input(ProcessGroupInitializer): + '''2D tensor parallel initialization among input. + ''' + def __init__(self, num_group: int, depth: int, *args): + super().__init__(*args) + self.num_group = num_group + self.depth = depth + + def init_dist_group(self): + '''Initialize 3D tensor parallel groups among input, and assign local_ranks and groups to each gpu. 
+ + :return: 3D tensor parallelism's information among input + :rtype: tuple(local_rank, group_world_size, process_group, ranks_in_group, mode) + ''' + local_rank = None + ranks_in_group = None + process_group = None + group_world_size = None + mode = ParallelMode.PARALLEL_3D_INPUT + + for h in range(self.num_group): + for i in range(self.depth): + for k in range(self.depth): + ranks = [ + h * self.depth**3 + i + self.depth * + (j + self.depth * k) for j in range(self.depth) + ] + group = dist.new_group(ranks) + + if self.rank in ranks: + local_rank = ranks.index(self.rank) + group_world_size = len(ranks) + process_group = group + ranks_in_group = ranks + + return local_rank, group_world_size, process_group, ranks_in_group, mode + + +class Initializer_3D_Weight(ProcessGroupInitializer): + '''3D tensor parallel initialization among weight. + ''' + + def __init__(self, num_group: int, depth: int, *args): + super().__init__(*args) + self.num_group = num_group + self.depth = depth + + def init_dist_group(self): + '''Initialize 3D tensor parallel groups among weight, and assign local_ranks and groups to each gpu. + + :return: 3D tensor parallelism's information among weight + :rtype: tuple(local_rank, group_world_size, process_group, ranks_in_group, mode) + ''' + local_rank = None + ranks_in_group = None + process_group = None + group_world_size = None + mode = ParallelMode.PARALLEL_3D_WEIGHT + + for h in range(self.num_group): + for k in range(self.depth): + for j in range(self.depth): + ranks = [ + h * self.depth**3 + i + self.depth * + (j + self.depth * k) for i in range(self.depth) + ] + group = dist.new_group(ranks) + + if self.rank in ranks: + local_rank = ranks.index(self.rank) + group_world_size = len(ranks) + process_group = group + ranks_in_group = ranks + + return local_rank, group_world_size, process_group, ranks_in_group, mode + + +class Initializer_3D_Output(ProcessGroupInitializer): + '''2D tensor parallel initialization among weight. + ''' + + def __init__(self, num_group: int, depth: int, *args): + super().__init__(*args) + self.num_group = num_group + self.depth = depth + + def init_dist_group(self): + '''Initialize 3D tensor parallel groups among output, and assign local_ranks and groups to each gpu. + + :return: 3D tensor parallelism's information among output + :rtype: tuple(local_rank, group_world_size, process_group, ranks_in_group, mode) + ''' + local_rank = None + ranks_in_group = None + process_group = None + group_world_size = None + mode = ParallelMode.PARALLEL_3D_OUTPUT + + for h in range(self.num_group): + for i in range(self.depth): + for j in range(self.depth): + ranks = [ + h * self.depth**3 + i + self.depth * + (j + self.depth * k) for k in range(self.depth) + ] + group = dist.new_group(ranks) + + if self.rank in ranks: + local_rank = ranks.index(self.rank) + group_world_size = len(ranks) + process_group = group + ranks_in_group = ranks + + return local_rank, group_world_size, process_group, ranks_in_group, mode + + +@DIST_GROUP_INITIALIZER.register_module +class Initializer_3D(ProcessGroupInitializer): + '''Serve as the single entry point to 3D parallel initialization. 
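A worked example of the 3D rank layout used by the input, weight and output initializers above (hypothetical sizes): with depth 2 the tensor parallel size is 2**3 = 8, and within one group rank(i, j, k) = i + depth * (j + depth * k). Input groups vary j, weight groups vary i and output groups vary k.

depth = 2
rank = lambda i, j, k: i + depth * (j + depth * k)

input_groups  = [[rank(i, j, k) for j in range(depth)] for k in range(depth) for i in range(depth)]
weight_groups = [[rank(i, j, k) for i in range(depth)] for k in range(depth) for j in range(depth)]
output_groups = [[rank(i, j, k) for k in range(depth)] for j in range(depth) for i in range(depth)]
# input_groups  == [[0, 2], [1, 3], [4, 6], [5, 7]]
# weight_groups == [[0, 1], [2, 3], [4, 5], [6, 7]]
# output_groups == [[0, 4], [1, 5], [2, 6], [3, 7]]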
+ ''' + def __init__(self, *args): + super().__init__(*args) + self.num_group = self.world_size // self.tensor_parallel_size + self.depth = round(math.pow(self.tensor_parallel_size, 1 / 3)) + assert self.tensor_parallel_size == self.depth ** 3, \ + f'3D depth ({self.depth}) if not cube root of tensor parallel size ({self.tensor_parallel_size})' + _check_depth_env_var(self.depth) + + self.input_initializer = Initializer_3D_Input(self.num_group, + self.depth, *args) + self.weight_initializer = Initializer_3D_Weight( + self.num_group, self.depth, *args) + self.output_initializer = Initializer_3D_Output( + self.num_group, self.depth, *args) + + def init_dist_group(self): + '''Initialize 3D tensor parallel groups, and assign local_ranks and groups to each gpu. + + :return: 3D tensor parallelism's information + :rtype: list of tuples (local_rank, group_world_size, process_group, ranks_in_group, mode) + ''' + parallel_setting = [] + parallel_setting.append(self.input_initializer.init_dist_group()) + parallel_setting.append(self.weight_initializer.init_dist_group()) + parallel_setting.append(self.output_initializer.init_dist_group()) + return parallel_setting diff --git a/colossalai/context/process_group_initializer/initializer_data.py b/colossalai/context/process_group_initializer/initializer_data.py new file mode 100644 index 000000000..09ffc32f1 --- /dev/null +++ b/colossalai/context/process_group_initializer/initializer_data.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from torch import distributed as dist + +from colossalai.registry import DIST_GROUP_INITIALIZER +from .process_group_initializer import ProcessGroupInitializer +from ..parallel_mode import ParallelMode + + +@DIST_GROUP_INITIALIZER.register_module +class Initializer_Data(ProcessGroupInitializer): + '''A ProcessGroupInitializer for data parallelism. + ''' + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.num_data_parallel_group = self.world_size // self.data_parallel_size + + def init_dist_group(self): + '''Initialize data parallel groups, and assign local_ranks and groups to each gpu. 
+ + :return: data parallelism's information + :rtype: tuple (local_rank, group_world_size, process_group, ranks_in_group, mode) + ''' + local_rank = None + ranks_in_group = None + process_group = None + group_world_size = None + mode = ParallelMode.DATA + + for i in range(self.num_data_parallel_group): + ranks = [i + j * self.num_data_parallel_group for j in range(self.data_parallel_size)] + group = dist.new_group(ranks) + + if self.rank in ranks: + local_rank = ranks.index(self.rank) + group_world_size = len(ranks) + process_group = group + ranks_in_group = ranks + + return local_rank, group_world_size, process_group, ranks_in_group, mode diff --git a/colossalai/context/process_group_initializer/initializer_pipeline.py b/colossalai/context/process_group_initializer/initializer_pipeline.py new file mode 100644 index 000000000..d66c6f9af --- /dev/null +++ b/colossalai/context/process_group_initializer/initializer_pipeline.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from torch import distributed as dist + +from colossalai.registry import DIST_GROUP_INITIALIZER +from .process_group_initializer import ProcessGroupInitializer +from ..parallel_mode import ParallelMode + + +@DIST_GROUP_INITIALIZER.register_module +class Initializer_Pipeline(ProcessGroupInitializer): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.data_group_size = self.world_size // self.data_parallel_size + self.pipeline_stage_size = self.data_group_size // self.pipeline_parallel_size + + def init_dist_group(self): + dist_settings = list() + for i in range(self.data_parallel_size): + for j in range(self.pipeline_stage_size): + pipe_ranks = list( + range(i * self.data_group_size + j, + (i + 1) * self.data_group_size, + self.pipeline_stage_size)) + pipe_group_size = len(pipe_ranks) + pipe_group = dist.new_group(pipe_ranks) + + if self.rank in pipe_ranks: + local_rank = pipe_ranks.index(self.rank) + group_world_size = pipe_group_size + process_group = pipe_group + ranks_in_group = pipe_ranks + dist_settings.append( + tuple((local_rank, group_world_size, + process_group, ranks_in_group, + ParallelMode.PIPELINE))) + + for k in range(pipe_group_size): + first = pipe_ranks[k] + second = pipe_ranks[(k + 1) % pipe_group_size] + ranks = [first, second] + group = dist.new_group(ranks) + if self.rank == first: + local_rank = 0 + group_world_size = 2 + process_group = group + ranks_in_group = ranks + dist_settings.append( + tuple((local_rank, group_world_size, + process_group, ranks_in_group, + ParallelMode.PIPELINE_NEXT))) + elif self.rank == second: + local_rank = 1 + group_world_size = 2 + process_group = group + ranks_in_group = ranks + dist_settings.append( + tuple((local_rank, group_world_size, + process_group, ranks_in_group, + ParallelMode.PIPELINE_PREV))) + + return dist_settings diff --git a/colossalai/context/process_group_initializer/initializer_sequence.py b/colossalai/context/process_group_initializer/initializer_sequence.py new file mode 100644 index 000000000..0d0c41e2d --- /dev/null +++ b/colossalai/context/process_group_initializer/initializer_sequence.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from colossalai.registry import DIST_GROUP_INITIALIZER +from .initializer_tensor import Initializer_Tensor +from .process_group_initializer import ProcessGroupInitializer +from ..parallel_mode import ParallelMode + + +@DIST_GROUP_INITIALIZER.register_module +class Initializer_Sequence(ProcessGroupInitializer): + '''A ProcessGroupInitializer 
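A worked example of the data parallel and pipeline groupings above (hypothetical sizes): 8 GPUs with data parallel size 2, pipeline parallel size 2 and tensor parallel size 2.

world_size, dp, pp = 8, 2, 2

num_dp_group = world_size // dp              # 4 data parallel groups
data_groups = [[i + j * num_dp_group for j in range(dp)] for i in range(num_dp_group)]
# data_groups == [[0, 4], [1, 5], [2, 6], [3, 7]]

data_group_size = world_size // dp           # 4 ranks per data parallel replica
pipeline_stage_size = data_group_size // pp  # 2
pipe_groups = [list(range(i * data_group_size + j, (i + 1) * data_group_size, pipeline_stage_size))
               for i in range(dp) for j in range(pipeline_stage_size)]
# pipe_groups == [[0, 2], [1, 3], [4, 6], [5, 7]]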
for sequence parallelism. + ''' + + def __init__(self, + *args, **kwargs): + super().__init__(*args, **kwargs) + # reuse tensor parallel code + self._initializer = Initializer_Tensor(*args, **kwargs) + + def init_dist_group(self): + local_rank, group_world_size, process_group, ranks_in_group, mode = self._initializer.init_dist_group() + + # change mode to sequence + mode = ParallelMode.SEQUENCE + + return local_rank, group_world_size, process_group, ranks_in_group, mode diff --git a/colossalai/context/process_group_initializer/initializer_tensor.py b/colossalai/context/process_group_initializer/initializer_tensor.py new file mode 100644 index 000000000..a748e7758 --- /dev/null +++ b/colossalai/context/process_group_initializer/initializer_tensor.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch.distributed as dist + +from colossalai.registry import DIST_GROUP_INITIALIZER +from .process_group_initializer import ProcessGroupInitializer +from ..parallel_mode import ParallelMode + + +@DIST_GROUP_INITIALIZER.register_module +class Initializer_Tensor(ProcessGroupInitializer): + '''A ProcessGroupInitializer for tensor parallelism. + ''' + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.num_tensor_parallel_group = self.world_size // self.tensor_parallel_size + + def init_dist_group(self): + '''Initialize tensor parallel groups, and assign local_ranks and groups to each gpu. + + :return: tensor parallelism's information + :rtype: tuple(local_rank, group_world_size, process_group, ranks_in_group, mode) + ''' + local_rank = None + ranks_in_group = None + process_group = None + group_world_size = None + mode = ParallelMode.TENSOR + + for i in range(self.num_tensor_parallel_group): + ranks = [i * self.tensor_parallel_size + j for j in range(self.tensor_parallel_size)] + group = dist.new_group(ranks) + + if self.rank in ranks: + local_rank = ranks.index(self.rank) + group_world_size = len(ranks) + process_group = group + ranks_in_group = ranks + + return local_rank, group_world_size, process_group, ranks_in_group, mode diff --git a/colossalai/context/process_group_initializer/process_group_initializer.py b/colossalai/context/process_group_initializer/process_group_initializer.py new file mode 100644 index 000000000..548d1f771 --- /dev/null +++ b/colossalai/context/process_group_initializer/process_group_initializer.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from abc import ABC, abstractmethod + +from colossalai.context import Config + + +class ProcessGroupInitializer(ABC): + '''An object, knowing the parallelism configuration, that initializes parallel groups. 
+ ''' + def __init__(self, + rank: int, + world_size: int, + config: Config, + data_parallel_size: int, + pipeline_parlalel_size: int, + tensor_parallel_size: int + ): + self.rank = rank + self.world_size = world_size + self.data_parallel_size = data_parallel_size + self.config = config + self.pipeline_parallel_size = pipeline_parlalel_size + self.tensor_parallel_size = tensor_parallel_size + super().__init__() + + @abstractmethod + def init_dist_group(self): + pass diff --git a/colossalai/context/random/__init__.py b/colossalai/context/random/__init__.py new file mode 100644 index 000000000..29e77e3ec --- /dev/null +++ b/colossalai/context/random/__init__.py @@ -0,0 +1,8 @@ +from ._helper import (seed, set_mode, with_seed, add_seed, + get_seeds, get_states, get_current_mode, + set_seed_states, sync_states) + +__all__ = [ + 'seed', 'set_mode', 'with_seed', 'add_seed', 'get_seeds', + 'get_states', 'get_current_mode', 'set_seed_states', 'sync_states' +] diff --git a/colossalai/context/random/_helper.py b/colossalai/context/random/_helper.py new file mode 100644 index 000000000..1bc7af738 --- /dev/null +++ b/colossalai/context/random/_helper.py @@ -0,0 +1,144 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import functools +from contextlib import contextmanager + +import torch.cuda +from torch import Tensor + +from .seed_manager import SeedManager +from ..parallel_mode import ParallelMode + +_SEED_MANAGER = SeedManager() + + +def get_seeds(): + """Returns the seeds of the seed manager. + + :return: The seeds of the seed manager + :rtype: dict + """ + return _SEED_MANAGER.seeds + + +def get_states(copy=False): + """Returns the seed states of the seed manager. + + :return: The seed states of the seed manager + :rtype: dict + """ + states = _SEED_MANAGER.seed_states + + if copy: + new_states = dict() + + for parallel_mode, state in states.items(): + new_states[parallel_mode] = state.clone() + return new_states + else: + return _SEED_MANAGER.seed_states + + +def get_current_mode(): + """Returns the current mode of the seed manager. + + :return: The current mode of the seed manager. + :rtype: :class:`torch.ByteTensor` + """ + return _SEED_MANAGER.current_mode + + +def add_seed(parallel_mode: ParallelMode, seed: int): + """Adds a seed to the seed manager for `parallel_mode`. + + :param parallel_mode: The chosen parallel mode + :type parallel_mode: :class:`colossalai.context.ParallelMode` + :param seed: The seed to be added + :type seed: int + :raises AssertionError: Raises an AssertionError if `parallel_mode` is not an instance of + :class:`colossalai.context.ParallelMode` or the seed for `parallel_mode` has been added + """ + _SEED_MANAGER.add_seed(parallel_mode, seed) + + +def set_mode(parallel_mode: ParallelMode): + """Sets the current mode of the seed manager. + + :param parallel_mode: The chosen parallel mode + :type parallel_mode: :class:`colossalai.context.ParallelMode` + """ + _SEED_MANAGER.set_mode(parallel_mode) + + +def set_seed_states(parallel_mode: ParallelMode, state: Tensor): + """Sets the state of the seed manager for `parallel_mode`. 
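A minimal sketch of a custom initializer built on the abstract class above; the class name is hypothetical, and the example simply places every rank into one group and returns the standard 5-tuple that `init_parallel_groups` expects. A real initializer would pick an appropriate member of `ParallelMode` rather than reusing `ParallelMode.GLOBAL` as a placeholder.

import torch.distributed as dist

from colossalai.context import ParallelMode
from colossalai.context.process_group_initializer import ProcessGroupInitializer
from colossalai.registry import DIST_GROUP_INITIALIZER


@DIST_GROUP_INITIALIZER.register_module
class Initializer_AllRanks(ProcessGroupInitializer):   # hypothetical example
    def init_dist_group(self):
        ranks = list(range(self.world_size))
        group = dist.new_group(ranks)
        return ranks.index(self.rank), len(ranks), group, ranks, ParallelMode.GLOBAL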
+ + :param parallel_mode: The chosen parallel mode + :type parallel_mode: :class:`colossalai.context.ParallelMode` + :param state: the state to be set + :type state: :class:`torch.Tensor` + :raises AssertionError: Raises an AssertionError if `parallel_mode` is not found in the seed manager + """ + _SEED_MANAGER.set_state(parallel_mode, state) + + +def sync_states(): + current_mode = get_current_mode() + current_states = torch.cuda.get_rng_state() + set_seed_states(current_mode, current_states) + + +@contextmanager +def seed(parallel_mode: ParallelMode): + """ A context for seed switch + + Examples:: + + with seed(ParallelMode.DATA): + output = F.dropout(input) + + """ + try: + # set to new mode + current_mode = _SEED_MANAGER.current_mode + yield _SEED_MANAGER.set_mode(parallel_mode) + finally: + # recover + _SEED_MANAGER.set_mode(current_mode) + + +def with_seed(func, parallel_mode: ParallelMode): + """ + A function wrapper which executes the function with a specified seed. + + Examples:: + + # use with decorator + @with_seed(ParallelMode.DATA) + def forward(input): + return F.dropout(input) + out = forward(input) + # OR use it inline + def forward(input): + return F.dropout(input) + wrapper_forward = with_seed(forward, ParallelMode.DATA) + out = wrapped_forward(input) + + """ + + @functools.wraps(func) + def wrapper(*args, **kwargs): + # switch mode + current_mode = _SEED_MANAGER.current_mode + _SEED_MANAGER.set_mode(parallel_mode) + + # exec func + out = func(*args, **kwargs) + + # recover state + _SEED_MANAGER.set_mode(current_mode) + + return out + + return wrapper diff --git a/colossalai/context/random/seed_manager.py b/colossalai/context/random/seed_manager.py new file mode 100644 index 000000000..3e74c8cb9 --- /dev/null +++ b/colossalai/context/random/seed_manager.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch +from torch import Tensor + +from colossalai.context.parallel_mode import ParallelMode + + +class SeedManager: + """This class is a manager of all random seeds involved in the system. + """ + + def __init__(self): + self._current_mode = None + self._seeds = dict() + self._seed_states = dict() + + @property + def current_mode(self): + return self._current_mode + + @property + def seeds(self): + return self._seeds + + @property + def seed_states(self): + return self._seed_states + + def set_state(self, parallel_mode: ParallelMode, state: Tensor): + """Sets the state of the seed manager for `parallel_mode`. + + :param parallel_mode: The chosen parallel mode + :type parallel_mode: :class:`colossalai.context.ParallelMode` + :param state: the state to be set + :type state: :class:`torch.Tensor` + :raises AssertionError: Raises an AssertionError if `parallel_mode` is not found in the seed manager + """ + assert parallel_mode in self._seed_states, f'Parallel mode {parallel_mode} is not found in the seed manager' + self._seed_states[parallel_mode] = state + + def set_mode(self, parallel_mode: ParallelMode): + """Sets the current mode of the seed manager. 
+ + :param parallel_mode: The chosen parallel mode + :type parallel_mode: :class:`colossalai.context.ParallelMode` + """ + if self.current_mode: + # save the current state for current mode + self._seed_states[self._current_mode] = torch.cuda.get_rng_state() + + # set the new state for new mode + self._current_mode = parallel_mode + torch.cuda.set_rng_state(self._seed_states[parallel_mode]) + + def add_seed(self, parallel_mode: ParallelMode, seed: int): + """Adds a seed to the seed manager for `parallel_mode`. + + :param parallel_mode: The chosen parallel mode + :type parallel_mode: :class:`colossalai.context.ParallelMode` + :param seed: The seed to be added + :type seed: int + :raises AssertionError: Raises an AssertionError if `parallel_mode` is not an instance of + :class:`colossalai.context.ParallelMode` or the seed for `parallel_mode` has been added + """ + assert isinstance( + parallel_mode, ParallelMode), 'A valid ParallelMode must be provided' + assert parallel_mode not in self._seed_states, f'The seed for {parallel_mode} has been added' + current_state = torch.cuda.get_rng_state() + torch.cuda.manual_seed(seed) + self._seed_states[parallel_mode] = torch.cuda.get_rng_state() + self._seeds[parallel_mode] = seed + torch.cuda.set_rng_state(current_state) diff --git a/colossalai/core.py b/colossalai/core.py new file mode 100644 index 000000000..39453e4a0 --- /dev/null +++ b/colossalai/core.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from colossalai.context import ParallelContext + +global_context = ParallelContext() + + +def set_global_context(context: ParallelContext): + '''Reset global context to be identical to a given :class:ParallelContext. + + :param context: Parallel context to generate our global parallel context. + :type context: ParallelContext + ''' + global global_context + global_context = context diff --git a/colossalai/engine/__init__.py b/colossalai/engine/__init__.py new file mode 100644 index 000000000..c00be7df6 --- /dev/null +++ b/colossalai/engine/__init__.py @@ -0,0 +1,7 @@ +from .amp_type import AMP_TYPE +from ._base_engine import Engine +from .gradient_handler import * +from .schedule import * + + +__all__ = ['Engine'] diff --git a/colossalai/engine/_base_engine.py b/colossalai/engine/_base_engine.py new file mode 100644 index 000000000..843ef1d4f --- /dev/null +++ b/colossalai/engine/_base_engine.py @@ -0,0 +1,170 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from typing import Optional + +from colossalai.builder import build_gradient_handler +from colossalai.context import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.logging import get_global_dist_logger +from colossalai.nn import (ZeroRedundancyOptimizer_Level_2, + ZeroRedundancyOptimizer_Level_3) +from torch.nn import Module +from torch.nn.modules.loss import _Loss +from torch.optim import Optimizer +from torch.optim.lr_scheduler import _LRScheduler +from torch.utils.data import DataLoader + +from .schedule import BaseSchedule, NoPipelineSchedule + + +class Engine: + """Basic engine class for training and evaluation. It runs a specific process method + :meth:`step` which is based on the given :attr:`schedule` over each batch of a dataset. 
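A minimal sketch of the SeedManager bookkeeping above (a CUDA device is required; the modes and seed values are arbitrary): `add_seed` records an RNG state per mode, and `set_mode` saves the state of the outgoing mode before restoring the state of the incoming one.

import torch

from colossalai.context.parallel_mode import ParallelMode
from colossalai.context.random.seed_manager import SeedManager

manager = SeedManager()
manager.add_seed(ParallelMode.DATA, 1024)    # snapshot the CUDA RNG state for this seed
manager.add_seed(ParallelMode.TENSOR, 1025)

manager.set_mode(ParallelMode.TENSOR)        # restore the TENSOR state
x = torch.rand(1, device='cuda')             # advances only the TENSOR stream
manager.set_mode(ParallelMode.DATA)          # save TENSOR state, restore DATA state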
+ + :param train_dataloader: Dataloader in training + :param test_dataloader: Dataloader in evaluation + :param model: The neural network model + :param criterion: Criterion for calculating loss + :param optimizer: Optimizer for updating the parameters + :param lr_scheduler: Learning rate scheduler ajusting learning rate during the training or evaluation + :param schedule: Running schedule in :meth:`step` + :type train_dataloader: DataLoader, optional + :type test_dataloader: DataLoader, optional + :type model: Module + :type criterion: _Loss, optional + :type optimizer: Optimizer, optional + :type lr_scheduler: _LRScheduler, optional + :type schedule: BaseSchedule, optional + """ + def __init__(self, + train_dataloader: Optional[DataLoader] = None, + test_dataloader: Optional[DataLoader] = None, + model: Module = None, + criterion: _Loss = None, + optimizer: Optimizer = None, + lr_scheduler: Optional[_LRScheduler] = None, + schedule: BaseSchedule = None): + self.train_dataloader = train_dataloader + self.test_dataloader = test_dataloader + assert model is not None, "Engine requires a model" + self.model = model + self.criterion = criterion + self.optimizer = optimizer + self.lr_scheduler = lr_scheduler + self.schedule = schedule if schedule is not None \ + else NoPipelineSchedule() + self._logger = get_global_dist_logger() + + # build gradient handler + self._gradient_handlers = [] + gradient_handler_cfg = [] + + if hasattr(gpc.config, 'gradient_handler'): + assert isinstance(gpc.config.gradient_handler, list), \ + f'argument gradient_handler_cfg expected type list, ' \ + f'but got type {type(gpc.config.gradient_handler)}' + gradient_handler_cfg = gpc.config.gradient_handler + elif isinstance(self.optimizer, (ZeroRedundancyOptimizer_Level_2, + ZeroRedundancyOptimizer_Level_3)): + gradient_handler_cfg = [dict(type='ZeROGradientHandler')] + self._logger.info( + "Training with zero is detected, ZeROGradientHandler is automatically " + "added even though not specified in the configuration", + ranks=[0]) + elif gpc.is_initialized(ParallelMode.DATA) and gpc.get_world_size( + ParallelMode.DATA) > 1: + gradient_handler_cfg = [dict(type='DataParallelGradientHandler')] + self._logger.info( + "Data parallel training is detected, DataParallelGradientHandler is automatically " + "added even though not specified in the configuration", + ranks=[0]) + if len(gradient_handler_cfg) == 0: + self._logger.warning( + "No gradient handler is set up, please make sure you do not need " + "to all-reduce the gradients after a training step.", + ranks=[0]) + for cfg in gradient_handler_cfg: + handler = build_gradient_handler(cfg, self.model, self.optimizer) + self._gradient_handlers.append(handler) + + self.schedule.initialize(self.train_dataloader, self.model, + self.criterion, self.optimizer, + self.lr_scheduler) + self.forward_only = False + + def handle_gradient(self): + """Handles all-reduce operations of gradients across different parallel groups. + """ + for handler in self._gradient_handlers: + handler.handle_gradient() + + def set_dataloader(self, data: DataLoader, train: bool = True): + """Sets dataloader in training or evaluation. + + :param data: Dataloader to be set + :param train: Set training dataloader if True, otherwise evaluation dataloader + :type data: DataLoader + :type train: bool + """ + if train: + self.train_dataloader = data + else: + self.test_dataloader = data + + def get_model(self): + """Returns the neural network model in the engine. 
+ """ + return self.model + def get_optimizer(self): + """Returns optimizier in the engine. + """ + return self.optimizer + + def get_lr_scheduler(self): + """Returns the learning rate scheduler in the engine. + """ + return self.lr_scheduler + + def train(self): + """Sets the model to training mode. + """ + self.forward_only = False + self.schedule.train(dataloader=self.train_dataloader, mode=True) + + def eval(self): + """Sets the model to evaluation mode. + """ + self.forward_only = True + self.schedule.train(dataloader=self.test_dataloader, mode=False) + + def is_train(self): + """Returns True if it is in training, otherwise False. + """ + return not self.forward_only + + def get_lr(self): + """Gets current learning rate. + """ + return self.schedule.get_lr() + + def step(self, return_loss=True): + """A running step based on the schedule. Usually, it runs a training or + evaluation over a batch of dataset. + + :param return_loss: loss will be returned if True + :type return_loss: bool + :return: (output, lablel, loss) + """ + self.schedule.zero_grad(forward_only=self.forward_only) + + output, label, loss = self.schedule.forward_backward_step( + forward_only=self.forward_only, return_loss=return_loss) + + if not self.forward_only: + # all reduce gradients + self.handle_gradient() + + self.schedule.step() + + return output, label, loss diff --git a/colossalai/engine/amp_type.py b/colossalai/engine/amp_type.py new file mode 100644 index 000000000..7f7c5a659 --- /dev/null +++ b/colossalai/engine/amp_type.py @@ -0,0 +1,10 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from enum import Enum + + +class AMP_TYPE(Enum): + APEX = 'apex' + TORCH = 'torch' + PARALLEL = 'parallel' diff --git a/colossalai/engine/gradient_handler/__init__.py b/colossalai/engine/gradient_handler/__init__.py new file mode 100644 index 000000000..3f896baa5 --- /dev/null +++ b/colossalai/engine/gradient_handler/__init__.py @@ -0,0 +1,5 @@ +from ._base_gradient_handler import BaseGradientHandler +from ._data_parallel_gradient_handler import DataParallelGradientHandler +from ._zero_gradient_handler import ZeROGradientHandler + +__all__ = ['BaseGradientHandler', 'DataParallelGradientHandler', 'ZeROGradientHandler'] diff --git a/colossalai/engine/gradient_handler/_base_gradient_handler.py b/colossalai/engine/gradient_handler/_base_gradient_handler.py new file mode 100644 index 000000000..31f2e6e57 --- /dev/null +++ b/colossalai/engine/gradient_handler/_base_gradient_handler.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from abc import ABC, abstractmethod + + +class BaseGradientHandler(ABC): + """A basic helper class to handle all-reduce operations of gradients across different parallel groups + before optimization. + + :param model: Model where the gradients accumulate + :param optimizer: Optimizer for updating the parameters + :type model: Module + :type optimizer: Optimizer + """ + def __init__(self, model, optimizer): + self._model = model + self._optimizer = optimizer + + @abstractmethod + def handle_gradient(self): + """A method to accumulate gradients across different parallel groups. Users should + write their own functions or just use the functions in pre-defined subclasses. 
+ """ + pass diff --git a/colossalai/engine/gradient_handler/_data_parallel_gradient_handler.py b/colossalai/engine/gradient_handler/_data_parallel_gradient_handler.py new file mode 100644 index 000000000..9fa414cfd --- /dev/null +++ b/colossalai/engine/gradient_handler/_data_parallel_gradient_handler.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python + +import torch.distributed as dist +from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors + +from colossalai.core import global_context as gpc +from colossalai.registry import GRADIENT_HANDLER +from ._base_gradient_handler import BaseGradientHandler +from ...context.parallel_mode import ParallelMode + + +@GRADIENT_HANDLER.register_module +class DataParallelGradientHandler(BaseGradientHandler): + """A helper class to handle all-reduce operations in a data parallel group. + A all-reduce collective communication will be operated in + :func:`handle_gradient` among a data parallel group. + For better performance, it bucketizes the gradients of all parameters that are + the same type to improve the efficiency of communication. + """ + + def handle_gradient(self): + """A method running a all-reduce operation in a data parallel group. + """ + # TODO: add memory buffer + if gpc.data_parallel_size > 1: + # bucketize and all-reduce + buckets = {} + # Pack the buckets. + for param in self._model.parameters(): + if param.requires_grad and param.grad is not None: + tp = param.data.type() + if tp not in buckets: + buckets[tp] = [] + buckets[tp].append(param) + param.main_grad = param.grad + + # For each bucket, all-reduce and copy all-reduced grads. + for tp in buckets: + bucket = buckets[tp] + grads = [param.grad.data for param in bucket] + coalesced = _flatten_dense_tensors(grads) + coalesced /= gpc.get_world_size(ParallelMode.DATA) + + dist.all_reduce( + coalesced, group=gpc.get_group(ParallelMode.DATA)) + for buf, synced in zip(grads, _unflatten_dense_tensors( + coalesced, grads)): + buf.copy_(synced) diff --git a/colossalai/engine/gradient_handler/_zero_gradient_handler.py b/colossalai/engine/gradient_handler/_zero_gradient_handler.py new file mode 100644 index 000000000..b303bcb39 --- /dev/null +++ b/colossalai/engine/gradient_handler/_zero_gradient_handler.py @@ -0,0 +1,16 @@ +from colossalai.registry import GRADIENT_HANDLER +from ._base_gradient_handler import BaseGradientHandler + + +@GRADIENT_HANDLER.register_module +class ZeROGradientHandler(BaseGradientHandler): + """A helper class to handle all-reduce operations in a data parallel group. + A all-reduce collective communication will be operated in + :func:`handle_gradient` among a data parallel group. + This class is specialized with ZeRO optimization. + """ + + def handle_gradient(self): + """A method running a all-reduce operation in a data parallel group. 
+ """ + self._optimizer.allreduce_gradients() diff --git a/colossalai/engine/schedule/__init__.py b/colossalai/engine/schedule/__init__.py new file mode 100644 index 000000000..dba95469b --- /dev/null +++ b/colossalai/engine/schedule/__init__.py @@ -0,0 +1,5 @@ +from ._base_schedule import BaseSchedule +from ._no_pipeline import NoPipelineSchedule +from ._pipeline import PipelineSchedule + +__all__ = ['BaseSchedule', 'NoPipelineSchedule', 'PipelineSchedule'] diff --git a/colossalai/engine/schedule/_base_schedule.py b/colossalai/engine/schedule/_base_schedule.py new file mode 100644 index 000000000..c64031c09 --- /dev/null +++ b/colossalai/engine/schedule/_base_schedule.py @@ -0,0 +1,129 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from abc import ABC, abstractmethod + +import torch + +from colossalai.logging import get_global_dist_logger +from colossalai.utils import get_current_device + + +class BaseSchedule(ABC): + """A basic helper class to control the process of training or evaluation. + """ + def __init__(self): + self.initialized = False + self.logger = get_global_dist_logger() + + @property + @abstractmethod + def num_steps(self): + """The number of batches in training or evaluation. + """ + pass + + def initialize(self, + dataloader=None, + model=None, + criterion=None, + optimizer=None, + lr_scheduler=None): + """Initializes the schedule and set parameters before running. + + :param dataloader: DataLoader in training or evaluation + :param model: The neural network model + :param criterion: Criterion for calculating loss + :param optimizer: Optimizer for updating the parameters + :param lr_scheduler: Learning rate scheduler in the process + """ + self.dataloader = dataloader + assert model is not None, "Schedule requires a model" + self.model = model + assert criterion is not None, "Schedule requires a criterion" + self.criterion = criterion + assert optimizer is not None, "Schedule requires an optimizer" + self.optimizer = optimizer + self.lr_scheduler = lr_scheduler + self.initialized = True + + def check_initialized(self): + """Checks whether the schedule is initialized. + """ + assert self.initialized, \ + 'Schedule is not initialized. Call schedule.initialize(...) before using it.' + + def load_batch(self): + """Loads a batch of dataset. It returns the data and labels which are + already in the same GPU as where the model's. + + :return: (data, label) + :rtype: (Tensor, Tensor) + """ + self.check_initialized() + if self.data_iter is None: + raise RuntimeError('Dataloader is not defined.') + data, label = next(self.data_iter) + return self._move_to_device(data), self._move_to_device(label) + + def _move_to_device(self, data): + if isinstance(data, ( + tuple, + list, + )): + data = tuple([ + d.to(get_current_device()).detach() for d in data + if torch.is_tensor(d) + ]) + elif torch.is_tensor(data): + data = data.to(get_current_device()).detach() + return data + + def train(self, dataloader=None, mode=True): + """Sets the dataloader to be used and turn the model to + training or evaluation mode. + + :param dataloader: Dataloader to be used + :param mode: If True, the model will set as training mode. Otherwise, evaluation mode. + """ + self.check_initialized() + if mode: + self.model.train() + else: + self.model.eval() + if dataloader is not None: + self.dataloader = dataloader + self.data_iter = iter(dataloader) + + def zero_grad(self, forward_only=False): + """Cleans gradients with the optimizer. 
+ """ + if not forward_only: + self.check_initialized() + self.optimizer.zero_grad() + + def get_lr(self): + """Returns the current learning rate. + """ + if self.lr_scheduler is not None: + return self.lr_scheduler.get_lr()[0] + else: + return self.optimizer.param_groups[0]['lr'] + + def step(self): + """Updates the parameters and learning rate with the optimizer. + """ + self.check_initialized() + self.optimizer.step() + # update lr scheduler + if self.lr_scheduler is not None: + self.lr_scheduler.step() + + @abstractmethod + def forward_backward_step(self, forward_only=False, return_loss=True): + """The process function over a batch of dataset for training or evaluation. + + :param forward_only: If True, the process won't include backward. + :param return_loss: If False, the loss won't be returned. + """ + pass diff --git a/colossalai/engine/schedule/_no_pipeline.py b/colossalai/engine/schedule/_no_pipeline.py new file mode 100644 index 000000000..3ab1fa2d3 --- /dev/null +++ b/colossalai/engine/schedule/_no_pipeline.py @@ -0,0 +1,185 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +try: + import apex.amp as apex_amp +except: + print('apex is required for mixed precision training') +try: + import torch.cuda.amp as torch_amp +except: + print('PyTorch amp is not supported with the current PyTorch version') + +from colossalai.context import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.engine.amp_type import AMP_TYPE +from colossalai.nn import (ZeroRedundancyOptimizer_Level_2, + ZeroRedundancyOptimizer_Level_3) +from ._utils import convert_to_fp16 +from ._base_schedule import BaseSchedule + + +class NoPipelineSchedule(BaseSchedule): + """A helper schedule class for no pipeline parallelism running environment. + During one process, it loads a batch of dataset and feeds it to the model. + After getting the output and calculating the loss, it will use :meth:`step` + to update the parameters if it is in training mode. + + :param amp_type: The type of automatic mixed precision + :param amp_config: The configuration of automatic mixed procision + :type amp_type: AMP_TYPE + :type amp_config: dict + """ + def __init__( + self, + amp_type: AMP_TYPE = None, + amp_config: dict = None, + ): + super().__init__() + + # mixed precision training + assert amp_type is None or isinstance(amp_type, AMP_TYPE), \ + 'unrecognised value for argument fp16, it can only be None, torch or apex' + + # LSG: check compatibility + # LSG: torch.cuda.amp and apex.amp cannot be used for tensor parallel + if gpc.is_initialized(ParallelMode.TENSOR) and gpc.get_world_size( + ParallelMode.TENSOR) > 1: + assert amp_type != AMP_TYPE.TORCH and amp_type != AMP_TYPE.APEX, \ + 'You can only AMP_TYPE.PARALLEL for tensor parallel training' + self.use_zero_level_2_3 = False + + if amp_type is not None: + self.fp16 = True + self.amp_type = amp_type + + if amp_config is not None: + assert isinstance(amp_config, dict), \ + f'expected argument fp16_config to be type dictionary, but got {type(amp_config)}' + + if self.amp_type == AMP_TYPE.TORCH: + # torch apex + if amp_config is None: + amp_config = dict() + self.amp_cfg = amp_config + elif self.amp_type == AMP_TYPE.APEX: + # apex amp + if amp_config is None: + amp_config = dict(opt_level='O2') + self.logger.warning( + 'apex is deprecated, please consider using torch.cuda.amp instead.' 
+ ) + self.amp_cfg = amp_config + elif self.amp_type == AMP_TYPE.PARALLEL: + # use fp16 optimizer for tensor parallel training + if amp_config is None: + amp_config = dict() + self.amp_cfg = amp_config + else: + self.fp16 = False + self.amp_type = None + + @property + def num_steps(self): + return len(self.dataloader) + + def initialize(self, + dataloader, + model, + criterion, + optimizer, + lr_scheduler=None): + super().initialize(dataloader, + model, + criterion, + optimizer, + lr_scheduler=lr_scheduler) + if isinstance(self.optimizer, (ZeroRedundancyOptimizer_Level_2, + ZeroRedundancyOptimizer_Level_3)): + self.use_zero_level_2_3 = True + assert self.amp_type != AMP_TYPE.PARALLEL, 'ZeRO Level 2 and 3 are mutually exclusive with AMP_TYPE.PARALLEL' + + if self.fp16: + if self.amp_type == AMP_TYPE.TORCH: + self._torch_amp_scaler = torch_amp.GradScaler(**self.amp_cfg) + elif self.amp_type == AMP_TYPE.APEX: + self.model, self.optimizer = apex_amp.initialize( + self.model, self.optimizer, **self.amp_cfg) + + def forward_backward_step(self, forward_only=False, return_loss=True): + """The process function that loads loads a batch of dataset and feeds it to the model. + The returned labels and loss will None if :attr:`return_loss` is False. + + :return: (output, label, loss) + """ + assert forward_only or return_loss, \ + 'The argument \'return_loss\' has to be True when \'forward_only\' is False, but got False.' + + data, label = self.load_batch() + loss = None + + # LSG: leave for debug, make sure dataloader is deterministic + # if forward_only: + # img = data[0] + # rank = gpc.get_local_rank(ParallelMode.DATA) + # world_size = gpc.get_world_size(ParallelMode.DATA) + # group = gpc.get_group(ParallelMode.DATA) + # input_list = [img.clone() for _ in range(world_size)] + # output_list = [torch.empty_like(img) for _ in range(world_size)] + # output_list[rank] = img.clone() + # dist.all_to_all(output_tensor_list=output_list, input_tensor_list=input_list, group=group) + # assert torch.equal(output_list[0], output_list[1]) # and torch.equal(output_list[1], output_list[2]) + + # forward + if self.fp16 and self.amp_type == AMP_TYPE.TORCH: + with torch_amp.autocast(): + output = self.model(*data) + if not isinstance(output, (tuple, list)): + output = (output,) + if return_loss: + loss = self.criterion(*output, *label) + else: + if self.use_zero_level_2_3 or self.amp_type == AMP_TYPE.PARALLEL: + data = convert_to_fp16(data) + + output = self.model(*data) + if not isinstance(output, (tuple, list)): + output = (output,) + if return_loss: + loss = self.criterion(*output, *label) + + if not forward_only: + # backward + if self.use_zero_level_2_3: + self.optimizer.backward(loss) + elif self.fp16: + if self.amp_type == AMP_TYPE.APEX: + with apex_amp.scale_loss(loss, + self.optimizer) as scaled_loss: + scaled_loss.backward() + elif self.amp_type == AMP_TYPE.TORCH: + self._torch_amp_scaler.scale(loss).backward() + elif self.amp_type == AMP_TYPE.PARALLEL: + loss = self.optimizer.scale_loss(loss) + loss.backward() + # scale back to display the original value in logs + loss.div_(self.optimizer.grad_scaler.scale) + else: + loss.backward() + + if return_loss: + return output, label, loss + else: + return output, None, None + + def step(self): + # step optimizer + if self.fp16 and self.amp_type == AMP_TYPE.TORCH: + self._torch_amp_scaler.step(self.optimizer) + self._torch_amp_scaler.update() + else: + self.optimizer.step() + + # update lr scheduler + if self.lr_scheduler is not None: + self.lr_scheduler.step() diff 
--git a/colossalai/engine/schedule/_pipeline.py b/colossalai/engine/schedule/_pipeline.py new file mode 100644 index 000000000..0b477c0d5 --- /dev/null +++ b/colossalai/engine/schedule/_pipeline.py @@ -0,0 +1,316 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from typing import Union + +import torch.cuda +import torch.distributed as dist +from torch import Tensor + +from colossalai.communication import * +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.nn import (ZeroRedundancyOptimizer_Level_2, + ZeroRedundancyOptimizer_Level_3) +from colossalai.utils import get_current_device +from ._base_schedule import BaseSchedule +from ._utils import convert_to_fp16 +from ..amp_type import AMP_TYPE + + +def squeeze(x: Union[Tensor, tuple, list]): + if isinstance(x, (tuple, list)): + return x[0] + else: + return x + + +class PipelineSchedule(BaseSchedule): + """A helper schedule class for pipeline parallelism running environment. + It uses non-interleaved 1F1B strategy. Other properties are similar as + :class:`NoPipelineSchedule`. + + :param num_microbatches: The number of microbatches + :param amp_type: The type of automatic mixed precision + :param amp_config: The configuration of automatic mixed procision + :type num_microbatches: int + :type amp_type: AMP_TYPE + :type amp_config: dict + """ + + def __init__(self, + num_microbatches, + amp_type: AMP_TYPE = None, + amp_config: dict = None): + super().__init__() + + self.num_microbatches = num_microbatches + self.data_sync = True # close after making sure data is identical + + # amp + # LSGL: amp_config is not used, but leave here for future extension + self.amp_type = amp_type + self.amp_config = amp_config + + if self.amp_type is not None: + assert self.amp_type == AMP_TYPE.PARALLEL, 'We only support AMP_TYPE.PARALLEL for pipeline training for now' + + def _move_to_device(self, data): + if isinstance(data, ( + tuple, + list, + )): + assert len(data) == 1, "Data tuple's length in pipeline should be 1" + data = data[0] + assert torch.is_tensor(data), "Data in pipeline should be tensor" + data = data.to(get_current_device()).detach() + return data + + def _sync_data(self): + if gpc.is_first_rank(ParallelMode.PIPELINE): + src_rank = gpc.get_global_rank() + dist.broadcast( + tensor=self.batch_data, + src=src_rank, + group=gpc.get_group(ParallelMode.PIPELINE_PREV) + ) + dist.broadcast( + tensor=self.batch_label, + src=src_rank, + group=gpc.get_group(ParallelMode.PIPELINE_PREV) + ) + if gpc.is_last_rank(ParallelMode.PIPELINE): + src_rank = gpc.get_next_global_rank(ParallelMode.PIPELINE) + dist.broadcast( + tensor=self.batch_data, + src=src_rank, + group=gpc.get_group(ParallelMode.PIPELINE_NEXT) + ) + dist.broadcast( + tensor=self.batch_label, + src=src_rank, + group=gpc.get_group(ParallelMode.PIPELINE_NEXT) + ) + + # Pipeline schedule just puts data in memory + def load_batch(self): + self.check_initialized() + if self.data_iter is None: + raise RuntimeError('Dataloader is not defined.') + self.batch_pos = 0 + data, label = next(self.data_iter) + self.batch_data, self.batch_label = \ + self._move_to_device(data), self._move_to_device(label) + batch_size = self.batch_data.shape[0] + assert batch_size % self.num_microbatches == 0, \ + "Batch size should divided by the number of microbatches" + self.microbatch_size = batch_size // self.num_microbatches + if self.data_sync: + self._sync_data() + + def _get_data_slice(self, tensor): + return tensor[self.batch_pos: 
self.batch_pos + self.microbatch_size] + + def load_micro_batch(self): + data = self._get_data_slice(self.batch_data) + label = self._get_data_slice(self.batch_label) + self.batch_pos += self.microbatch_size + return (data,), (label,) + + @property + def num_steps(self): + return len(self.dataloader) + + def initialize(self, + dataloader, + model, + criterion, + optimizer, + lr_scheduler=None): + super().initialize(dataloader, + model, + criterion, + optimizer, + lr_scheduler=lr_scheduler) + if isinstance(self.optimizer, (ZeroRedundancyOptimizer_Level_2, + ZeroRedundancyOptimizer_Level_3)): + raise TypeError( + "Pipeline schedule is currently not compatible with ZeRO Level 2 and Level 3" + ) + + # LSG: set default dtype to fp16 for communication + if self.amp_type == AMP_TYPE.PARALLEL: + torch.set_default_dtype(torch.half) + self.logger.info( + 'default tensor dtype is set to torch.half for fp16 training', + ranks=[0]) + + def forward_step(self, input_tensor, return_tensors, return_loss=True): + """Forward step for passed-in model. If it is the first stage, the input tensor + is obtained from data_iterator, otherwise the passed-in input_tensor is used. + Returns output tensor. This is a helper function and can be ignored by users. + """ + + if input_tensor is None: + input_tensor, label = self.load_micro_batch() + if self.amp_type == AMP_TYPE.PARALLEL: + input_tensor = convert_to_fp16(input_tensor) + input_tensor = squeeze(input_tensor) + output_tensor = self.model(input_tensor) + output_tensor = squeeze(output_tensor) + + if gpc.is_last_rank(ParallelMode.PIPELINE): + if return_loss: + input_tensor, label = self.load_micro_batch() + loss_reduced = self.criterion(output_tensor, * + label) / self.num_microbatches + return_tensors.append( + tuple((output_tensor, label[0], loss_reduced))) + return loss_reduced + else: + return_tensors.append(output_tensor) + return output_tensor + + else: + return output_tensor + + def backward_step(self, input_tensor, output_tensor, output_tensor_grad): + """Backward step through the passed-in output tensor. If it is the last stage, the + output_tensor_grad is None, otherwise it is the gradients with respect to stage's output tensor. + Returns the gradients with respect to the input tensor (None if first stage). + This is a helper function and can be ignored by users. + """ + + # Retain the grad on the input_tensor. + if input_tensor is not None: + input_tensor.retain_grad() + + # Backward pass. + if output_tensor_grad is None and self.amp_type == AMP_TYPE.PARALLEL: + output_tensor = self.optimizer.scale_loss(output_tensor) + torch.autograd.backward(output_tensor, grad_tensors=output_tensor_grad) + + # Collect the grad of the input_tensor. + input_tensor_grad = None + if input_tensor is not None: + input_tensor_grad = input_tensor.grad + + return input_tensor_grad + + def forward_backward_step(self, forward_only=True, return_loss=True): + """Runs non-interleaved 1F1B schedule, with communication between pipeline stages. + Returns a tuple with losses if the last stage, an empty tuple otherwise. + + :return: (output, label, loss) + """ + + assert forward_only or return_loss, \ + 'The argument \'return_loss\' has to be True when \'forward_only\' is False, but got False.' 
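# A minimal, framework-free sketch of how the non-interleaved 1F1B schedule in
# this method splits each stage's microbatches into warmup, steady-state and
# cooldown phases. `pipeline_size` and `stage_rank` are illustrative stand-ins
# for the ParallelMode.PIPELINE world size and local rank; the helper is only a
# reading aid, not something the schedule itself calls.
def _one_f_one_b_phases(pipeline_size: int, stage_rank: int, num_microbatches: int):
    # Earlier stages run extra warmup forwards so that at most
    # (pipeline_size - stage_rank - 1) activations are in flight on any stage.
    num_warmup = min(pipeline_size - stage_rank - 1, num_microbatches)
    num_steady = num_microbatches - num_warmup   # alternating forward/backward (1F1B) steps
    num_cooldown = num_warmup                    # backward passes left over at the end
    return num_warmup, num_steady, num_cooldown

# Example: with 4 pipeline stages and 8 microbatches, stage 0 gets (3, 5, 3)
# while the last stage gets (0, 8, 0), matching the computation below.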
+ + self.load_batch() + num_warmup_microbatches = \ + (gpc.get_world_size(ParallelMode.PIPELINE) - + gpc.get_local_rank(ParallelMode.PIPELINE) - 1) + num_warmup_microbatches = min(num_warmup_microbatches, + self.num_microbatches) + num_microbatches_remaining = self.num_microbatches - num_warmup_microbatches + + # Input, output tensors only need to be saved when doing backward passes + input_tensors = None + output_tensors = None + if not forward_only: + input_tensors = [] + output_tensors = [] + return_tensors = [] + + # Used for tensor meta information communication + ft_shape = None + bt_shape = None + fs_checker = True + + # Run warmup forward passes. + for i in range(num_warmup_microbatches): + if not gpc.is_first_rank(ParallelMode.PIPELINE): + ft_shape = recv_tensor_meta(ft_shape) + input_tensor = recv_forward(ft_shape) + output_tensor = self.forward_step(input_tensor, + return_tensors, + return_loss=return_loss) + if not gpc.is_last_rank(ParallelMode.PIPELINE): + bt_shape = output_tensor.shape + fs_checker = send_tensor_meta(output_tensor, fs_checker) + send_forward(output_tensor) + + if not forward_only: + input_tensors.append(input_tensor) + output_tensors.append(output_tensor) + + # Before running 1F1B, need to receive first forward tensor. + # If all microbatches are run in warmup / cooldown phase, then no need to + # receive this tensor here. + if num_microbatches_remaining > 0: + if not gpc.is_first_rank(ParallelMode.PIPELINE): + ft_shape = recv_tensor_meta(ft_shape) + input_tensor = recv_forward(ft_shape) + + # Run 1F1B in steady state. + for i in range(num_microbatches_remaining): + last_iteration = (i == (num_microbatches_remaining - 1)) + + output_tensor = self.forward_step(input_tensor, + return_tensors, + return_loss=return_loss) + if forward_only: + send_forward(output_tensor) + + if not last_iteration: + input_tensor = recv_forward(ft_shape) + + else: + output_tensor_grad = send_forward_recv_backward( + output_tensor, bt_shape) + + # Add input_tensor and output_tensor to end of list. + input_tensors.append(input_tensor) + output_tensors.append(output_tensor) + + # Pop input_tensor and output_tensor from the start of the list for + # the backward pass. + input_tensor = input_tensors.pop(0) + output_tensor = output_tensors.pop(0) + + input_tensor_grad = self.backward_step(input_tensor, + output_tensor, + output_tensor_grad) + + if last_iteration: + input_tensor = None + send_backward(input_tensor_grad) + else: + input_tensor = send_backward_recv_forward( + input_tensor_grad, ft_shape) + + # Run cooldown backward passes. 
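# The recv_tensor_meta / send_tensor_meta calls in the warmup loop above exist
# because a stage must learn the shape of an incoming activation before it can
# allocate a receive buffer. The sketch below shows one generic way such a shape
# handshake can be done with raw torch.distributed point-to-point calls; it
# illustrates the idea only and is not the colossalai.communication implementation.
import torch
import torch.distributed as dist

def send_shape(t: torch.Tensor, dst: int) -> None:
    ndim = torch.tensor([t.dim()], dtype=torch.long, device=t.device)
    shape = torch.tensor(list(t.shape), dtype=torch.long, device=t.device)
    dist.send(ndim, dst)     # tell the receiver how many sizes follow
    dist.send(shape, dst)    # then send the sizes themselves

def recv_shape(src: int, device: torch.device) -> torch.Size:
    ndim = torch.empty(1, dtype=torch.long, device=device)
    dist.recv(ndim, src)
    shape = torch.empty(int(ndim.item()), dtype=torch.long, device=device)
    dist.recv(shape, src)
    return torch.Size(shape.tolist())   # used to allocate the buffer for the real tensor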
+ if not forward_only: + for i in range(num_warmup_microbatches): + input_tensor = input_tensors.pop(0) + output_tensor = output_tensors.pop(0) + + output_tensor_grad = recv_backward(bt_shape) + + input_tensor_grad = self.backward_step(input_tensor, + output_tensor, + output_tensor_grad) + + send_backward(input_tensor_grad) + + if len(return_tensors) > 0: + if return_loss: + output, label, loss = tuple(map(list, zip(*return_tensors))) + return (torch.cat(output, dim=0), + torch.cat(label, dim=0), + sum(loss)) + else: + return tuple((torch.cat(return_tensors, dim=0), None, None)) + else: + return tuple((None, None, None)) diff --git a/colossalai/engine/schedule/_utils.py b/colossalai/engine/schedule/_utils.py new file mode 100644 index 000000000..9c4a2a19b --- /dev/null +++ b/colossalai/engine/schedule/_utils.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from typing import Union, List + +from torch import Tensor + + +def convert_to_fp16(data: Union[Tensor, List[Tensor]]): + if isinstance(data, Tensor): + ret = data.half() + elif isinstance(data, (list, tuple)): + ret = [val.half() for val in data] + else: + raise TypeError(f"Expected argument 'data' to be a Tensor or a list/tuple of Tensor, but got {type(data)}") + return ret diff --git a/colossalai/initialize.py b/colossalai/initialize.py new file mode 100644 index 000000000..35e8095b6 --- /dev/null +++ b/colossalai/initialize.py @@ -0,0 +1,371 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import argparse +import pprint +import random +from pathlib import Path +from typing import Callable, Iterable, Optional, Union + +import numpy as np +import torch +from torch.utils.data import DataLoader + +from colossalai.engine import AMP_TYPE, NoPipelineSchedule, PipelineSchedule +from colossalai.logging import get_global_dist_logger, init_global_dist_logger +from colossalai.nn import DataParallelSampler +from colossalai.nn.model.base_model import BaseModel +from .builder import (ModelInitializer, build_dataset, build_loss, + build_lr_scheduler, build_model, build_optimizer, + build_optimizer_wrapper) +from .context import Config, ParallelMode +from .core import global_context as gpc +from .utils import get_current_device, sync_model_param_in_dp + + +def parse_args(): + '''Reads user command line and uses an argument parser to parse the input arguments. + Input arguments include configuration, host, port, world size, local rank, backend for torch.distributed. + + :return: call the parse arguments function + :rtype: Namespace + ''' + parser = argparse.ArgumentParser() + parser.add_argument('--config', type=str, help='path to the config file') + parser.add_argument('--host', + type=str, + default=None, + help='the master address for distributed training') + parser.add_argument('--port', + type=str, + default=None, + help='the master port for distributed training') + parser.add_argument('--world_size', type=int, help='world size for ') + parser.add_argument('--local_rank', + type=int, + help='rank for the default process group') + parser.add_argument('--backend', + type=str, + default='nccl', + help='backend for torch.distributed') + return parser.parse_args() + + +def init_dist(config: Union[str, dict] = None, + local_rank: int = None, + world_size: int = None, + host: str = None, + port: str = None, + backend: str = None): + '''This function first parses the configuration arguments, using :func:parse_args() in case one of the input arguments are not given. 
+ Then initialize and set distributed environment by calling global_context's functions. + + :param config: config file or config file path are both acceptable + :type config: Union[str, dict], optional + :param local_rank: rank for the default process group, defaults to None + :type local_rank: int, optional + :param world_size: world size of GPUs, defaults to None + :type world_size: int, optional + :param host: the master address for distributed training, defaults to None + :type host: str, optional + :param port: the master port for distributed training, defaults to None + :type port: str, optional + :param backend: backend for torch.distributed, defaults to None + :type backend: str, optional + :raises Exception: raise exception when config type is wrong + ''' + args = [config, local_rank, world_size, host, port, backend] + arg_given = [arg is not None for arg in args] + + if not all(arg_given): + args = parse_args() + + if config is None: + config = args.config + if local_rank is None: + local_rank = args.local_rank + if world_size is None: + world_size = args.world_size + if host is None: + host = args.host + if port is None: + port = args.port + if backend is None: + backend = args.backend + args = Config( + dict(config=config, + host=host, + port=port, + world_size=world_size, + local_rank=local_rank, + backend=backend)) + + # set distributed settings + dist_args = Config( + dict(local_rank=args.local_rank, + world_size=args.world_size, + backend=args.backend)) + + gpc.set_dist_args(dist_args) + + # set config + if isinstance(args.config, dict): + cfg = args.config + elif isinstance(args.config, (str, Path)): + cfg = Config.from_file(args.config) + else: + raise Exception('Config type error: {}'.format(type(args.config))) + gpc.load_config(cfg) + + # init dist groups + gpc.init_global_dist(args.host, args.port) + gpc.init_parallel_groups() + + # init dist logger + init_global_dist_logger() + + # set cuda device + if torch.cuda.is_available(): + gpc.set_device() + + +def get_dataloader(dataset, seed=1024, add_sampler_if_possible=False, **kwargs): + '''Set up a deterministic dataloader (also configure seed workers, samplers and whether shuffle or not) + + .. 
note: when pipeline parallel is enabled, shuffle cannot be True + as it will result in mismatch between input data on the 1st + stage and label on the last stage + + :param dataset: a :class:utils.data.dataset dataset + :param seed: random worker seed, defaults to 1024 + :type seed: int, optional + :param add_sampler_if_possible: [description], defaults to False + :type add_sampler_if_possible: bool, optional + :return: a :class:utils.data.dataset dataloader + :rtype: torch.utils.data.dataset + ''' + _kwargs = kwargs.copy() + if 'shuffle' in _kwargs: + shuffle = _kwargs.pop('shuffle') + else: + shuffle = False + + if add_sampler_if_possible and gpc.is_initialized(ParallelMode.DATA) and gpc.get_world_size(ParallelMode.DATA) > 1: + sampler = DataParallelSampler(dataset, shuffle=shuffle) + else: + sampler = None + + # Deterministic dataloader + def seed_worker(worker_id): + worker_seed = seed + np.random.seed(worker_seed) + torch.manual_seed(worker_seed) + random.seed(worker_seed) + + if sampler is None: + return DataLoader(dataset, + worker_init_fn=seed_worker, + shuffle=shuffle, + **_kwargs) + else: + return DataLoader(dataset, + sampler=sampler, + worker_init_fn=seed_worker, + **_kwargs) + + +def initialize(config: Union[str, dict] = None, + local_rank: int = None, + world_size: int = None, + host: str = None, + port: str = None, + backend: str = None, + train_dataloader: Optional[Union[Iterable, Callable]] = None, + test_dataloader: Optional[Union[Iterable, Callable]] = None, + ): + '''Core function that initializes distributed environment, logger, cudnn, data, model, loss function, optimizer, and lr_scheduler(their configs are in gpc.config). + + :param config: config file or config file path are both acceptable + :type config: Union[str, dict], optional + :param local_rank: rank for the default process group, defaults to None + :type local_rank: int, optional + :param world_size: world size of GPUs, defaults to None + :type world_size: int, optional + :param host: the master address for distributed training, defaults to None + :type host: str, optional + :param port: the master port for distributed training, defaults to None + :type port: str, optional + :param backend: backend for torch.distributed, defaults to None + :type backend: str, optional + :param train_dataloader: If None, the config is used to build a dataloder; Else, it should be a dataloader object or a function with no arguments which can build a dataloader, defaults to None + :type train_dataloader: Optional[Union[Iterable, Callable]], optional + :param test_dataloader: If None, the config is used to build a dataloder; Else, it should be a dataloader object or a function with no arguments which can build a dataloader, defaults to None + :type test_dataloader: Optional[Union[Iterable, Callable]], optional + :return: (model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler) + :rtype: tuple + ''' + # initialize distributed environment + init_dist(config=config, + local_rank=local_rank, + world_size=world_size, + host=host, + port=port, + backend=backend) + + # init logger + logger = get_global_dist_logger() + logger.info(f'Distributed environment is initialized, ' + f'data parallel size: {gpc.data_parallel_size}, pipeline parallel size: {gpc.pipeline_parallel_size}, ' + f'tensor parallel size: {gpc.tensor_parallel_size}', ranks=[0]) + + # print config + logger.info(f"\n========== Your Config ========\n" + f"{pprint.pformat(gpc.config)}\n" + f"================================", ranks=[0]) + + # 
cudnn + cudnn_benchmark = gpc.config.get('cudnn_benchmark', True) + cudnn_deterministic = gpc.config.get('cudnn_deterministic', False) + torch.backends.cudnn.benchmark = cudnn_benchmark + torch.backends.cudnn.deterministic = cudnn_deterministic + logger.info( + f"cuDNN benchmark = {cudnn_benchmark}, deterministic = {cudnn_deterministic}", ranks=[0]) + + # set seed, cuda seed is only set when cuda is avail + gpc.set_seed() + + # return_items = list() + + # check fp16 and zero + should_convert_model_to_half = False + should_wrap_fp16_optimizer = False + should_wrap_zero_optimizer_level_2_3 = False + + if hasattr(gpc.config, 'fp16'): + fp16_mode = gpc.config.fp16.mode + if fp16_mode == AMP_TYPE.PARALLEL: + should_convert_model_to_half = True + should_wrap_fp16_optimizer = True + + if hasattr(gpc.config, 'zero'): + should_wrap_zero_optimizer_level_2_3 = True + zero_type = gpc.config.zero.type + if zero_type in ['ZeroRedundancyOptimizer_Level_2', 'ZeroRedundancyOptimizer_Level_3']: + should_convert_model_to_half = True + assert not should_wrap_fp16_optimizer, \ + 'AMP_TYPE.PARALLEL is mutually exclusive with zero level 2 and 3' + + # build model + logger.info('Building model ...', ranks=[0]) + assert hasattr( + gpc.config, 'model'), "Build error: configuration 'model' is missing" + if gpc.pipeline_parallel_size > 1: + model = ModelInitializer(gpc.config.model, 1, verbose=True) + model = model.model_initialize() + else: + model = build_model(gpc.config.model) + if isinstance(model, BaseModel): + model.build_from_cfg() + model = model.to(get_current_device()) + sync_model_param_in_dp(model) + logger.info('Model is created', ranks=[0]) + + if should_convert_model_to_half: + model = model.half() + logger.info("Model is cast to fp16", ranks=[0]) + + # training data + if callable(train_dataloader): + logger.info( + f'Build train data loader from {train_dataloader}', ranks=[0]) + train_dataloader = train_dataloader() + if train_dataloader is None and hasattr(gpc.config, 'train_data'): + logger.info('Preparing data ...', ranks=[0]) + # assert hasattr(gpc.config, 'train_data'), "Build error: configuration 'train_data' is missing." + train_dataset = build_dataset(gpc.config.train_data.dataset) + logger.info('Train dataset is ready.', ranks=[0]) + + train_dataloader = get_dataloader(train_dataset, + gpc.config.get('seed', 1024), + True, + **gpc.config.train_data.dataloader, + ) + logger.info( + f'Loaded {len(train_dataset)} samples in {len(train_dataloader)} batches for training', ranks=[0]) + + if callable(test_dataloader): + logger.info( + f'Build test data loader from {test_dataloader}', ranks=[0]) + test_dataloader = test_dataloader() + # testing data, allowed to be None + if test_dataloader is None and hasattr(gpc.config, 'test_data'): + test_dataset = build_dataset(gpc.config.test_data.dataset) + test_dataloader = get_dataloader( + test_dataset, add_sampler_if_possible=True, **gpc.config.test_data.dataloader) + logger.info( + f'Loaded {len(test_dataset)} samples in {len(test_dataloader)} batches for testing', ranks=[0]) + + # build loss function + assert hasattr(gpc.config, 'loss'), \ + 'Build error: configuration \'loss\' is missing.' + criterion = build_loss(gpc.config.loss) + logger.info('Loss function is created', ranks=[0]) + + # build optimizer + assert hasattr(gpc.config, 'optimizer'), \ + "Build error: configuration 'optimizer' is missing." 
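# A hedged sketch of the kind of config this function consumes. The field names
# mirror the attributes read here (model, loss, optimizer, train_data, ...);
# the concrete types and values are illustrative assumptions rather than a
# verified Colossal-AI configuration.
example_config = dict(
    model=dict(type='VanillaResNet'),                      # consumed by build_model(); extra args omitted
    loss=dict(type='CrossEntropyLoss'),                    # consumed by build_loss()
    optimizer=dict(type='SGD', lr=0.1, momentum=0.9),      # consumed by build_optimizer()
    train_data=dict(dataset=dict(type='CIFAR10Dataset'),   # consumed by build_dataset()
                    dataloader=dict(batch_size=64)),       # forwarded to get_dataloader()
)
# Passing such a dict (or the path of a .py file defining the same fields) as
# `config` to initialize(...) takes the non-ZeRO, non-pipeline path here and
# returns a NoPipelineSchedule along with the built model, dataloaders,
# criterion and optimizer.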
+ optim_type = gpc.config.optimizer.type + is_pytorch_native_zero_level_1 = optim_type == 'ZeroRedundancyOptimizer' + if is_pytorch_native_zero_level_1: + original_cfg_copy = gpc.config.optimizer.copy() + original_cfg_copy.pop('type') + cfg = dict(type=optim_type, process_group=gpc.get_group( + ParallelMode.DATA), **original_cfg_copy) + optimizer = build_optimizer(cfg, model) + else: + optimizer = build_optimizer(gpc.config.optimizer, model) + + if should_wrap_zero_optimizer_level_2_3: + optimizer = build_optimizer_wrapper(gpc.config.zero, optimizer, model) + + if should_wrap_fp16_optimizer: + # replace the field mode with type + fp16_cfg = gpc.config.fp16.copy() + amp_type = fp16_cfg.pop('mode') + assert amp_type == AMP_TYPE.PARALLEL, 'FP Optimizer should only be used for AMP_TYPE.PARALLEL' + fp16_cfg['type'] = 'FP16Optimizer' + optimizer = build_optimizer_wrapper(fp16_cfg, optimizer) + logger.info('Optimizer is created', ranks=[0]) + + lr_scheduler = None + if hasattr(gpc.config, 'lr_scheduler'): + if hasattr(gpc.config, 'num_steps'): + total_steps = gpc.config.num_steps + elif hasattr(gpc.config, 'num_epochs'): + total_steps = int(gpc.config.num_epochs * len(train_dataloader)) + else: + raise Exception( + 'Please specify training stopping criterion num_steps or num_epochs in your configuration.' + ) + lr_scheduler = build_lr_scheduler(gpc.config.lr_scheduler, optimizer, + total_steps, len(train_dataloader)) + logger.info('Learning rate scheduler is created', ranks=[0]) + + # pipeline or no pipeline schedule + if hasattr(gpc.config, 'fp16'): + amp_type = gpc.config.fp16.mode + amp_cfg = gpc.config.fp16.copy() + amp_cfg.pop('mode') + else: + amp_type = None + amp_cfg = None + + if gpc.is_initialized(ParallelMode.PIPELINE) and gpc.get_world_size(ParallelMode.PIPELINE) > 1: + assert hasattr(gpc.config, + 'schedule'), "Config 'schedule' not found in your configuration file for pipeline parallel training" + schedule = PipelineSchedule( + amp_type=amp_type, amp_config=amp_cfg, **gpc.config.schedule.copy()) + else: + schedule = NoPipelineSchedule(amp_type=amp_type, amp_config=amp_cfg) + + return model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler diff --git a/colossalai/logging/__init__.py b/colossalai/logging/__init__.py new file mode 100644 index 000000000..71657557f --- /dev/null +++ b/colossalai/logging/__init__.py @@ -0,0 +1,26 @@ +from colossalai.core import global_context as gpc +from .logging import DistributedLogger + +__all__ = ['get_global_dist_logger', 'get_dist_logger', 'DistributedLogger', 'init_global_dist_logger'] + +_GLOBAL_LOGGER: DistributedLogger = None + + +def get_dist_logger(name, level='INFO', root_path: str = None, mode='a'): + return DistributedLogger(name=name, level=level, root_path=root_path, mode=mode) + + +def get_global_dist_logger(): + assert _GLOBAL_LOGGER is not None, 'Global distributed logger is not initialized' + return _GLOBAL_LOGGER + + +def init_global_dist_logger(): + rank = gpc.get_global_rank() + if hasattr(gpc.config, 'logging'): + logger = get_dist_logger(name=f'rank_{rank}', **gpc.config.logging) + else: + logger = get_dist_logger(name=f'rank_{rank}', level='INFO') + global _GLOBAL_LOGGER + assert _GLOBAL_LOGGER is None, 'Global distributed logger has already been initialized' + _GLOBAL_LOGGER = logger diff --git a/colossalai/logging/logging.py b/colossalai/logging/logging.py new file mode 100644 index 000000000..b8a79c491 --- /dev/null +++ b/colossalai/logging/logging.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python +# 
-*- encoding: utf-8 -*- + +import logging +from pathlib import Path + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc + +_FORMAT = 'colossalai - %(name)s - %(asctime)s %(levelname)s: %(message)s' +logging.basicConfig(level=logging.INFO, format=_FORMAT) + + +class DistributedLogger: + """This is a distributed event logger class essentially based on :class:`logging`. + + :param name: The name of the logger + :type name: str + :param level: The threshold for the logger. Logging messages which are less severe than `level` + will be ignored + :type level: str + :param root_path: The root path where logs are stored + :type root_path: str, optional + :param mode: The mode that the file is opened in. Defaults to 'a' + :type mode: str, optional + """ + + def __init__(self, name, level='INFO', root_path: str = None, mode='a'): + self._logger = logging.getLogger(name) + self._logger.setLevel(getattr(logging, level)) + + if root_path is not None: + log_root_path = Path(root_path) + # create path if not exists + log_root_path.mkdir(parents=True, exist_ok=True) + log_path = log_root_path.joinpath(f'{name}.log') + file_handler = logging.FileHandler(log_path, mode) + file_handler.setLevel(getattr(logging, level)) + formatter = logging.Formatter(_FORMAT) + file_handler.setFormatter(formatter) + self._logger.addHandler(file_handler) + + def _log(self, level, message: str, parallel_mode: ParallelMode = ParallelMode.GLOBAL, ranks: list = None): + if ranks is None: + getattr(self._logger, level)(message) + else: + local_rank = gpc.get_local_rank(parallel_mode) + if local_rank in ranks: + getattr(self._logger, level)(message) + + def info(self, message: str, parallel_mode: ParallelMode = ParallelMode.GLOBAL, ranks: list = None): + """Stores an info log message. + + :param message: + :type message: + :param parallel_mode: + :type parallel_mode: + :param ranks: + :type ranks: + """ + self._log('info', message, parallel_mode, ranks) + + def warning(self, message: str, parallel_mode: ParallelMode = ParallelMode.GLOBAL, ranks: list = None): + """Stores a warning log message. + + :param message: The message to be logged + :type message: str + :param parallel_mode: The parallel mode used for logging. Defaults to ParallelMode.GLOBAL + :type parallel_mode: :class:`colossalai.context.parallel_mode.ParallelMode` + :param ranks: List of parallel ranks + :type ranks: list + """ + self._log('warning', message, parallel_mode, ranks) + + def debug(self, message: str, parallel_mode: ParallelMode = ParallelMode.GLOBAL, ranks: list = None): + """Stores a debug log message. + + :param message: The message to be logged + :type message: str + :param parallel_mode: The parallel mode used for logging. Defaults to ParallelMode.GLOBAL + :type parallel_mode: :class:`colossalai.context.parallel_mode.ParallelMode` + :param ranks: List of parallel ranks + :type ranks: list + """ + self._log('debug', message, parallel_mode, ranks) + + def error(self, message: str, parallel_mode: ParallelMode = ParallelMode.GLOBAL, ranks: list = None): + """Stores an error log message. + + :param message: The message to be logged + :type message: str + :param parallel_mode: The parallel mode used for logging. 
Defaults to ParallelMode.GLOBAL + :type parallel_mode: :class:`colossalai.context.parallel_mode.ParallelMode` + :param ranks: List of parallel ranks + :type ranks: list + """ + self._log('error', message, parallel_mode, ranks) diff --git a/colossalai/nn/__init__.py b/colossalai/nn/__init__.py new file mode 100644 index 000000000..69fd61594 --- /dev/null +++ b/colossalai/nn/__init__.py @@ -0,0 +1,6 @@ +from .data import * +from .layer import * +from .loss import * +from .lr_scheduler import * +from .model import * +from .optimizer import * diff --git a/colossalai/nn/data/__init__.py b/colossalai/nn/data/__init__.py new file mode 100644 index 000000000..d94afe2da --- /dev/null +++ b/colossalai/nn/data/__init__.py @@ -0,0 +1,3 @@ +from .caltech101_dataset import Caltech101Dataset +from .cifar10_dataset import CIFAR10Dataset +from .sampler import * diff --git a/colossalai/nn/data/_utils.py b/colossalai/nn/data/_utils.py new file mode 100644 index 000000000..08d77e0da --- /dev/null +++ b/colossalai/nn/data/_utils.py @@ -0,0 +1,14 @@ +import numpy as np + + +def pil_img_to_numpy(pil_img): + """convert a PIL image to numpy nd-array + + :param pil_img: a PIL image + :type pil_img: PIL.Image + :return: a nd-array + :rtype: numpy.ndarray + """ + np_img = np.array(pil_img) + np_img = np.rollaxis(np_img, 2) # HWC to CHW + return np_img diff --git a/colossalai/nn/data/base_dataset.py b/colossalai/nn/data/base_dataset.py new file mode 100644 index 000000000..730b37649 --- /dev/null +++ b/colossalai/nn/data/base_dataset.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from abc import ABC + +from torch.utils.data import Dataset +from torchvision.transforms import transforms + +from colossalai.builder import build_transform + + +class BaseDataset(Dataset, ABC): + + def __init__(self, transform_pipeline: list): + transform_list = [build_transform(cfg) for cfg in transform_pipeline] + transform = transforms.Compose(transform_list) + self._transform_pipeline = transform diff --git a/colossalai/nn/data/caltech101_dataset.py b/colossalai/nn/data/caltech101_dataset.py new file mode 100644 index 000000000..b1dc89b68 --- /dev/null +++ b/colossalai/nn/data/caltech101_dataset.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch.distributed as dist +from torchvision.datasets import Caltech101 + +from colossalai.context import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.registry import DATASETS +from .base_dataset import BaseDataset + + +@DATASETS.register_module +class Caltech101Dataset(BaseDataset): + """`Caltech 101 `_ Dataset. + + :param transform_pipeline: A list of functions' config, which takes in an PIL image + and returns a transformed version + :type transform_pipeline: list + """ + + def __init__(self, transform_pipeline: list, *args, **kwargs): + super().__init__(transform_pipeline) + if gpc.is_initialized(ParallelMode.GLOBAL) and gpc.get_global_rank() != 0: + dist.barrier() + self._dataset = Caltech101( + transform=self._transform_pipeline, *args, **kwargs) + if gpc.is_initialized(ParallelMode.GLOBAL) and gpc.get_global_rank() == 0: + dist.barrier() + + def __len__(self): + return len(self._dataset) + + def __getitem__(self, item): + """ + + :param item: Index + :type item: int + :return: ((image,), (target,)) where the type of target specified by target_type. 
+ :rtype: tuple + """ + img, label = self._dataset.__getitem__(item) + return (img,), (label,) diff --git a/colossalai/nn/data/cifar10_dataset.py b/colossalai/nn/data/cifar10_dataset.py new file mode 100644 index 000000000..a0ce139a2 --- /dev/null +++ b/colossalai/nn/data/cifar10_dataset.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch.distributed as dist +from torchvision.datasets import CIFAR10 + +from colossalai.context import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.registry import DATASETS +from .base_dataset import BaseDataset + + +@DATASETS.register_module +class CIFAR10Dataset(BaseDataset): + """`CIFAR10 `_ Dataset. + + :param transform_pipeline: A list of functions' config, which takes in an PIL image + and returns a transformed version + :type transform_pipeline: list + """ + + def __init__(self, transform_pipeline: list, *args, **kwargs): + super().__init__(transform_pipeline) + if gpc.is_initialized(ParallelMode.GLOBAL) and gpc.get_global_rank() != 0: + dist.barrier() + self._dataset = CIFAR10(transform=self._transform_pipeline, + *args, + **kwargs) + if gpc.is_initialized(ParallelMode.GLOBAL) and gpc.get_global_rank() == 0: + dist.barrier() + + def __len__(self): + return len(self._dataset) + + def __getitem__(self, item): + """ + + :param item: Index + :type item: int + :return: ((image,), (target,)) where the type of target specified by target_type. + :rtype: tuple + """ + img, label = self._dataset.__getitem__(item) + return (img,), (label,) diff --git a/colossalai/nn/data/sampler/__init__.py b/colossalai/nn/data/sampler/__init__.py new file mode 100644 index 000000000..471add313 --- /dev/null +++ b/colossalai/nn/data/sampler/__init__.py @@ -0,0 +1,4 @@ +from .base_sampler import BaseSampler +from .data_parallel_sampler import DataParallelSampler + +__all__ = ['BaseSampler', 'DataParallelSampler'] diff --git a/colossalai/nn/data/sampler/base_sampler.py b/colossalai/nn/data/sampler/base_sampler.py new file mode 100644 index 000000000..89f3bca5b --- /dev/null +++ b/colossalai/nn/data/sampler/base_sampler.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from abc import ABC, abstractmethod + + +class BaseSampler(ABC): + + def __init__(self, dataset, batch_size): + self.dataset = dataset + self.batch_size = batch_size + + @abstractmethod + def __len__(self): + pass + + @abstractmethod + def __iter__(self): + pass diff --git a/colossalai/nn/data/sampler/data_parallel_sampler.py b/colossalai/nn/data/sampler/data_parallel_sampler.py new file mode 100644 index 000000000..2b3817e03 --- /dev/null +++ b/colossalai/nn/data/sampler/data_parallel_sampler.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- +# adpated from torch.utils.data.DistributedSampler + +import math +from typing import TypeVar, Iterator + +import torch +from torch.utils.data import Sampler, Dataset + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.registry import SAMPLERS + +T_co = TypeVar('T_co', covariant=True) + + +@SAMPLERS.register_module +class DataParallelSampler(Sampler): + """A data sampler for distributed data parallelism + + :param dataset: a Dataset instance + :type dataset: torch.utils.data.Dataset + :param shuffle: whether to shuffle data, defaults to False + :type shuffle: bool, optional + :param seed: the random seed, defaults to 0 + :type seed: int, optional + :param drop_last: set to True to drop the 
last incomplete batch, if the dataset size is not divisible by the batch size. If False and the size of dataset is not divisible by the batch size, then the last batch will be smaller, defaults to False + :type drop_last: bool, optional + """ + + def __init__(self, + dataset: Dataset, + shuffle: bool = False, + seed: int = 0, + drop_last: bool = False) -> None: + self.dataset = dataset + self.num_replicas = gpc.get_world_size(ParallelMode.DATA) + self.rank = gpc.get_local_rank(ParallelMode.DATA) + self.epoch = 0 + self.drop_last = drop_last + # If the dataset length is evenly divisible by # of replicas, then there + # is no need to drop any data, since the dataset will be split equally. + # type: ignore[arg-type] + if self.drop_last and len(self.dataset) % self.num_replicas != 0: + # Split to nearest available length that is evenly divisible. + # This is to ensure each rank receives the same amount of data when + # using this Sampler. + self.num_samples = math.ceil( + # `type:ignore` is required because Dataset cannot provide a default __len__ + # see NOTE in pytorch/torch/utils/data/sampler.py + (len(self.dataset) - self.num_replicas) / \ + self.num_replicas # type: ignore[arg-type] + ) + else: + self.num_samples = math.ceil( + len(self.dataset) / self.num_replicas) # type: ignore[arg-type] + self.total_size = self.num_samples * self.num_replicas + self.shuffle = shuffle + self.seed = seed + + def __iter__(self) -> Iterator[T_co]: + if self.shuffle: + # deterministically shuffle based on epoch and seed + g = torch.Generator() + g.manual_seed(self.seed + self.epoch) + # type: ignore[arg-type] + indices = torch.randperm(len(self.dataset), generator=g).tolist() + else: + indices = list(range(len(self.dataset))) # type: ignore[arg-type] + + if not self.drop_last: + # add extra samples to make it evenly divisible + padding_size = self.total_size - len(indices) + if padding_size <= len(indices): + indices += indices[:padding_size] + else: + indices += (indices * math.ceil(padding_size / + len(indices)))[:padding_size] + else: + # remove tail of data to make it evenly divisible. + indices = indices[:self.total_size] + assert len(indices) == self.total_size + + # subsample + indices = indices[self.rank:self.total_size:self.num_replicas] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self) -> int: + return self.num_samples + + def set_epoch(self, epoch: int) -> None: + r"""Sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas + use a different random ordering for each epoch. Otherwise, the next iteration of this + sampler will yield the same ordering. + + :param epoch: Epoch number. 
+ :type epoch: int + """ + self.epoch = epoch diff --git a/colossalai/nn/layer/__init__.py b/colossalai/nn/layer/__init__.py new file mode 100644 index 000000000..1456a8a56 --- /dev/null +++ b/colossalai/nn/layer/__init__.py @@ -0,0 +1,9 @@ +from .parallel_1d import * +from .parallel_2d import * +from .parallel_2p5d import * +from .parallel_3d import * +from .parallel_sequence import * +from .parallel_vision_transformer import * +from .vanilla_resnet import * +from .vanilla_vision_transformer import * +from .wrapper import * diff --git a/colossalai/nn/layer/_common_utils.py b/colossalai/nn/layer/_common_utils.py new file mode 100644 index 000000000..69f63ea5a --- /dev/null +++ b/colossalai/nn/layer/_common_utils.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import math + +import torch +from torch import Tensor +from torch import nn +from colossalai.utils import checkpoint + +from colossalai.constants import IS_TENSOR_PARALLEL + + +def divide(numerator, denominator): + """ only allow exact division """ + assert numerator % denominator == 0, \ + '{} is not divisible by {}'.format(numerator, denominator) + return numerator // denominator + + +def gelu(x: Tensor) -> Tensor: + """Implementation of the gelu activation function. + For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): + 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) + """ + return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) + + +def swish(x: Tensor) -> Tensor: + return x * torch.sigmoid(x) + + +ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish} + + +def set_tensor_parallel_attribute(param): + if not hasattr(param, IS_TENSOR_PARALLEL): + setattr(param, IS_TENSOR_PARALLEL, True) + + +class CheckpointModule(nn.Module): + def __init__(self, checkpoint: bool = True): + super().__init__() + self.checkpoint = checkpoint + self._use_checkpoint = checkpoint + + def _forward(self, *args): + raise NotImplementedError( + 'CheckpointModule should implement _forward method instead of origin forward') + + def forward(self, *args): + if self._use_checkpoint: + return checkpoint(self._forward, *args) + else: + return self._forward(*args) + + def train(self, mode: bool = True): + self._use_checkpoint = self.checkpoint + return super().train(mode=mode) + + def eval(self): + self._use_checkpoint = False + return super().eval() diff --git a/colossalai/nn/layer/_parallel_utilities.py b/colossalai/nn/layer/_parallel_utilities.py new file mode 100644 index 000000000..6ce5c6df3 --- /dev/null +++ b/colossalai/nn/layer/_parallel_utilities.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch +import torch.distributed as dist + +from colossalai.core import global_context as gpc + + +def _reduce(input_, parallel_mode): + # skip if only one rank involved + if gpc.get_world_size(parallel_mode) == 1: + return input_ + dist.all_reduce(input_, group=gpc.get_group(parallel_mode)) + + return input_ + + +def _split(input_, parallel_mode, dim=-1): + # skip if only one rank involved + world_size = gpc.get_world_size(parallel_mode) + if world_size == 1: + return input_ + + # Split along last dimension. 
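# A single-process illustration of the split/gather pair defined in this file:
# the forward split keeps only the local rank's chunk, and the matching gather
# (used in the backward pass of _SplitForwardGatherBackward) reassembles the
# full tensor. `rank` and `world_size` are plain ints standing in for the values
# normally taken from gpc; this is a reading aid, not library code.
import torch

def split_reference(x: torch.Tensor, rank: int, world_size: int, dim: int = -1) -> torch.Tensor:
    chunk = x.size(dim) // world_size            # exact divisibility, as asserted below
    return torch.split(x, chunk, dim=dim)[rank].contiguous()

def gather_reference(chunks, dim: int = -1) -> torch.Tensor:
    return torch.cat(chunks, dim=dim).contiguous()

x = torch.arange(8.0).reshape(2, 4)
parts = [split_reference(x, r, 2) for r in range(2)]   # each "rank" keeps a (2, 2) slice
assert torch.equal(gather_reference(parts), x)          # gather undoes split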
+ dim_size = input_.size(dim) + assert dim_size % world_size == 0, \ + f'The dimension to split ({dim_size}) is not a multiple of world size ({world_size}), ' \ + f'cannot split tensor evenly' + + tensor_list = torch.split(input_, dim_size // world_size, dim=dim) + rank = gpc.get_local_rank(parallel_mode) + output = tensor_list[rank].contiguous() + + return output + + +def _gather(input_, parallel_mode, dim=-1): + # skip if only one rank involved + world_size = gpc.get_world_size(parallel_mode) + if world_size == 1: + return input_ + + # all gather + rank = gpc.get_local_rank(parallel_mode) + tensor_list = [torch.empty_like(input_) for _ in range(world_size)] + tensor_list[rank] = input_ + torch.distributed.all_gather(tensor_list, input_, group=gpc.get_group(parallel_mode)) + + # concat + output = torch.cat(tensor_list, dim=dim).contiguous() + + return output + + +class _ReduceGrad(torch.autograd.Function): + """Pass the input to the model parallel region.""" + + @staticmethod + def symbolic(graph, input_): + return input_ + + @staticmethod + def forward(ctx, input_, parallel_mode): + ctx.mode = parallel_mode + return input_ + + @staticmethod + def backward(ctx, grad_output): + return _reduce(grad_output, ctx.mode), None + + +class _ReduceInput(torch.autograd.Function): + """All-reduce the input from the model parallel region.""" + + @staticmethod + def symbolic(graph, input_): + return _reduce(input_) + + @staticmethod + def forward(ctx, input_, parallel_mode): + return _reduce(input_, parallel_mode) + + @staticmethod + def backward(ctx, grad_output): + return grad_output, None + + +class _SplitForwardGatherBackward(torch.autograd.Function): + """Split the input and keep only the corresponding chuck to the rank.""" + + @staticmethod + def symbolic(graph, input_): + return _split(input_) + + @staticmethod + def forward(ctx, input_, parallel_mode, dim): + ctx.mode = parallel_mode + ctx.dim = dim + return _split(input_, parallel_mode, dim) + + @staticmethod + def backward(ctx, grad_output): + return _gather(grad_output, ctx.mode, ctx.dim), None, None + + +class _GatherForwardSplitBackward(torch.autograd.Function): + """Gather the input from model parallel region and concatinate.""" + + @staticmethod + def symbolic(graph, input_): + return _gather(input_) + + @staticmethod + def forward(ctx, input_, parallel_mode, dim): + ctx.mode = parallel_mode + ctx.dim = dim + return _gather(input_, parallel_mode, dim) + + @staticmethod + def backward(ctx, grad_output): + return _split(grad_output, ctx.mode, ctx.dim), None, None + + +def reduce_grad(input_, parallel_mode): + return _ReduceGrad.apply(input_, parallel_mode) + + +def reduce_input(input_, parallel_mode): + return _ReduceInput.apply(input_, parallel_mode) + + +def split_forward_gather_backward(input_, parallel_mode, dim): + return _SplitForwardGatherBackward.apply(input_, parallel_mode, dim) + + +def gather_forward_split_backward(input_, parallel_mode, dim): + return _GatherForwardSplitBackward.apply(input_, parallel_mode, dim) diff --git a/colossalai/nn/layer/base_layer.py b/colossalai/nn/layer/base_layer.py new file mode 100644 index 000000000..fd0d6ef5e --- /dev/null +++ b/colossalai/nn/layer/base_layer.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch.nn as nn + +from colossalai.context import ParallelMode +from colossalai.core import global_context as gpc + + +class ParallelLayer(nn.Module): + + def __init__(self): + super().__init__() + self.data_parallel_rank = 0 if not 
gpc.is_initialized(ParallelMode.DATA) else gpc.get_local_rank( + ParallelMode.DATA) + self.data_parallel_size = 1 if not gpc.is_initialized(ParallelMode.DATA) else gpc.get_world_size( + ParallelMode.DATA) + + self.tensor_parallel_rank = 0 if not gpc.is_initialized(ParallelMode.TENSOR) else gpc.get_local_rank( + ParallelMode.TENSOR) + self.tensor_parallel_size = 1 if not gpc.is_initialized(ParallelMode.TENSOR) else gpc.get_world_size( + ParallelMode.TENSOR) + + self.pipeline_parallel_rank = 0 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_local_rank( + ParallelMode.PIPELINE) + self.pipeline_parallel_size = 1 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_world_size( + ParallelMode.PIPELINE) diff --git a/colossalai/nn/layer/parallel_1d/__init__.py b/colossalai/nn/layer/parallel_1d/__init__.py new file mode 100644 index 000000000..9e7df549f --- /dev/null +++ b/colossalai/nn/layer/parallel_1d/__init__.py @@ -0,0 +1,5 @@ +from .layers import Linear1D_Col, Linear1D_Row + +__all__ = [ + 'Linear1D_Col', 'Linear1D_Row', +] diff --git a/colossalai/nn/layer/parallel_1d/_utils.py b/colossalai/nn/layer/parallel_1d/_utils.py new file mode 100644 index 000000000..00d221e78 --- /dev/null +++ b/colossalai/nn/layer/parallel_1d/_utils.py @@ -0,0 +1,15 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from .._common_utils import divide + + +def vocab_range_from_per_partition_vocab_size(per_partition_vocab_size, rank): + index_f = rank * per_partition_vocab_size + index_l = index_f + per_partition_vocab_size + return index_f, index_l + + +def vocab_range_from_global_vocab_size(global_vocab_size, rank, world_size): + per_partition_vocab_size = divide(global_vocab_size, world_size) + return vocab_range_from_per_partition_vocab_size(per_partition_vocab_size, rank) diff --git a/colossalai/nn/layer/parallel_1d/layers.py b/colossalai/nn/layer/parallel_1d/layers.py new file mode 100644 index 000000000..572eca777 --- /dev/null +++ b/colossalai/nn/layer/parallel_1d/layers.py @@ -0,0 +1,166 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.nn.init as init +from torch import Tensor +from torch.nn.parameter import Parameter +from typing import Tuple + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.registry import LAYERS +from colossalai.utils import get_current_device +from .._common_utils import divide +from .._parallel_utilities import reduce_grad, reduce_input, gather_forward_split_backward, \ + split_forward_gather_backward +from ..base_layer import ParallelLayer + + +class Linear1D_Col(ParallelLayer): + """Linear layer with column parallelism. + + The linear layer is defined as :math:`Y = XA + b`. A is parallelized along + its second dimension as :math:`A = [A_1, ..., A_p]`. + + :param in_features: first dimension of matrix A. + :type in_features: int + :param output_size: second dimension of matrix A. 
+ :type output_size: int + :param bias: If true, add bias, defaults to True + :type bias: bool, optional + :param dtype: The dtype of parameters, defaults to None + :type dtype: torch.dtype, optional + :param gather_output: If true, call all-gether on output and make Y avaiable + to all GPUs, otherwise, every GPU will have its output + which is :math:`Y_i = XA_i`, defaults to False + :type gather_output: bool, optional + """ + + def __init__(self, + in_features: int, + output_size: int, + bias: bool = True, + dtype: torch.dtype = None, + gather_output: bool = False): + super().__init__() + + # Keep input parameters + self.input_size = in_features + self.output_size = output_size + self.gather_output = gather_output + self.skip_bias_add = not bias + + world_size = gpc.get_world_size(ParallelMode.PARALLEL_1D) + self.output_size_per_partition = divide(output_size, world_size) + + # Parameters. + # Initialize weight. + factory_kwargs = {'device': get_current_device(), 'dtype': dtype} + self.weight = Parameter(torch.empty( + self.output_size_per_partition, self.input_size, + **factory_kwargs)) + + if bias: + self.bias = Parameter(torch.empty( + self.output_size_per_partition, + **factory_kwargs)) + # Always initialize bias to zero. + with torch.no_grad(): + self.bias.zero_() + else: + self.register_parameter('bias', None) + + def forward(self, input_: Tensor) -> Tuple[Tensor, Tensor]: + # Set up backprop all-reduce. + input_parallel = reduce_grad(input_, ParallelMode.PARALLEL_1D) + # Matrix multiply. + + bias = self.bias if not self.skip_bias_add else None + output_parallel = F.linear(input_parallel, self.weight, bias) + if self.gather_output: + # All-gather across the partitions. + output = gather_forward_split_backward( + output_parallel, ParallelMode.PARALLEL_1D, dim=-1) + else: + output = output_parallel + if self.skip_bias_add: + return output, self.bias + else: + return output + + +@LAYERS.register_module +class Linear1D_Row(ParallelLayer): + """ Linear layer with row parallelism + + :param in_features: size of each input sample + :type in_features: int + :param out_features: size of each output sample + :type out_features: int + :param bias: If set to ``False``, the layer will not learn an additive bias, defaults to True + :type bias: bool, optional + :param dtype: The dtype of parameters, defaults to None + :type dtype: torch.dtype, optional + :param parallel_input: If set to ``False``, it's assumed that the input is splitted, defaults to False + :type parallel_input: bool, optional + """ + + def __init__(self, + in_features: int, + out_features: int, + bias: bool = True, + dtype: torch.dtype = None, + parallel_input: bool = False + ): + super().__init__() + + # Keep input parameters + self.in_features = in_features + self.out_features = out_features + self.parallel_input = parallel_input + self.skip_bias_add = not bias + + # Divide the weight matrix along the last dimension. + world_size = gpc.get_world_size(ParallelMode.PARALLEL_1D) + self.input_size_per_partition = divide(in_features, world_size) + + # Parameters. + # Initialize weight. + factory_kwargs = {'device': get_current_device(), 'dtype': dtype} + self.weight = Parameter(torch.empty( + self.out_features, + self.input_size_per_partition, + **factory_kwargs)) + + if bias: + self.bias = Parameter(torch.empty( + self.out_features, + **factory_kwargs + )) + + # Always initialize bias to zero. 
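# A single-process sketch of why Linear1D_Col (above) and Linear1D_Row compose
# with a single reduction: splitting W1 by output columns and W2 by input rows
# lets every rank compute (X @ W1_i) @ W2_i locally, and the sum over ranks
# equals X @ W1 @ W2. Tensors use the mathematical X @ W orientation; the layers
# themselves store transposed weights for F.linear. All names here are
# illustrative, not values taken from the layers.
import torch

torch.manual_seed(0)
X = torch.randn(4, 8)
W1 = torch.randn(8, 16)      # column-parallel weight: split along dim 1
W2 = torch.randn(16, 8)      # row-parallel weight: split along dim 0
world_size = 2

partials = []
for rank in range(world_size):
    W1_i = W1.chunk(world_size, dim=1)[rank]     # this rank's slice of the outputs
    W2_i = W2.chunk(world_size, dim=0)[rank]     # the matching slice of the inputs
    partials.append((X @ W1_i) @ W2_i)           # purely local compute, no communication

out = sum(partials)                               # the single all-reduce in the real layers
assert torch.allclose(out, X @ W1 @ W2, atol=1e-5)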
+ with torch.no_grad(): + self.bias.zero_() + else: + self.register_parameter('bias', None) + + def reset_parameters(self) -> None: + init.xavier_normal_(self.weight) + + def forward(self, input_: Tensor) -> Tensor: + # Set up backprop all-reduce. + if self.parallel_input: + input_ = input_ + else: + input_ = split_forward_gather_backward( + input_, ParallelMode.PARALLEL_1D, dim=-1) + + output_parallel = F.linear(input_, self.weight) + output = reduce_input(output_parallel, ParallelMode.PARALLEL_1D) + + if not self.skip_bias_add: + output = output + self.bias + return output diff --git a/colossalai/nn/layer/parallel_2d/__init__.py b/colossalai/nn/layer/parallel_2d/__init__.py new file mode 100644 index 000000000..22a5b5d02 --- /dev/null +++ b/colossalai/nn/layer/parallel_2d/__init__.py @@ -0,0 +1,11 @@ +from ._operation import Matmul_AB_2D, Matmul_ABT_2D, Matmul_ATB_2D, Add_Bias_2D, matmul_2d +from ._transformer import TransformerMLP2D, TransformerSelfAttention2D, TransformerLayer2D +from ._vit import ViTMLP2D, ViTSelfAttention2D, ViTHead2D, ViTPatchEmbedding2D, ViTTokenFuser2D, ViTInputSplitter2D +from .layers import Linear2D, LayerNorm2D + +__all__ = [ + 'Matmul_AB_2D', 'Matmul_ABT_2D', 'Matmul_ATB_2D', 'Add_Bias_2D', 'matmul_2d', + 'TransformerMLP2D', 'TransformerSelfAttention2D', 'TransformerLayer2D', + 'ViTMLP2D', 'ViTSelfAttention2D', 'ViTHead2D', 'ViTPatchEmbedding2D', 'ViTTokenFuser2D', 'ViTInputSplitter2D', + 'Linear2D', 'LayerNorm2D' +] diff --git a/colossalai/nn/layer/parallel_2d/_operation.py b/colossalai/nn/layer/parallel_2d/_operation.py new file mode 100644 index 000000000..2c7eb8ac6 --- /dev/null +++ b/colossalai/nn/layer/parallel_2d/_operation.py @@ -0,0 +1,522 @@ +from typing import Any, Tuple + +import torch +import torch.distributed as dist +from torch import Tensor + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.utils import get_current_device + + +def matmul_2d(a, + b, + summa_dim, + out_shape, + row_rank=None, + col_rank=None, + row_parallel_mode=ParallelMode.PARALLEL_2D_ROW, + col_parallel_mode=ParallelMode.PARALLEL_2D_COL, + ): + """Matrix multiplication for 2D parallelism + + :param a: matrix :math:`A` + :type a: torch.tensor + :param b: matrix :math:`B` + :type b: torch.tensor + :param summa_dim: dimension of SUMMA fo 2D parallelism + :type summa_dim: int + :param out_shape: shape of output tensor + :type out_shape: tuple + :param row_rank: the rank of row, defaults to None + :type row_rank: int, optional + :param col_rank: the rank of column, defaults to None + :type col_rank: int, optional + :param row_parallel_mode: row parallel mode, defaults to ParallelMode.PARALLEL_2D_ROW + :type row_parallel_mode: str, optional + :param col_parallel_mode: column parallel mode, defaults to ParallelMode.PARALLEL_2D_COL + :type col_parallel_mode: str, optional + :return: :math:`C = AB` + :rtype: torch.tensor + """ + if row_rank is None: + row_rank = gpc.get_local_rank(col_parallel_mode) + if col_rank is None: + col_rank = gpc.get_local_rank(row_parallel_mode) + + data_parallel_rank = 0 if not gpc.is_initialized( + ParallelMode.DATA) else gpc.get_local_rank(ParallelMode.DATA) + pipeline_parallel_rank = 0 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_local_rank( + ParallelMode.PIPELINE) + pipeline_parallel_size = 1 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_world_size( + ParallelMode.PIPELINE) + tensor_parallel_size = summa_dim ** 2 + return Matmul_AB_2D(a, b, 
summa_dim, out_shape, row_rank, col_rank, row_parallel_mode, col_parallel_mode, + data_parallel_rank, pipeline_parallel_rank, pipeline_parallel_size, tensor_parallel_size + ) + + +class Matmul_AB_2D(torch.autograd.Function): + """Matrix multiplication for :math:`C = AB` + """ + @staticmethod + def forward(ctx: Any, + A: Tensor, + B: Tensor, + summa_dim: int, + out_shape: Tuple[int, ...], + row_rank: int, + col_rank: int, + row_parallel_mode: ParallelMode, + col_parallel_mode: ParallelMode, + data_parallel_rank: int, + pipeline_parallel_rank: int, + pipeline_parallel_size: int, + tensor_parallel_size: int) -> Tensor: + # A: [b / q, s, h / q] -> [(b * s) / q, h / q] + # B: [h / q, s / q] + # C: [b / q, s, s / q] -> [(b * s) / q, s / q] + + assert A.shape[-1] == B.shape[-2], \ + 'Invalid shapes: A={}, B={} for AB.'.format(A.shape, B.shape) + + if ctx: + ctx.save_for_backward(A, B) + + A_shape = A.shape + A = A.reshape((-1, A_shape[-1])) + B_shape = B.shape + B = B.reshape((-1, B_shape[-1])) + C_shape = (A.shape[0], B.shape[-1]) + C = torch.zeros(C_shape, dtype=A.dtype, device=get_current_device()) + + for i in range(summa_dim): + A_temp = A.clone() + B_temp = B.clone() + src_a = i + summa_dim * row_rank + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \ + pipeline_parallel_rank * tensor_parallel_size + dist.broadcast(A_temp, src=src_a, + group=gpc.get_group(row_parallel_mode)) + src_b = col_rank + summa_dim * i + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \ + pipeline_parallel_rank * tensor_parallel_size + dist.broadcast(B_temp, src=src_b, + group=gpc.get_group(col_parallel_mode)) + torch.addmm(C, A_temp, B_temp, out=C) + + out = C.reshape(out_shape) + + if ctx: + ctx.summa_dim = summa_dim + ctx.row_rank = row_rank + ctx.col_rank = col_rank + ctx.row_parallel_mode = row_parallel_mode + ctx.col_parallel_mode = col_parallel_mode + ctx.A_shape = A_shape + ctx.B_shape = B_shape + ctx.data_parallel_rank = data_parallel_rank + ctx.pipeline_parallel_rank = pipeline_parallel_rank + ctx.pipeline_parallel_size = pipeline_parallel_size + ctx.tensor_parallel_size = tensor_parallel_size + return out + + @staticmethod + def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: + A, B = ctx.saved_tensors + A_grad = Matmul_ABT_2D.forward( + None, + output_grad, B, + ctx.summa_dim, ctx.A_shape, + ctx.row_rank, ctx.col_rank, + ctx.row_parallel_mode, + ctx.col_parallel_mode, + ctx.data_parallel_rank, + ctx.pipeline_parallel_rank, + ctx.pipeline_parallel_size, + ctx.tensor_parallel_size + ) + B_grad = Matmul_ATB_2D.forward( + None, + A, output_grad, + ctx.summa_dim, ctx.B_shape, + ctx.row_rank, ctx.col_rank, + ctx.row_parallel_mode, + ctx.col_parallel_mode, + ctx.data_parallel_rank, + ctx.pipeline_parallel_rank, + ctx.pipeline_parallel_size, + ctx.tensor_parallel_size + ) + return A_grad, B_grad, None, None, None, None, None, None, None, None, None, None + + +class Matmul_ABT_2D(torch.autograd.Function): + """Matrix multiplication for :math:`C = AB^T` + """ + @staticmethod + def forward(ctx: Any, + A: Tensor, + B: Tensor, + summa_dim: int, + out_shape: Tuple[int, ...], + row_rank: int, + col_rank: int, + row_parallel_mode: ParallelMode, + col_parallel_mode: ParallelMode, + data_parallel_rank: int, + pipeline_parallel_rank: int, + pipeline_parallel_size: int, + tensor_parallel_size: int + ) -> Tensor: + + assert A.shape[-1] == B.shape[-1], \ + 'Invalid shapes: A={}, B={} for ABT.'.format(A.shape, B.shape) + + if ctx: + ctx.save_for_backward(A, B) + + 
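+        # SUMMA-style C = A * B^T: at each step i, the block of B held by the i-th rank of
+        # this rank's column group is broadcast, multiplied with the local A block, and the
+        # partial results are reduced across the row group onto the rank that owns output
+        # block i; each rank keeps the result of the step where i equals its col_rank.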
A_shape = A.shape + A = A.reshape((-1, A_shape[-1])) + B_shape = B.shape + B = B.reshape((-1, B_shape[-1])) + C_shape = (A.shape[0], B.shape[0]) + C = torch.empty(C_shape, dtype=A.dtype, device=get_current_device()) + + for i in range(summa_dim): + B_temp = B.clone() + # C_temp = torch.zeros(C_shape, dtype=C.dtype, device=get_current_device()) + src_b = col_rank + summa_dim * i + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \ + pipeline_parallel_rank * tensor_parallel_size + dist.broadcast(B_temp, src=src_b, + group=gpc.get_group(col_parallel_mode)) + C_temp = torch.matmul(A, B_temp.transpose(0, 1)) + src_c = i + summa_dim * row_rank + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \ + pipeline_parallel_rank * tensor_parallel_size + dist.reduce(C_temp, dst=src_c, + group=gpc.get_group(row_parallel_mode)) + if i == col_rank: + C = C_temp.clone() + + out = C.reshape(out_shape) + + if ctx: + ctx.summa_dim = summa_dim + ctx.row_rank = row_rank + ctx.col_rank = col_rank + ctx.row_parallel_mode = row_parallel_mode + ctx.col_parallel_mode = col_parallel_mode + ctx.A_shape = A_shape + ctx.B_shape = B_shape + ctx.data_parallel_rank = data_parallel_rank + ctx.pipeline_parallel_rank = pipeline_parallel_rank + ctx.pipeline_parallel_size = pipeline_parallel_size + ctx.tensor_parallel_size = tensor_parallel_size + + return out + + @staticmethod + def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: + A, B = ctx.saved_tensors + A_grad = Matmul_AB_2D.forward( + None, + output_grad, B, + ctx.summa_dim, ctx.A_shape, + ctx.row_rank, ctx.col_rank, + ctx.row_parallel_mode, + ctx.col_parallel_mode, + ctx.data_parallel_rank, + ctx.pipeline_parallel_rank, + ctx.pipeline_parallel_size, + ctx.tensor_parallel_size + ) + B_grad = Matmul_ATB_2D.forward( + None, + output_grad, A, + ctx.summa_dim, ctx.B_shape, + ctx.row_rank, ctx.col_rank, + ctx.row_parallel_mode, + ctx.col_parallel_mode, + ctx.data_parallel_rank, + ctx.pipeline_parallel_rank, + ctx.pipeline_parallel_size, + ctx.tensor_parallel_size + ) + return A_grad, B_grad, None, None, None, None, None, None, None, None, None, None + + +class Matmul_ATB_2D(torch.autograd.Function): + """Matrix multiplication for :math:`C = A^TB` + """ + @staticmethod + def forward(ctx: Any, + A: Tensor, + B: Tensor, + summa_dim: int, + out_shape: Tuple[int, ...], + row_rank: int, + col_rank: int, + row_parallel_mode: ParallelMode, + col_parallel_mode: ParallelMode, + data_parallel_rank: int, + pipeline_parallel_rank: int, + pipeline_parallel_size: int, + tensor_parallel_size: int + ) -> Tensor: + + assert A.shape[-2] == B.shape[-2], \ + 'Invalid shapes: A={}, B={} for ATB.'.format(A.shape, B.shape) + + if ctx: + ctx.save_for_backward(A, B) + + A_shape = A.shape + A = A.reshape((-1, A_shape[-1])) + B_shape = B.shape + B = B.reshape((-1, B_shape[-1])) + C_shape = (A.shape[-1], B.shape[-1]) + C = torch.empty(C_shape, dtype=A.dtype, device=get_current_device()) + + for i in range(summa_dim): + A_temp = A.clone() + # C_temp = torch.zeros(C_shape, dtype=C.dtype, device=get_current_device()) + src_a = i + summa_dim * row_rank + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \ + pipeline_parallel_rank * tensor_parallel_size + dist.broadcast(A_temp, src=src_a, + group=gpc.get_group(row_parallel_mode)) + C_temp = torch.matmul(A_temp.transpose(0, 1), B) + src_c = col_rank + summa_dim * i + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \ + pipeline_parallel_rank * tensor_parallel_size + 
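+            # Only the destination rank accumulates the complete sum for this block of
+            # A^T * B; the local result is kept below when step i matches this rank's row_rank.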
dist.reduce(C_temp, dst=src_c, + group=gpc.get_group(col_parallel_mode)) + if i == row_rank: + C = C_temp.clone() + + out = C.reshape(out_shape) + + if ctx: + ctx.summa_dim = summa_dim + ctx.row_rank = row_rank + ctx.col_rank = col_rank + ctx.row_parallel_mode = row_parallel_mode + ctx.col_parallel_mode = col_parallel_mode + ctx.A_shape = A_shape + ctx.B_shape = B_shape + ctx.data_parallel_rank = data_parallel_rank + ctx.pipeline_parallel_rank = pipeline_parallel_rank + ctx.pipeline_parallel_size = pipeline_parallel_size + ctx.tensor_parallel_size = tensor_parallel_size + + return out + + @staticmethod + def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: + A, B = ctx.saved_tensors + A_grad = Matmul_ABT_2D.forward( + None, + B, output_grad, + ctx.summa_dim, ctx.A_shape, + ctx.row_rank, ctx.col_rank, + ctx.row_parallel_mode, + ctx.col_parallel_mode, + ctx.data_parallel_rank, + ctx.pipeline_parallel_rank, + ctx.pipeline_parallel_size, + ctx.tensor_parallel_size + ) + B_grad = Matmul_AB_2D.forward( + None, + A, output_grad, + ctx.summa_dim, ctx.B_shape, + ctx.row_rank, ctx.col_rank, + ctx.row_parallel_mode, + ctx.col_parallel_mode, + ctx.data_parallel_rank, + ctx.pipeline_parallel_rank, + ctx.pipeline_parallel_size, + ctx.tensor_parallel_size + ) + return A_grad, B_grad, None, None, None, None, None, None, None, None, None, None + + +class Add_Bias_2D(torch.autograd.Function): + """Matrix add bias: :math:`C = A + b` + """ + @staticmethod + def forward(ctx: Any, + input: Tensor, + bias: Tensor, + output_size_per_partition: int, + row_rank: int, + col_rank: int, + row_parallel_mode: ParallelMode, + col_parallel_mode: ParallelMode, + skip_bias_add: bool, + data_parallel_rank: int, + pipeline_parallel_rank: int, + pipeline_parallel_size: int, + tensor_parallel_size: int + ) -> Tensor: + if row_rank == 0: + bias_temp = bias.clone() + else: + bias_temp = torch.zeros( + output_size_per_partition, + dtype=bias.dtype, + device=get_current_device()) + src_rank = col_rank + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \ + pipeline_parallel_rank * tensor_parallel_size + dist.broadcast(bias_temp, src=src_rank, + group=gpc.get_group(col_parallel_mode)) + + ctx.row_rank = row_rank + ctx.col_rank = col_rank + ctx.row_parallel_mode = row_parallel_mode + ctx.col_parallel_mode = col_parallel_mode + ctx.bias = skip_bias_add + ctx.data_parallel_rank = data_parallel_rank + ctx.pipeline_parallel_rank = pipeline_parallel_rank + ctx.pipeline_parallel_size = pipeline_parallel_size + ctx.tensor_parallel_size = tensor_parallel_size + + if skip_bias_add: + return bias_temp + else: + output = input + bias_temp + return output + + @staticmethod + def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: + row_rank = ctx.row_rank + col_rank = ctx.col_rank + row_parallel_mode = ctx.row_parallel_mode + col_parallel_mode = ctx.col_parallel_mode + data_parallel_rank = ctx.data_parallel_rank + pipeline_parallel_rank = ctx.pipeline_parallel_rank + pipeline_parallel_size = ctx.pipeline_parallel_size + tensor_parallel_size = ctx.tensor_parallel_size + + if ctx.bias: + dst_rank = col_rank + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \ + pipeline_parallel_rank * tensor_parallel_size + dist.reduce(output_grad, dst=dst_rank, + group=gpc.get_group(col_parallel_mode)) + if row_rank == 0: + return None, output_grad, None, None, None, None, None, None, None, None, None, None + else: + # for compatibility with zero optimizer, no grad should be None + grad_tmp = 
torch.zeros_like(output_grad) + return None, grad_tmp, None, None, None, None, None, None, None, None, None, None + else: + reduce_dim = tuple(range(output_grad.ndim - 1)) + reduce = torch.sum(output_grad, dim=reduce_dim) + dst_rank = col_rank + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \ + pipeline_parallel_rank * tensor_parallel_size + dist.reduce(reduce, dst=dst_rank, + group=gpc.get_group(col_parallel_mode)) + if row_rank == 0: + return output_grad, reduce, None, None, None, None, None, None, None, None, None, None + else: + # for compatibility with zero optimizer, no grad should be None + reduce_tmp = torch.zeros_like(reduce) + return output_grad, reduce_tmp, None, None, None, None, None, None, None, None, None, None + + +class _LayerNorm_2D(torch.autograd.Function): + + @staticmethod + def forward(ctx: Any, + input: Tensor, + E_x: Tensor, + Var_x: Tensor, + hidden_size: int, + row_parallel_mode: ParallelMode, + col_parallel_mode: ParallelMode) -> Tensor: + input = input - E_x + # in here, input = x - E[x], Var_x = 1 / sqrt(Var[x] + eps) + ctx.normalized_shape = hidden_size + output = input * Var_x + ctx.save_for_backward(output, Var_x) + ctx.row_parallel_mode = row_parallel_mode + ctx.col_parallel_mode = col_parallel_mode + return output + + @staticmethod + def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: + row_parallel_mode = ctx.row_parallel_mode + col_parallel_mode = ctx.col_parallel_mode + x, Var_x = ctx.saved_tensors + # in here, Var_x = 1 / sqrt(Var[x] + eps), x = (x - E[x]) * Var_x + output_grad_sum = torch.sum(output_grad, dim=-1, keepdim=True) + torch.distributed.all_reduce( + output_grad_sum, group=gpc.get_group(row_parallel_mode)) + output_grad_sum /= ctx.normalized_shape + + output_grad_mul_x_sum = torch.sum( + output_grad * x, dim=-1, keepdim=True) + torch.distributed.all_reduce( + output_grad_mul_x_sum, group=gpc.get_group(row_parallel_mode)) + output_grad_mul_x_sum /= ctx.normalized_shape + + input_grad = output_grad.clone() + input_grad -= x * output_grad_mul_x_sum + input_grad -= output_grad_sum + input_grad *= Var_x + + return input_grad, None, None, None, None, None + + +# class Sum_2D(torch.autograd.Function): +# +# @staticmethod +# def forward(ctx: Any, +# inputs: Tensor, +# dim: int, +# summa_dim: int, +# row_parallel_mode: ParallelMode, +# keepdim: bool = False) -> Tensor: +# # input: [b/q, s, h/q] +# empty_cache() +# ctx.save_for_backward(inputs) +# # sum: [b/q, s] +# out = torch.sum(inputs, dim=dim, keepdim=keepdim) +# torch.distributed.all_reduce(out, group=gpc.get_group(row_parallel_mode)) +# return out +# +# @staticmethod +# def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: +# with torch.no_grad(): +# inputs = ctx.saved_tensors +# input_grad = torch.ones(inputs.shape, dtype=output_grad.dtype) +# return input_grad, None, None, None, None, None + + +class _ViT_Split_Input_2D(torch.autograd.Function): + + @staticmethod + def forward(ctx: Any, + inputs: Tensor, + batch_size: int, + summa_dim: int, + col_parallel_mode: ParallelMode) -> Tensor: + # inputs: [b, s, h/q] + # output: [b/q, s, h/q] + + ctx.BATCH_SIZE = batch_size + ctx.summa_dim = summa_dim + ctx.col_parallel_mode = col_parallel_mode + row_rank = gpc.get_local_rank(col_parallel_mode) + output = torch.chunk(inputs, summa_dim, dim=0)[row_rank] + output = output.clone() + return output + + @staticmethod + def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: + # output_grad: [b/q, s, h/q] + # grads: [b, s, h/q] + grads_shape = 
(ctx.BATCH_SIZE,) + output_grad.shape[1:] + grads = torch.empty(grads_shape, + dtype=output_grad.dtype, + device=get_current_device()) + dist.all_gather(list(grads.chunk(ctx.summa_dim, dim=0)), + output_grad.contiguous(), + group=gpc.get_group(ctx.col_parallel_mode)) + return grads, None, None, None diff --git a/colossalai/nn/layer/parallel_2d/_transformer.py b/colossalai/nn/layer/parallel_2d/_transformer.py new file mode 100644 index 000000000..3a3cc4840 --- /dev/null +++ b/colossalai/nn/layer/parallel_2d/_transformer.py @@ -0,0 +1,220 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import math + +import torch +from torch import nn as nn, Tensor + +from colossalai.nn.layer._common_utils import divide, ACT2FN +from colossalai.nn.layer.parallel_2d._utils import assert_summa_initialization, get_summa_dim_from_env +from colossalai.registry import LAYERS +from .layers import Linear2D, LayerNorm2D +from ..base_layer import ParallelLayer + + +@LAYERS.register_module +class TransformerMLP2D(ParallelLayer): + """ + MLP will take the input with h hidden state, project it to mlp_ratio * h + hidden dimension, perform nonlinear transformation, and project the + state back into h hidden dimension. At the end, dropout is also + applied. + + :param in_features: the size of input tensor + :type in_features: int + :param mlp_ratio: hidden size of MLP divided by embedding dim, defaults to 4.0 + :type mlp_ratio: int, optional + :param act_func: activation function, defaults to 'gelu' + :type act_func: str, optional + :param dropout_prob: dropout probability, defaults to 0. + :type dropout_prob: float, optional + :param dtype: dtype of parameters, defaults to None + :type dtype: torch.dtype, optional + :param skip_bias_add: If set to ``True``, it will skip bias add for linear layer, which is preserved for kernel fusion, defaults to False + :type skip_bias_add: bool, optional + """ + + def __init__(self, + in_features: int, + mlp_ratio: int = 4.0, + act_func: str = 'gelu', + dropout_prob: float = 0., + dtype=None, + skip_bias_add: bool = False + ): + super().__init__() + assert_summa_initialization() + self.summa_dim = get_summa_dim_from_env() + self.in_features = in_features + self.skip_bias_add = skip_bias_add + + # Project to h * mlp_ratio. + self.dense_1 = Linear2D( + in_features, + int(mlp_ratio * in_features), + dtype=dtype, + skip_bias_add=self.skip_bias_add + ) + + assert act_func in ACT2FN.keys(), f'Invalid value for argument act_func, ' \ + f'activation function can only be {list(ACT2FN.keys())}' + self.activation_func = ACT2FN[act_func] + + # Project back to h. 
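+        # With skip_bias_add=True both Linear2D layers return an (output, bias) tuple;
+        # forward() below discards the separately returned bias, which is kept only for
+        # later kernel fusion.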
+ self.dense_2 = Linear2D( + int(mlp_ratio * in_features), + in_features, + dtype=dtype, + skip_bias_add=self.skip_bias_add + ) + self.dropout = nn.Dropout(dropout_prob) + self.layernorm = LayerNorm2D(in_features, dtype=dtype) + + def forward(self, x: Tensor) -> Tensor: + if self.skip_bias_add: + intermediate_output, _ = self.dense_1(x) + else: + intermediate_output = self.dense_1(x) + + intermediate_output = self.activation_func(intermediate_output) + + if self.skip_bias_add: + output, _ = self.dense_2(intermediate_output) + else: + output = self.dense_2(intermediate_output) + + output = self.dropout(output) + output = self.layernorm(x + output) + return output + + +@LAYERS.register_module +class TransformerSelfAttention2D(ParallelLayer): + """Self attention layer for 2D parallel Transformer + + :param hidden_size: hidden size + :type hidden_size: int + :param num_attention_heads: number of attention heads + :type num_attention_heads: int + :param attention_dropout_prob: dropout probability for attention layer + :type attention_dropout_prob: float + :param hidden_dropout_prob: dropout probability for hidden layer + :type hidden_dropout_prob: float + :param dtype: dtype of parameters, defaults to None + :type dtype: torch.dtype, optional + """ + + def __init__(self, + hidden_size: int, + num_attention_heads: int, + attention_dropout_prob: float, + hidden_dropout_prob: float, + dtype=None, + ): + + super().__init__() + + assert_summa_initialization() + self.summa_dim = get_summa_dim_from_env() + self.hidden_size = hidden_size + self.num_attention_heads = divide(num_attention_heads, self.summa_dim) + self.attention_head_size = divide(hidden_size, num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query_key_value = Linear2D( + hidden_size, + 3 * hidden_size, + dtype=dtype, + ) + self.attention_dropout = nn.Dropout(attention_dropout_prob) + self.dense = Linear2D( + hidden_size, + hidden_size, + dtype=dtype, + ) + self.dropout = nn.Dropout(hidden_dropout_prob) + self.layernorm = LayerNorm2D( + hidden_size, + dtype=dtype) + + def forward(self, hidden_states: Tensor, attention_mask: Tensor) -> Tensor: + query_key_value = self.query_key_value(hidden_states) + new_qkv_shape = query_key_value.shape[:-1] + \ + (self.num_attention_heads, 3 * self.attention_head_size) + query_key_value = query_key_value.view(new_qkv_shape) + query_key_value = query_key_value.permute((0, 2, 1, 3)) + query_layer, key_layer, value_layer = torch.chunk( + query_key_value, 3, dim=-1) + + attention_scores = torch.matmul( + query_layer, key_layer.transpose(-1, -2)) + attention_scores = attention_scores / \ + math.sqrt(self.attention_head_size) + attention_scores = attention_scores + attention_mask + attention_probs = nn.Softmax(dim=-1)(attention_scores) + attention_probs = self.attention_dropout(attention_probs) + + context_layer = torch.matmul(attention_probs, value_layer) + context_layer = context_layer.permute((0, 2, 1, 3)).contiguous() + new_context_layer_shape = context_layer.size()[ + :-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + + output = self.dense(context_layer) + output = self.dropout(output) + attention_output = self.layernorm(hidden_states + output) + + return attention_output + + +@LAYERS.register_module +class TransformerLayer2D(ParallelLayer): + """Transformer layer which contains a self-attention layer and a MLP layer + + :param hidden_size: hidden size + :type hidden_size: int + :param num_attention_heads: 
number of attention heads + :type num_attention_heads: int + :param act_func: activation function, defaults to 'gelu' + :type act_func: str, optional + :param mlp_ratio: hidden size of MLP divided by embedding dim, defaults to 4.0 + :type mlp_ratio: float, optional + :param attention_dropout_prob: dropout probability for attention layer, defaults to 0. + :type attention_dropout_prob: float, optional + :param hidden_dropout_prob: dropout probability for attention layer, defaults to 0. + :type hidden_dropout_prob: float, optional + :param dtype: dtype of parameters, defaults to None + :type dtype: torch.dtype, optional + """ + + def __init__(self, + hidden_size: int, + num_attention_heads: int, + act_func: str = 'gelu', + mlp_ratio: float = 4.0, + attention_dropout_prob: float = 0., + hidden_dropout_prob: float = 0., + dtype=None, + ): + super().__init__() + + self.attention = TransformerSelfAttention2D( + hidden_size=hidden_size, + num_attention_heads=num_attention_heads, + attention_dropout_prob=attention_dropout_prob, + hidden_dropout_prob=hidden_dropout_prob, + dtype=dtype, + ) + self.mlp = TransformerMLP2D( + in_features=hidden_size, + dropout_prob=hidden_dropout_prob, + act_func=act_func, + mlp_ratio=mlp_ratio, + dtype=dtype, + ) + + def forward(self, hidden_states: Tensor, attention_mask: Tensor) -> Tensor: + attention_output = self.attention(hidden_states, attention_mask) + output = self.mlp(attention_output) + return output diff --git a/colossalai/nn/layer/parallel_2d/_utils.py b/colossalai/nn/layer/parallel_2d/_utils.py new file mode 100644 index 000000000..65d3af2b0 --- /dev/null +++ b/colossalai/nn/layer/parallel_2d/_utils.py @@ -0,0 +1,23 @@ +import os + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.context.process_group_initializer.initializer_2d import SUMMA_DIM +from colossalai.core import global_context as gpc + + +def get_summa_dim_from_env() -> int: + try: + summa_dim = os.environ[SUMMA_DIM] + summa_dim = int(summa_dim) + assert summa_dim > 0, 'SUMMA_DIM must be larger than zero' + return summa_dim + + except KeyError as e: + raise EnvironmentError('SUMMA_DIM is not found in the current environment, ' + 'please make sure that you have used the correct process group initializer') + + +def assert_summa_initialization(): + assert gpc.is_initialized(ParallelMode.PARALLEL_2D_COL) and \ + gpc.is_initialized(ParallelMode.PARALLEL_2D_ROW), \ + 'Both TWO_DIMENSION_COL and TWO_DIMENSION_ROW must be initialized by the process group initializer' diff --git a/colossalai/nn/layer/parallel_2d/_vit.py b/colossalai/nn/layer/parallel_2d/_vit.py new file mode 100644 index 000000000..211de1e9f --- /dev/null +++ b/colossalai/nn/layer/parallel_2d/_vit.py @@ -0,0 +1,391 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import math + +import torch +from torch import nn as nn, Tensor, distributed as dist + +from colossalai.context import seed, ParallelMode +from colossalai.core import global_context as gpc +from colossalai.nn.layer._common_utils import divide, ACT2FN +from colossalai.nn.layer.parallel_2d._utils import assert_summa_initialization, get_summa_dim_from_env +from colossalai.nn.layer.vanilla_vision_transformer.layers import to_2tuple +from colossalai.registry import LAYERS +from colossalai.utils import checkpoint +from colossalai.utils import get_current_device +from ._operation import _ViT_Split_Input_2D +from .layers import Linear2D +from .._common_utils import set_tensor_parallel_attribute +from ..base_layer import ParallelLayer + + 
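+# The layers below shard the ViT weights over a summa_dim x summa_dim grid and keep the
+# activations in [b/q, s, h/q] blocks (q = summa_dim). For example, with a tensor parallel
+# size of 4, q = 2: ViTSelfAttention2D(hidden_size=768, num_attention_heads=12) keeps
+# 12 / 2 = 6 heads per rank, its fused QKV Linear2D stores a [768/2, 3*768/2] weight shard,
+# and ViTPatchEmbedding2D produces patch embeddings of width 768 / 2 on each rank.
+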
+@LAYERS.register_module +class ViTMLP2D(ParallelLayer): + """MLP layer for 2D parallel Vision Transformer + + :param in_features: size of each input sample + :type in_features: int + :param mlp_ratio: hidden size of MLP divided by embedding dim + :type mlp_ratio: int + :param act_func: activation function, defaults to 'gelu' + :type act_func: str, optional + :param dropout_prob: dropout probability, defaults to 0. + :type dropout_prob: float, optional + :param dtype: The dtype of parameters, defaults to None + :type dtype: torch.dtype, optional + :param checkpoint: whether to checkpoint the layer, defaults to False + :type checkpoint: bool, optional + """ + + def __init__(self, + in_features: int, + mlp_ratio: int, + act_func: str = 'gelu', + dropout_prob: float = 0., + dtype=None, + checkpoint: bool = False + ): + super().__init__() + + assert_summa_initialization() + self.summa_dim = get_summa_dim_from_env() + self.in_features = in_features + self.mlp_ratio = mlp_ratio + self.checkpoint = checkpoint + + # Project to mlp_ratio * h. + self.dense_1 = Linear2D( + self.in_features, + self.mlp_ratio * self.in_features, + dtype=dtype, + ) + + self.act = ACT2FN[act_func] + + # Project back to h. + self.dense_2 = Linear2D( + self.mlp_ratio * self.in_features, + self.in_features, + dtype=dtype, + ) + self.dropout = nn.Dropout(dropout_prob) + + def _forward(self, hidden_states: Tensor) -> Tensor: + intermediate_output = self.dense_1(hidden_states) + intermediate_output = self.act(intermediate_output) + + with seed(ParallelMode.TENSOR): + intermediate_output = self.dropout(intermediate_output) + output = self.dense_2(intermediate_output) + + with seed(ParallelMode.TENSOR): + output = self.dropout(output) + return output + + def _checkpoint_forward(self, hidden_states: Tensor) -> Tensor: + return checkpoint(self._forward, hidden_states) + + def forward(self, hidden_states: Tensor) -> Tensor: + if self.checkpoint: + return self._checkpoint_forward(hidden_states) + else: + return self._forward(hidden_states) + + +@LAYERS.register_module +class ViTSelfAttention2D(ParallelLayer): + """Self-attention layer for 2D parallel Vision Transformer + + :param hidden_size: hidden size + :type hidden_size: int + :param num_attention_heads: number of attention heads + :type num_attention_heads: int + :param attention_dropout_prob: dropout probability for attention layers + :type attention_dropout_prob: float + :param hidden_dropout_prob: dropout probability for hidden layers + :type hidden_dropout_prob: float + :param dtype: dtype of parameters, defaults to None + :type dtype: torch.dtype, optional + :param checkpoint: whether to checkpoint the layer, defaults to False + :type checkpoint: bool, optional + """ + + def __init__(self, + hidden_size: int, + num_attention_heads: int, + attention_dropout_prob: float, + hidden_dropout_prob: float, + dtype=None, + checkpoint: bool = False + ): + super().__init__() + + assert_summa_initialization() + self.summa_dim = get_summa_dim_from_env() + self.hidden_size = hidden_size + self.num_attention_heads = divide(num_attention_heads, self.summa_dim) + self.attention_head_size = divide(hidden_size, num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + self.checkpoint = checkpoint + + self.query_key_value = Linear2D( + hidden_size, + 3 * hidden_size, + dtype=dtype, + ) + self.attention_dropout = nn.Dropout(attention_dropout_prob) + self.dense = Linear2D( + hidden_size, + hidden_size, + dtype=dtype, + ) + self.dropout = 
nn.Dropout(hidden_dropout_prob) + self.softmax = nn.Softmax(dim=-1) + + def _forward(self, hidden_states: Tensor) -> Tensor: + query_key_value = self.query_key_value(hidden_states) + new_qkv_shape = query_key_value.shape[:-1] + \ + (self.num_attention_heads, 3 * self.attention_head_size) + query_key_value = query_key_value.view(new_qkv_shape) + query_key_value = query_key_value.permute((0, 2, 1, 3)) + query_layer, key_layer, value_layer = torch.chunk( + query_key_value, 3, dim=-1) + + attention_scores = torch.matmul( + query_layer, key_layer.transpose(-1, -2)) + attention_scores = attention_scores / \ + math.sqrt(self.attention_head_size) + + attention_probs = self.softmax(attention_scores) + + with seed(ParallelMode.TENSOR): + attention_probs = self.attention_dropout(attention_probs) + + context_layer = torch.matmul(attention_probs, value_layer) + context_layer = context_layer.transpose(1, 2) + new_context_layer_shape = context_layer.size()[ + :-2] + (self.all_head_size,) + context_layer = context_layer.reshape(new_context_layer_shape) + + output = self.dense(context_layer) + with seed(ParallelMode.TENSOR): + output = self.dropout(output) + return output + + def _checkpoint_forward(self, hidden_states: Tensor) -> Tensor: + return checkpoint(self._forward, hidden_states) + + def forward(self, hidden_states: Tensor) -> Tensor: + if self.checkpoint: + return self._checkpoint_forward(hidden_states) + else: + return self._forward(hidden_states) + + +@LAYERS.register_module +class ViTHead2D(ParallelLayer): + """Output layer for 2D parallel Vision Transformer + + :param hidden_size: hidden size + :type hidden_size: int + :param num_classes: number of classes + :type num_classes: int + :param dtype: dtype of parameters, defaults to None + :type dtype: torch.dtype, optional + """ + + def __init__(self, + hidden_size, + num_classes, + dtype=None, + ): + super().__init__() + assert_summa_initialization() + self.summa_dim = get_summa_dim_from_env() + self.linear = Linear2D( + hidden_size, + num_classes, + dtype=dtype, + ) + + def forward(self, x: Tensor) -> Tensor: + x = x[:, 0] + x = self.linear(x) + return x + + +@LAYERS.register_module +class ViTPatchEmbedding2D(ParallelLayer): + """ 2D Image to Patch Embedding + + :param img_size: iamge size + :type img_size: int + :param patch_size: patch size + :type patch_size: int + :param embed_dim: dimension of embedding + :type embed_dim: int + :param in_chans: number of channels of input image, defaults to 3 + :type in_chans: int, optional + :param flatten: whether to flatten output tensor, defaults to True + :type flatten: bool, optional + """ + + def __init__(self, + img_size, + patch_size, + embed_dim, + in_chans=3, + flatten=True): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + + assert_summa_initialization() + self.summa_dim = get_summa_dim_from_env() + self.img_size = img_size + self.patch_size = patch_size + self.grid_size = (img_size[0] // patch_size[0], + img_size[1] // patch_size[1]) + self.num_patches = self.grid_size[0] * self.grid_size[1] + self.flatten = flatten + self.embed_dim = embed_dim // self.summa_dim + + with seed(ParallelMode.TENSOR): + # ensure the partitions are initialized differently + self.proj = nn.Conv2d(in_chans, + self.embed_dim, + kernel_size=patch_size, + stride=patch_size + ) + + # sync + self._broadcast_conv_params() + self.proj.weight.register_hook(self._sync_grad_during_backward) + self.proj.bias.register_hook(self._sync_grad_during_backward) + + def 
_set_tensor_parallel_attribute(self): + set_tensor_parallel_attribute(self.proj.weight) + set_tensor_parallel_attribute(self.proj.bias) + + def _broadcast_conv_params(self) -> None: + self.to(get_current_device()) + ranks_in_col = gpc.get_ranks_in_group(ParallelMode.PARALLEL_2D_COL) + + dist.broadcast(self.proj.weight, src=ranks_in_col[0], + group=gpc.get_group(ParallelMode.PARALLEL_2D_COL)) + dist.broadcast(self.proj.bias, src=ranks_in_col[0], + group=gpc.get_group(ParallelMode.PARALLEL_2D_COL)) + + def _sync_grad_during_backward(self, grad: Tensor) -> None: + dist.all_reduce(grad, group=gpc.get_group( + ParallelMode.PARALLEL_2D_COL)) + grad = grad / self.summa_dim + return grad + + def forward(self, x: Tensor) -> Tensor: + B, C, H, W = x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." + x = self.proj(x) + if self.flatten: + x = x.flatten(2).transpose(1, 2) # BCHW -> BNC + return x + + +@LAYERS.register_module +class ViTTokenFuser2D(ParallelLayer): + """ + Fuse cls token and pos embedding to the input + + :param img_size: image size + :type img_size: int + :param patch_size: patch size + :type patch_size: int + :param embed_dim: dimension of embedding + :type embed_dim: int + :param drop_rate: dropout probability, defaults to 0. + :type drop_rate: float, optional + """ + + def __init__(self, + img_size, + patch_size, + embed_dim, + drop_rate=0. + ): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + + assert_summa_initialization() + self.summa_dim = get_summa_dim_from_env() + self.img_size = img_size + self.patch_size = patch_size + self.grid_size = (img_size[0] // patch_size[0], + img_size[1] // patch_size[1]) + self.num_patches = self.grid_size[0] * self.grid_size[1] + self.embed_dim = embed_dim + + self.cls_token = nn.Parameter(torch.zeros( + 1, 1, self.embed_dim // self.summa_dim)) + self.pos_embed = nn.Parameter(torch.zeros( + 1, self.num_patches + 1, self.embed_dim // self.summa_dim)) + + # move to cuda before broadcast + self.to(get_current_device()) + + # sync param in both forward and backward + _cls_token = self.cls_token.view(-1) + _pos_embed = self.pos_embed.view(-1) + self._param = torch.cat([_cls_token, _pos_embed], dim=0) + + self._broadcast_params(self._param) + self._param.register_hook(self._sync_grad_hook) + self.pos_drop = nn.Dropout(p=drop_rate) + self._set_tensor_parallel_attribute() + + def _set_tensor_parallel_attribute(self): + set_tensor_parallel_attribute(self.cls_token) + set_tensor_parallel_attribute(self.pos_embed) + + def _broadcast_params(self, param) -> None: + " broadcast to all column ranks for data consistency " + ranks_in_col = gpc.get_ranks_in_group(ParallelMode.PARALLEL_2D_COL) + col_group = gpc.get_group(ParallelMode.PARALLEL_2D_COL) + dist.broadcast(param, src=ranks_in_col[0], + group=col_group) + + def _sync_grad_hook(self, grad) -> None: + dist.all_reduce(grad, group=gpc.get_group( + ParallelMode.PARALLEL_2D_COL)) + grad = grad / self.summa_dim + return grad + + def forward(self, x: Tensor) -> Tensor: + # stole cls_tokens impl from Phil Wang, thanks + cls_token = self.cls_token.expand(x.shape[0], -1, -1) + x = torch.cat((cls_token, x), dim=1) + with seed(ParallelMode.TENSOR): + x = self.pos_drop(x + self.pos_embed) + return x + + +@LAYERS.register_module +class ViTInputSplitter2D(ParallelLayer): + """Split the input tensor for 2D parallel Vision Transformer + """ + + def __init__(self): + 
super().__init__() + assert_summa_initialization() + self.summa_dim = get_summa_dim_from_env() + + def forward(self, x: Tensor) -> Tensor: + batch_size = x.size(0) + return _ViT_Split_Input_2D.apply( + x, + batch_size, + self.summa_dim, + ParallelMode.PARALLEL_2D_COL + ) diff --git a/colossalai/nn/layer/parallel_2d/layers.py b/colossalai/nn/layer/parallel_2d/layers.py new file mode 100644 index 000000000..570cf1c25 --- /dev/null +++ b/colossalai/nn/layer/parallel_2d/layers.py @@ -0,0 +1,258 @@ +import math + +import torch +import torch.distributed as dist +from torch import Tensor +from torch.nn import Parameter, init as init + +from colossalai.context import seed, ParallelMode +from colossalai.core import global_context as gpc +from colossalai.registry import LAYERS +from colossalai.utils import get_current_device +from ._operation import Matmul_AB_2D, Add_Bias_2D, _LayerNorm_2D +from ._utils import get_summa_dim_from_env, assert_summa_initialization +from .._common_utils import divide, set_tensor_parallel_attribute +from ..base_layer import ParallelLayer + + +@LAYERS.register_module +class Linear2D(ParallelLayer): + """ Linear layer for 2D parallelism + + :param in_features: size of each input sample + :type in_features: int + :param out_features: size of each output sample + :type out_features: int + :param bias: If set to ``False``, the layer will not learn an additive bias, defaults to True + :type bias: bool, optional + :param dtype: The dtype of parameters, defaults to None + :type dtype: torch.dtype, optional + :param skip_bias_add: If set to ``True``, it will skip bias add for linear layer, which is preserved for kernel fusion, defaults to False + :type skip_bias_add: bool, optional + """ + + def __init__(self, + in_features: int, + out_features: int, + bias: bool = True, + dtype=None, + skip_bias_add: bool = False + ): + super().__init__() + + self.in_features = in_features + self.out_features = out_features + self.skip_bias_add = skip_bias_add + + # parallel settings + assert_summa_initialization() + self.row_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) + self.col_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) + self.summa_dim = get_summa_dim_from_env() + + # partitioning dimension + self.input_size_per_partition = divide( + self.in_features, self.summa_dim) + self.hidden_size_per_partition = divide( + self.out_features, self.summa_dim) + + # create weight, shape: [k/q, h/q] + factory_kwargs = {'device': get_current_device(), 'dtype': dtype} + self.weight = Parameter(torch.empty( + self.input_size_per_partition, + self.hidden_size_per_partition, + **factory_kwargs)) + + # create bias, shape: [h/q] + if bias: + self.bias = Parameter(torch.empty( + self.hidden_size_per_partition, + **factory_kwargs)) + else: + self.register_parameter('bias', None) + + # initialize parameters + self.reset_parameters() + self._set_tensor_parallel_attributes() + + def _set_tensor_parallel_attributes(self): + set_tensor_parallel_attribute(self.weight) + if self.bias is not None: + set_tensor_parallel_attribute(self.bias) + + def reset_parameters(self) -> None: + # setting + fan_in = self.in_features + a = math.sqrt(5) + nonlinearity = 'leaky_relu' + + # init weight + std = init.calculate_gain(nonlinearity, a) / math.sqrt(fan_in) + bound = math.sqrt(3.0) * std + with seed(ParallelMode.TENSOR): + init.uniform_(self.weight, -bound, bound) + + # init bias + if self.bias is not None: + bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0 + with seed(ParallelMode.TENSOR): + 
init.uniform_(self.bias, -bound, bound) + + def forward(self, x: Tensor) -> Tensor: + # input: [m/q, n/q, k/q] + # output: [m/q, n/q, h/q] + out_shape = x.shape[:-1] + (self.hidden_size_per_partition,) + + output = Matmul_AB_2D.apply( + x, + self.weight, + self.summa_dim, + out_shape, + self.row_rank, + self.col_rank, + ParallelMode.PARALLEL_2D_ROW, + ParallelMode.PARALLEL_2D_COL, + self.data_parallel_rank, + self.pipeline_parallel_rank, + self.pipeline_parallel_size, + self.tensor_parallel_size) + + if self.bias is not None: + if self.skip_bias_add: + bias = Add_Bias_2D.apply( + None, + self.bias, + self.hidden_size_per_partition, + self.row_rank, + self.col_rank, + ParallelMode.PARALLEL_2D_ROW, + ParallelMode.PARALLEL_2D_COL, + True, + self.data_parallel_rank, + self.pipeline_parallel_rank, + self.pipeline_parallel_size, + self.tensor_parallel_size + ) + return output, bias + else: + output = Add_Bias_2D.apply( + output, + self.bias, + self.hidden_size_per_partition, + self.row_rank, + self.col_rank, + ParallelMode.PARALLEL_2D_ROW, + ParallelMode.PARALLEL_2D_COL, + False, + self.data_parallel_rank, + self.pipeline_parallel_rank, + self.pipeline_parallel_size, + self.tensor_parallel_size + ) + return output + else: + return output + + +@LAYERS.register_module +class LayerNorm2D(ParallelLayer): + r"""Layer Normalization for 2D parallelism + + :param normalized_shape: input shape from an expected input + of size. :math:`[* \times \text{normalized_shape}[0] \times \text{normalized_shape}[1] \times \ldots \times \text{normalized_shape}[-1]]` + If a single integer is used, it is treated as a singleton list, and this module will + normalize over the last dimension which is expected to be of that specific size. + :type normalized_shape: int + :param eps: a value added to the denominator for numerical stability, defaults to 1e-05 + :type eps: float, optional + :param dtype: The dtype of parameters, defaults to None + :type dtype: torch.dtype, optional + """ + + def __init__(self, + normalized_shape: int, + eps: float = 1e-05, + dtype=None + ): + super().__init__() + + # layer norm config + self.normalized_shape = normalized_shape + self.variance_epsilon = eps + + # parallel setting + assert_summa_initialization() + self.row_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) + self.col_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) + self.summa_dim = get_summa_dim_from_env() + + # partitioning dimension + self.partitioned_partition = divide(normalized_shape, self.summa_dim) + + # create parameters + factory_kwargs = {'device': get_current_device(), 'dtype': dtype} + + if self.row_rank == 0: + self.gamma = Parameter(torch.ones( + self.partitioned_partition, + **factory_kwargs)) + self.beta = Parameter(torch.zeros( + self.partitioned_partition, + **factory_kwargs)) + else: + self.gamma = Parameter(torch.tensor( + 1.0, + requires_grad=True, + **factory_kwargs)) + self.beta = Parameter(torch.tensor( + 1.0, + requires_grad=True, + **factory_kwargs)) + + self._set_tensor_parallel_attributes() + + def _set_tensor_parallel_attributes(self): + set_tensor_parallel_attribute(self.gamma) + set_tensor_parallel_attribute(self.beta) + + def forward(self, x: Tensor) -> Tensor: + with torch.no_grad(): + E_x = torch.sum(x, dim=-1, keepdim=True) # [b/q, s, 1] + torch.distributed.all_reduce( + E_x, group=gpc.get_group(ParallelMode.PARALLEL_2D_ROW)) + E_x /= self.normalized_shape + + # Var_x in the block below is the sum of input^2 + Var_x = torch.sum(x * x, dim=-1, keepdim=True) # [b/q, s, 1] + 
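+            # Like E_x above, Var_x here is first a partial sum over the local h/q slice;
+            # the all-reduce below completes E[x^2] across the row group, and the variance
+            # is then formed as E[x^2] - E[x]^2 before taking 1/sqrt(Var[x] + eps).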
torch.distributed.all_reduce( + Var_x, group=gpc.get_group(ParallelMode.PARALLEL_2D_ROW)) + Var_x /= self.normalized_shape + + Var_x = Var_x - E_x * E_x # variance of x [b/q, s, 1] + # this time 1/sqrt(Var_x + epsilon) + Var_x = 1.0 / torch.sqrt(Var_x + self.variance_epsilon) + + output = _LayerNorm_2D.apply(x, E_x, Var_x, self.normalized_shape, + ParallelMode.PARALLEL_2D_ROW, ParallelMode.PARALLEL_2D_COL) + bias = Add_Bias_2D.apply( + None, self.beta, self.partitioned_partition, + self.row_rank, self.col_rank, + ParallelMode.PARALLEL_2D_ROW, ParallelMode.PARALLEL_2D_COL, + True, + self.data_parallel_rank, + self.pipeline_parallel_rank, + self.pipeline_parallel_size, + self.tensor_parallel_size + ) + scale = Add_Bias_2D.apply( + None, self.gamma, self.partitioned_partition, + self.row_rank, self.col_rank, + ParallelMode.PARALLEL_2D_ROW, ParallelMode.PARALLEL_2D_COL, + True, + self.data_parallel_rank, + self.pipeline_parallel_rank, + self.pipeline_parallel_size, + self.tensor_parallel_size + ) + output = torch.addcmul(bias, scale, output) + return output diff --git a/colossalai/nn/layer/parallel_2p5d/__init__.py b/colossalai/nn/layer/parallel_2p5d/__init__.py new file mode 100644 index 000000000..b4ebc12ea --- /dev/null +++ b/colossalai/nn/layer/parallel_2p5d/__init__.py @@ -0,0 +1,13 @@ +from ._operation import Matmul_AB_2p5D, Matmul_ABT_2p5D, Matmul_ATB_2p5D, Sum_2p5D, Add_Bias_2p5D +from ._transformer import TransformerMLP2p5D, TransformerSelfAttention2p5D, TransformerLayer2p5D +from ._vit import (ViTMLP2p5D, ViTSelfAttention2p5D, ViTHead2p5D, ViTPatchEmbedding2p5D, ViTTokenFuser2p5D, + ViTInputSplitter2p5D) +from .layers import Linear2p5D, LayerNorm2p5D + +__all__ = [ + 'Matmul_AB_2p5D', 'Matmul_ABT_2p5D', 'Matmul_ATB_2p5D', 'Sum_2p5D', 'Add_Bias_2p5D', + 'TransformerMLP2p5D', 'TransformerSelfAttention2p5D', 'TransformerLayer2p5D', + 'ViTMLP2p5D', 'ViTSelfAttention2p5D', 'ViTHead2p5D', 'ViTPatchEmbedding2p5D', 'ViTTokenFuser2p5D', + 'ViTInputSplitter2p5D', + 'Linear2p5D', 'LayerNorm2p5D' +] diff --git a/colossalai/nn/layer/parallel_2p5d/_operation.py b/colossalai/nn/layer/parallel_2p5d/_operation.py new file mode 100644 index 000000000..db50b44fb --- /dev/null +++ b/colossalai/nn/layer/parallel_2p5d/_operation.py @@ -0,0 +1,535 @@ +from typing import Any, Tuple + +import torch +import torch.distributed as dist +from torch import Tensor + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.utils import get_current_device, empty_cache + + +def get_parallel_group(parallel_mode: ParallelMode): + return gpc.get_group(parallel_mode) + + +def get_global_rank(): + return gpc.get_global_rank() + + +def get_parallel_rank(parallel_mode: ParallelMode): + return gpc.get_local_rank(parallel_mode) + + +class Matmul_AB_2p5D(torch.autograd.Function): + """Matrix multiplication for :math:`C = AB` + """ + + @staticmethod + def forward(ctx: Any, + A: Tensor, + B: Tensor, + tesseract_dim: int, + tesseract_dep: int, + out_shape: Tuple[int, ...], + row_rank: int, + col_rank: int, + dep_rank: int, + row_parallel_mode: ParallelMode, + col_parallel_mode: ParallelMode, + dep_parallel_mode: ParallelMode, + data_parallel_rank: int, + pipeline_parallel_rank: int, + pipeline_parallel_size: int, + tensor_parallel_size: int) -> Tensor: + # A: [b / dq, s, h / q] -> [(b * s) / dq, h / q] + # B: [h / dq, s / q] + # C: [b / dq, s, s / q] -> [(b * s) / dq, s / q] + + assert A.shape[-1] == B.shape[-2], \ + 'Invalid shapes: A={}, B={} for 
AB.'.format(A.shape, B.shape) + + empty_cache() + if ctx: + ctx.save_for_backward(A, B) + + A_shape = A.shape + A = A.reshape((-1, A_shape[-1])) + B_shape = B.shape + B = B.reshape((-1, B_shape[-1])) + C_shape = (A.shape[0], B.shape[-1]) + C = torch.zeros(C_shape, dtype=A.dtype, device=get_current_device()) + + for i in range(tesseract_dim): + A_temp = A.clone() + B_temp = B.clone() + src_a = i + row_rank * tesseract_dim + dep_rank * ( + tesseract_dim ** 2) + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + pipeline_parallel_rank * tensor_parallel_size + dist.broadcast(A_temp, src=src_a, + group=get_parallel_group(row_parallel_mode)) + src_b = col_rank + i * tesseract_dim + dep_rank * ( + tesseract_dim ** 2) + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + pipeline_parallel_rank * tensor_parallel_size + dist.broadcast(B_temp, src=src_b, + group=get_parallel_group(col_parallel_mode)) + torch.addmm(C, A_temp, B_temp, out=C) + + out = C.reshape(out_shape) + + if ctx: + ctx.tesseract_dim = tesseract_dim + ctx.tesseract_dep = tesseract_dep + ctx.row_rank = row_rank + ctx.col_rank = col_rank + ctx.dep_rank = dep_rank + ctx.row_parallel_mode = row_parallel_mode + ctx.col_parallel_mode = col_parallel_mode + ctx.dep_parallel_mode = dep_parallel_mode + ctx.A_shape = A_shape + ctx.B_shape = B_shape + ctx.data_parallel_rank = data_parallel_rank + ctx.pipeline_parallel_rank = pipeline_parallel_rank + ctx.pipeline_parallel_size = pipeline_parallel_size + ctx.tensor_parallel_size = tensor_parallel_size + + return out + + @staticmethod + def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: + A, B = ctx.saved_tensors + A_grad = Matmul_ABT_2p5D.forward( + None, + output_grad, B, + ctx.tesseract_dim, ctx.tesseract_dep, ctx.A_shape, + ctx.row_rank, ctx.col_rank, ctx.dep_rank, + ctx.row_parallel_mode, + ctx.col_parallel_mode, + ctx.dep_parallel_mode, + ctx.data_parallel_rank, + ctx.pipeline_parallel_rank, + ctx.pipeline_parallel_size, + ctx.tensor_parallel_size + ) + B_grad = Matmul_ATB_2p5D.forward( + None, + A, output_grad, + ctx.tesseract_dim, ctx.tesseract_dep, ctx.B_shape, + ctx.row_rank, ctx.col_rank, ctx.dep_rank, + ctx.row_parallel_mode, + ctx.col_parallel_mode, + ctx.dep_parallel_mode, + ctx.data_parallel_rank, + ctx.pipeline_parallel_rank, + ctx.pipeline_parallel_size, + ctx.tensor_parallel_size + ) + return A_grad, B_grad, None, None, None, None, None, None, None, None, None, None, None, None, None + + +class Matmul_ABT_2p5D(torch.autograd.Function): + """Matrix multiplication for :math:`C = AB^T` + """ + + @staticmethod + def forward(ctx: Any, + A: Tensor, + B: Tensor, + tesseract_dim: int, + tesseract_dep: int, + out_shape: Tuple[int, ...], + row_rank: int, + col_rank: int, + dep_rank: int, + row_parallel_mode: ParallelMode, + col_parallel_mode: ParallelMode, + dep_parallel_mode: ParallelMode, + data_parallel_rank: int, + pipeline_parallel_rank: int, + pipeline_parallel_size: int, + tensor_parallel_size: int + ) -> Tensor: + + assert A.shape[-1] == B.shape[-1], \ + 'Invalid shapes: A={}, B={} for ABT.'.format(A.shape, B.shape) + + empty_cache() + if ctx: + ctx.save_for_backward(A, B) + + A_shape = A.shape + A = A.reshape((-1, A_shape[-1])) + B_shape = B.shape + B = B.reshape((-1, B_shape[-1])) + C_shape = (A.shape[0], B.shape[0]) + C = torch.empty(C_shape, dtype=A.dtype, device=get_current_device()) + + for i in range(tesseract_dim): + B_temp = B.clone() + src_b = col_rank + i * tesseract_dim + dep_rank * ( + tesseract_dim ** 2) + 
data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \ + pipeline_parallel_rank * tensor_parallel_size + dist.broadcast(B_temp, src=src_b, group=gpc.get_group(col_parallel_mode)) + C_temp = torch.matmul(A, B_temp.transpose(0, 1)) + src_c = i + row_rank * tesseract_dim + dep_rank * ( + tesseract_dim ** 2) + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \ + pipeline_parallel_rank * tensor_parallel_size + dist.reduce(C_temp, dst=src_c, group=gpc.get_group(row_parallel_mode)) + if i == col_rank: + C = C_temp.clone() + + out = C.reshape(out_shape) + + if ctx: + ctx.tesseract_dim = tesseract_dim + ctx.tesseract_dep = tesseract_dep + ctx.row_rank = row_rank + ctx.col_rank = col_rank + ctx.dep_rank = dep_rank + ctx.row_parallel_mode = row_parallel_mode + ctx.col_parallel_mode = col_parallel_mode + ctx.dep_parallel_mode = dep_parallel_mode + ctx.A_shape = A_shape + ctx.B_shape = B_shape + ctx.data_parallel_rank = data_parallel_rank + ctx.pipeline_parallel_rank = pipeline_parallel_rank + ctx.pipeline_parallel_size = pipeline_parallel_size + ctx.tensor_parallel_size = tensor_parallel_size + + return out + + @staticmethod + def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: + A, B = ctx.saved_tensors + A_grad = Matmul_AB_2p5D.forward( + None, + output_grad, B, + ctx.tesseract_dim, ctx.tesseract_dep, ctx.A_shape, + ctx.row_rank, ctx.col_rank, ctx.dep_rank, + ctx.row_parallel_mode, + ctx.col_parallel_mode, + ctx.dep_parallel_mode, + ctx.data_parallel_rank, + ctx.pipeline_parallel_rank, + ctx.pipeline_parallel_size, + ctx.tensor_parallel_size + ) + B_grad = Matmul_ATB_2p5D.forward( + None, + output_grad, A, + ctx.tesseract_dim, ctx.tesseract_dep, ctx.B_shape, + ctx.row_rank, ctx.col_rank, ctx.dep_rank, + ctx.row_parallel_mode, + ctx.col_parallel_mode, + ctx.dep_parallel_mode, + ctx.data_parallel_rank, + ctx.pipeline_parallel_rank, + ctx.pipeline_parallel_size, + ctx.tensor_parallel_size + ) + return A_grad, B_grad, None, None, None, None, None, None, None, None, None, None, None, None, None + + +class Matmul_ATB_2p5D(torch.autograd.Function): + """Matrix multiplication for :math:`C = A^TB` + """ + + @staticmethod + def forward(ctx: Any, + A: Tensor, + B: Tensor, + tesseract_dim: int, + tesseract_dep: int, + out_shape: Tuple[int, ...], + row_rank: int, + col_rank: int, + dep_rank: int, + row_parallel_mode: ParallelMode, + col_parallel_mode: ParallelMode, + dep_parallel_mode: ParallelMode, + data_parallel_rank: int, + pipeline_parallel_rank: int, + pipeline_parallel_size: int, + tensor_parallel_size: int): + + assert A.shape[-2] == B.shape[-2], \ + 'Invalid shapes: A={}, B={} for ATB.'.format(A.shape, B.shape) + + empty_cache() + if ctx: + ctx.save_for_backward(A, B) + + A_shape = A.shape + A = A.reshape((-1, A_shape[-1])) + B_shape = B.shape + B = B.reshape((-1, B_shape[-1])) + C_shape = (A.shape[-1], B.shape[-1]) + C = torch.empty(C_shape, dtype=A.dtype, device=get_current_device()) + + for i in range(tesseract_dim): + A_temp = A.clone() + src_a = i + row_rank * tesseract_dim + dep_rank * ( + tesseract_dim ** 2) + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \ + pipeline_parallel_rank * tensor_parallel_size + dist.broadcast(A_temp, src=src_a, + group=get_parallel_group(row_parallel_mode)) + C_temp = torch.matmul(A_temp.transpose(0, 1), B) + src_c = col_rank + i * tesseract_dim + dep_rank * ( + tesseract_dim ** 2) + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \ + pipeline_parallel_rank * 
tensor_parallel_size + dist.reduce(C_temp, dst=src_c, + group=get_parallel_group(col_parallel_mode)) + if i == row_rank: + C = C_temp.clone() + + out = C.reshape(out_shape) + + if ctx: + ctx.tesseract_dim = tesseract_dim + ctx.tesseract_dep = tesseract_dep + ctx.row_rank = row_rank + ctx.col_rank = col_rank + ctx.dep_rank = dep_rank + ctx.row_parallel_mode = row_parallel_mode + ctx.col_parallel_mode = col_parallel_mode + ctx.dep_parallel_mode = dep_parallel_mode + ctx.A_shape = A_shape + ctx.B_shape = B_shape + ctx.data_parallel_rank = data_parallel_rank + ctx.pipeline_parallel_rank = pipeline_parallel_rank + ctx.pipeline_parallel_size = pipeline_parallel_size + ctx.tensor_parallel_size = tensor_parallel_size + + return out + + @staticmethod + def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: + A, B = ctx.saved_tensors + A_grad = Matmul_ABT_2p5D.forward( + None, + B, output_grad, + ctx.tesseract_dim, ctx.tesseract_dep, ctx.A_shape, + ctx.row_rank, ctx.col_rank, ctx.dep_rank, + ctx.row_parallel_mode, + ctx.col_parallel_mode, + ctx.dep_parallel_mode, + ctx.data_parallel_rank, + ctx.pipeline_parallel_rank, + ctx.pipeline_parallel_size, + ctx.tensor_parallel_size + ) + B_grad = Matmul_AB_2p5D.forward( + None, + A, output_grad, + ctx.tesseract_dim, ctx.tesseract_dep, ctx.B_shape, + ctx.row_rank, ctx.col_rank, ctx.dep_rank, + ctx.row_parallel_mode, + ctx.col_parallel_mode, + ctx.dep_parallel_mode, + ctx.data_parallel_rank, + ctx.pipeline_parallel_rank, + ctx.pipeline_parallel_size, + ctx.tensor_parallel_size + ) + return A_grad, B_grad, None, None, None, None, None, None, None, None, None, None, None, None, None + + +class Add_Bias_2p5D(torch.autograd.Function): + """Matrix add bias: :math:`C = A + b` + """ + + @staticmethod + def forward(ctx: Any, + input: Tensor, + bias: Tensor, + output_size_per_partition: int, + tesseract_dim: int, + tesseract_dep: int, + row_rank: int, + col_rank: int, + dep_rank: int, + row_parallel_mode: ParallelMode, + col_parallel_mode: ParallelMode, + dep_parallel_mode: ParallelMode, + skip_bias_add: bool, + data_parallel_rank: int, + pipeline_parallel_rank: int, + pipeline_parallel_size: int, + tensor_parallel_size: int + ) -> Tensor: + if row_rank == 0: + bias_temp = bias.clone() + else: + bias_temp = torch.zeros( + output_size_per_partition, + dtype=bias.dtype, + device=get_current_device()) + src_rank = col_rank + dep_rank * ( + tesseract_dim ** 2) + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \ + pipeline_parallel_rank * tensor_parallel_size + dist.broadcast(bias_temp, src=src_rank, group=get_parallel_group(col_parallel_mode)) + + ctx.row_rank = row_rank + ctx.col_rank = col_rank + ctx.dep_rank = dep_rank + ctx.tesseract_dim = tesseract_dim + ctx.tesseract_dep = tesseract_dep + ctx.row_parallel_mode = row_parallel_mode + ctx.col_parallel_mode = col_parallel_mode + ctx.dep_parallel_mode = dep_parallel_mode + ctx.bias = skip_bias_add + ctx.data_parallel_rank = data_parallel_rank + ctx.pipeline_parallel_rank = pipeline_parallel_rank + ctx.pipeline_parallel_size = pipeline_parallel_size + ctx.tensor_parallel_size = tensor_parallel_size + + if skip_bias_add: + return bias_temp + else: + output = input + bias_temp + return output + + @staticmethod + def backward(ctx, output_grad): + row_rank = ctx.row_rank + col_rank = ctx.col_rank + dep_rank = ctx.dep_rank + tesseract_dim = ctx.tesseract_dim + tesseract_dep = ctx.tesseract_dep + row_parallel_mode = ctx.row_parallel_mode + col_parallel_mode = ctx.col_parallel_mode + 
dep_parallel_mode = ctx.dep_parallel_mode + data_parallel_rank = ctx.data_parallel_rank + pipeline_parallel_rank = ctx.pipeline_parallel_rank + pipeline_parallel_size = ctx.pipeline_parallel_size + tensor_parallel_size = ctx.tensor_parallel_size + + if ctx.bias: + dst_rank = col_rank + dep_rank * ( + tesseract_dim ** 2) + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \ + pipeline_parallel_rank * tensor_parallel_size + dist.reduce(output_grad, dst=dst_rank, group=get_parallel_group(col_parallel_mode)) + if row_rank == 0: + return None, output_grad, None, None, None, None, None, None, None, None, None, None, None, None, None, None + else: + grad_tmp = torch.zeros_like(output_grad) + return None, grad_tmp, None, None, None, None, None, None, None, None, None, None, None, None, None, None + else: + reduce_dim = tuple(range(output_grad.ndim - 1)) + reduce = torch.sum(output_grad, dim=reduce_dim) + dst_rank = col_rank + dep_rank * ( + tesseract_dim ** 2) + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + \ + pipeline_parallel_rank * tensor_parallel_size + dist.reduce(reduce, dst=dst_rank, group=get_parallel_group(col_parallel_mode)) + if row_rank == 0: + return output_grad, reduce, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None + else: + reduce_tmp = torch.zeros_like(reduce) + return output_grad, reduce_tmp, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None + + +class _LayerNorm_2p5D(torch.autograd.Function): + @staticmethod + def forward(ctx: Any, + input: Tensor, + E_x: Tensor, + Var_x: Tensor, + hidden_size: int, + row_parallel_mode: ParallelMode, + col_parallel_mode: ParallelMode, + dep_parallel_mode: ParallelMode) -> Tensor: + input = input - E_x + # in here, input = x - E[x], Var_x = 1 / sqrt(Var[x] + eps) + ctx.hidden_size = hidden_size + output = input * Var_x + ctx.save_for_backward(output, Var_x) + ctx.row_parallel_mode = row_parallel_mode + ctx.col_parallel_mode = col_parallel_mode + ctx.dep_parallel_mode = dep_parallel_mode + return output + + @staticmethod + def backward(ctx, output_grad): + row_parallel_mode = ctx.row_parallel_mode + col_parallel_mode = ctx.col_parallel_mode + dep_parallel_mode = ctx.dep_parallel_mode + x, Var_x = ctx.saved_tensors + # in here, Var_x = 1 / sqrt(Var[x] + eps), x = (x - E[x]) * Var_x + with torch.no_grad(): + output_grad_sum = torch.sum(output_grad, dim=-1, keepdim=True) + torch.distributed.all_reduce( + output_grad_sum, group=get_parallel_group(row_parallel_mode)) + output_grad_sum /= ctx.hidden_size + + output_grad_mul_x_sum = torch.sum( + output_grad * x, dim=-1, keepdim=True) + torch.distributed.all_reduce( + output_grad_mul_x_sum, group=get_parallel_group(row_parallel_mode)) + output_grad_mul_x_sum /= ctx.hidden_size + + input_grad = output_grad.clone() + input_grad -= x * output_grad_mul_x_sum + input_grad -= output_grad_sum + input_grad *= Var_x + + return input_grad, None, None, None, None, None, None + + +class Sum_2p5D(torch.autograd.Function): + """Compute the sum of input tensors + """ + + @staticmethod + def forward(ctx, + inputs, + dim, + tesseract_dim, + row_parallel_mode, + keepdim=False): + # input: [b/q, s, h/q] + empty_cache() + ctx.save_for_backward(inputs) + # sum: [b/q, s] + out = torch.sum(inputs, dim=dim, keepdim=keepdim) + torch.distributed.all_reduce( + out, group=gpc.get_group(row_parallel_mode)) + return out + + @staticmethod + def backward(ctx, output_grad): + with torch.no_grad(): + inputs = 
ctx.saved_tensors + input_grad = torch.ones(inputs.shape, dtype=output_grad.dtype) + return input_grad, None, None, None, None, None + + +class _ViT_Split_2p5D(torch.autograd.Function): + @staticmethod + def forward(ctx, inputs, batch_size, + tesseract_dim, tesseract_dep, + xz_parallel_mode): + # inputs: [b, s, h/q] + # output: [b/dq, s, h/q] + empty_cache() + + ctx.batch_size = batch_size + ctx.tesseract_dim = tesseract_dim + ctx.tesseract_dep = tesseract_dep + ctx.xz_parallel_mode = xz_parallel_mode + xz_rank = gpc.get_local_rank(xz_parallel_mode) + output = torch.chunk(inputs, tesseract_dep * + tesseract_dim, dim=0)[xz_rank] + output = output.clone() + return output + + @staticmethod + def backward(ctx, output_grad): + # output_grad: [b/dq, s, h/q] + # grads: [b, s, h/q] + # * + grads_shape = (ctx.batch_size,) + output_grad.shape[1:] + grads = torch.empty(grads_shape, + dtype=output_grad.dtype, + device=get_current_device()) + dist.all_gather(list(grads.chunk(ctx.tesseract_dim * ctx.tesseract_dep, dim=0)), + output_grad.contiguous(), + group=get_parallel_group(ctx.xz_parallel_mode)) + return grads, None, None, None, None diff --git a/colossalai/nn/layer/parallel_2p5d/_transformer.py b/colossalai/nn/layer/parallel_2p5d/_transformer.py new file mode 100644 index 000000000..c13ef87b4 --- /dev/null +++ b/colossalai/nn/layer/parallel_2p5d/_transformer.py @@ -0,0 +1,206 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import math + +import torch +from torch import nn as nn, Tensor + +from colossalai.nn.layer._common_utils import divide +from colossalai.registry import LAYERS +from ._utils import assert_tesseract_initialization, \ + get_tesseract_dim_dep_from_env +from .layers import Linear2p5D, LayerNorm2p5D +from .._common_utils import ACT2FN + + +@LAYERS.register_module +class TransformerMLP2p5D(nn.Module): + """ + MLP will take the input with h hidden state, project it to mlp_ratio * h + hidden dimension, perform nonlinear transformation, and project the + state back into h hidden dimension. At the end, dropout is also + applied. + + :param in_features: the size of input tensor + :type in_features: int + :param mlp_ratio: hidden size of MLP divided by embedding dim, defaults to 4.0 + :type mlp_ratio: int, optional + :param act_func: activation function, defaults to 'gelu' + :type act_func: str, optional + :param dropout_prob: dropout probability, defaults to 0. + :type dropout_prob: float, optional + :param dtype: dtype of parameters, defaults to None + :type dtype: torch.dtype, optional + """ + + def __init__(self, + in_features: int, + mlp_ratio: int, + act_func: str = 'gelu', + dropout_prob: float = 0., + dtype=None, + ): + super().__init__() + assert_tesseract_initialization() + self.tesseract_dim, self.tesseract_dep = get_tesseract_dim_dep_from_env() + self.in_features = in_features + + # Project to h * mlp_ratio. + self.dense_1 = Linear2p5D( + in_features, + mlp_ratio * in_features, + dtype=dtype + ) + + assert act_func in ACT2FN.keys(), f'Invalid value for argument act_func, ' \ + f'activation function can only be {list(ACT2FN.keys())}' + self.activation_func = ACT2FN[act_func] + + # Project back to h. 
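+ # dense_2 is the second Linear2p5D projection: with tesseract dimension q each
+ # rank holds a [mlp_ratio * h / q, h / q] weight shard, so the local activation
+ # of shape [..., mlp_ratio * h / q] is mapped back to [..., h / q].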
+ self.dense_2 = Linear2p5D( + mlp_ratio * in_features, + in_features, + dtype=dtype + ) + self.dropout = nn.Dropout(dropout_prob) + self.layernorm = LayerNorm2p5D(in_features, dtype=dtype) + + def forward(self, x: Tensor) -> Tensor: + intermediate_output = self.dense_1(x) + intermediate_output = self.activation_func(intermediate_output) + output = self.dense_2(intermediate_output) + output = self.dropout(output) + output = self.layernorm(x + output) + return output + + +@LAYERS.register_module +class TransformerSelfAttention2p5D(nn.Module): + """Self attention layer for 2.5D parallel Transformer + + :param hidden_size: hidden size + :type hidden_size: int + :param num_attention_heads: number of attention heads + :type num_attention_heads: int + :param attention_dropout_prob: dropout probability for attention layer + :type attention_dropout_prob: float + :param hidden_dropout_prob: dropout probability for hidden layer + :type hidden_dropout_prob: float + :param dtype: dtype of parameters, defaults to None + :type dtype: torch.dtype, optional + """ + + def __init__(self, + hidden_size, + num_attention_heads, + attention_dropout_prob, + hidden_dropout_prob, + dtype=None, + ): + super().__init__() + + assert_tesseract_initialization() + self.tesseract_dim, self.tesseract_dep = get_tesseract_dim_dep_from_env() + self.hidden_size = hidden_size + self.num_attention_heads = divide( + num_attention_heads, self.tesseract_dim) # * + self.attention_head_size = divide(hidden_size, num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query_key_value = Linear2p5D( + hidden_size, + 3 * hidden_size, + dtype=dtype, + ) + self.attention_dropout = nn.Dropout(attention_dropout_prob) + self.dense = Linear2p5D( + hidden_size, + hidden_size, + dtype=dtype, + ) + self.dropout = nn.Dropout(hidden_dropout_prob) + self.layernorm = LayerNorm2p5D( + hidden_size, + dtype=dtype) + + def forward(self, hidden_states: Tensor, attention_mask: Tensor) -> Tensor: + query_key_value = self.query_key_value(hidden_states) + new_qkv_shape = query_key_value.shape[:-1] + \ + (self.num_attention_heads, 3 * self.attention_head_size) + query_key_value = query_key_value.view(new_qkv_shape) + query_key_value = query_key_value.permute((0, 2, 1, 3)) + query_layer, key_layer, value_layer = torch.chunk( + query_key_value, 3, dim=-1) + + attention_scores = torch.matmul( + query_layer, key_layer.transpose(-1, -2)) + attention_scores = attention_scores / \ + math.sqrt(self.attention_head_size) + attention_scores = attention_scores + attention_mask + attention_probs = nn.Softmax(dim=-1)(attention_scores) + attention_probs = self.attention_dropout(attention_probs) + + context_layer = torch.matmul(attention_probs, value_layer) + context_layer = context_layer.permute((0, 2, 1, 3)).contiguous() + new_context_layer_shape = context_layer.size()[ + :-2] + (self.all_head_size,) + context_layer = context_layer.view(*new_context_layer_shape) + + output = self.dense(context_layer) + output = self.dropout(output) + attention_output = self.layernorm(hidden_states + output) + + return attention_output + + +@LAYERS.register_module +class TransformerLayer2p5D(nn.Module): + """Transformer layer which contains a self-attention layer and a MLP layer + + :param hidden_size: hidden size + :type hidden_size: int + :param num_attention_heads: number of attention heads + :type num_attention_heads: int + :param act_func: activation function, defaults to 'gelu' + :type act_func: str, optional + :param mlp_ratio: hidden 
size of MLP divided by embedding dim, defaults to 4.0 + :type mlp_ratio: float, optional + :param attention_dropout_prob: dropout probability for attention layer, defaults to 0. + :type attention_dropout_prob: float, optional + :param hidden_dropout_prob: dropout probability for attention layer, defaults to 0. + :type hidden_dropout_prob: float, optional + :param dtype: dtype of parameters, defaults to None + :type dtype: torch.dtype, optional + """ + + def __init__(self, + hidden_size, + num_attention_heads, + act_func='gelu', + mlp_ratio=4, + attention_dropout_prob: float = 0., + hidden_dropout_prob: float = 0., + dtype=None, + ): + super().__init__() + + self.attention = TransformerSelfAttention2p5D( + hidden_size=hidden_size, + num_attention_heads=num_attention_heads, + attention_dropout_prob=attention_dropout_prob, + hidden_dropout_prob=hidden_dropout_prob, + dtype=dtype, + ) + self.mlp = TransformerMLP2p5D( + in_features=hidden_size, + dropout_prob=hidden_dropout_prob, + act_func=act_func, + mlp_ratio=mlp_ratio, + dtype=dtype, + ) + + def forward(self, hidden_states: Tensor, attention_mask: Tensor) -> Tensor: + attention_output = self.attention(hidden_states, attention_mask) + output = self.mlp(attention_output) + return output diff --git a/colossalai/nn/layer/parallel_2p5d/_utils.py b/colossalai/nn/layer/parallel_2p5d/_utils.py new file mode 100644 index 000000000..c9c6b194f --- /dev/null +++ b/colossalai/nn/layer/parallel_2p5d/_utils.py @@ -0,0 +1,25 @@ +import os + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc + + +def get_tesseract_dim_dep_from_env(): + try: + tesseract_dim = int(os.environ['TESSERACT_DIM']) + tesseract_dep = int(os.environ['TESSERACT_DEP']) + assert tesseract_dim > 0, 'TESSERACT_DIM must be larger than zero' + assert tesseract_dep > 0, 'TESSERACT_DEP must be larger than zero' + return tesseract_dim, tesseract_dep + + except KeyError as e: + raise EnvironmentError('TESSERACT_DIM or TESSERACT_DEP is not found in the current environment, ' + 'please make sure that you have used the correct process group initializer') + + +def assert_tesseract_initialization(): + assert gpc.is_initialized(ParallelMode.PARALLEL_2P5D_COL) and \ + gpc.is_initialized(ParallelMode.PARALLEL_2P5D_ROW) and \ + gpc.is_initialized(ParallelMode.PARALLEL_2P5D_DEP) and \ + gpc.is_initialized(ParallelMode.PARALLEL_2P5D_XZ), \ + 'Both PARALLEL_2P5D_COL, PARALLEL_2P5D_ROW, PARALLEL_2P5D_DEP and PARALLEL_2P5D_XZ must be initialized by the process group initializer' diff --git a/colossalai/nn/layer/parallel_2p5d/_vit.py b/colossalai/nn/layer/parallel_2p5d/_vit.py new file mode 100644 index 000000000..4e992ac34 --- /dev/null +++ b/colossalai/nn/layer/parallel_2p5d/_vit.py @@ -0,0 +1,351 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import math + +import torch +from torch import nn as nn, Tensor, distributed as dist + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.nn.layer.vanilla_vision_transformer.layers import to_2tuple +from colossalai.registry import LAYERS +from colossalai.utils import get_current_device +from ._operation import _ViT_Split_2p5D +from ._utils import assert_tesseract_initialization, \ + get_tesseract_dim_dep_from_env +from .layers import Linear2p5D +from .._common_utils import ACT2FN, divide, CheckpointModule +from .._common_utils import set_tensor_parallel_attribute + + +@LAYERS.register_module +class 
ViTMLP2p5D(CheckpointModule): + """MLP layer for 2.5D parallel Vision Transformer + + :param in_features: size of each input sample + :type in_features: int + :param mlp_ratio: hidden size of MLP divided by embedding dim + :type mlp_ratio: int + :param act_func: activation function, defaults to 'gelu' + :type act_func: str, optional + :param dropout_prob: dropout probability, defaults to 0. + :type dropout_prob: float, optional + :param dtype: The dtype of parameters, defaults to None + :type dtype: torch.dtype, optional + :param checkpoint: If set to `True`, activation checkpoint is used, defaults to `False` + :type checkpoint: bool, optional + """ + + def __init__(self, + in_features: int, + mlp_ratio: int, + act_func: str = 'gelu', + dropout_prob: float = 0., + dtype=None, + checkpoint: bool = False + ): + super().__init__(checkpoint=checkpoint) + + assert_tesseract_initialization() + self.in_features = in_features + self.mlp_ratio = mlp_ratio + + # Project to mlp_ratio * h. + self.dense_1 = Linear2p5D( + self.in_features, + self.mlp_ratio * self.in_features, + dtype=dtype, + ) + + self.act = ACT2FN[act_func] + + # Project back to h. + self.dense_2 = Linear2p5D( + self.mlp_ratio * self.in_features, + self.in_features, + dtype=dtype, + ) + self.dropout = nn.Dropout(dropout_prob) + + def _forward(self, hidden_states: Tensor) -> Tensor: + intermediate_output = self.dense_1(hidden_states) + intermediate_output = self.act(intermediate_output) + intermediate_output = self.dropout(intermediate_output) + output = self.dense_2(intermediate_output) + output = self.dropout(output) + return output + + +@LAYERS.register_module +class ViTSelfAttention2p5D(CheckpointModule): + """Self-attention layer for 2.5D parallel Vision Transformer + + :param hidden_size: hidden size + :type hidden_size: int + :param num_attention_heads: number of attention heads + :type num_attention_heads: int + :param attention_dropout_prob: dropout probability for attention layers + :type attention_dropout_prob: float + :param hidden_dropout_prob: dropout probability for hidden layers + :type hidden_dropout_prob: float + :param dtype: dtype of parameters, defaults to None + :type dtype: torch.dtype, optional + :param checkpoint: If set to `True`, activation checkpoint is used, defaults to `False` + :type checkpoint: bool, optional + """ + + def __init__(self, + hidden_size, + num_attention_heads, + attention_dropout_prob, + hidden_dropout_prob, + dtype=None, + checkpoint: bool = False + ): + super().__init__(checkpoint=checkpoint) + + assert_tesseract_initialization() + self.tesseract_dim, self.tesseract_dep = get_tesseract_dim_dep_from_env() + self.hidden_size = hidden_size + self.num_attention_heads = divide( + num_attention_heads, self.tesseract_dim) # * + self.attention_head_size = divide(hidden_size, num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query_key_value = Linear2p5D( + hidden_size, + 3 * hidden_size, + dtype=dtype, + ) + self.attention_dropout = nn.Dropout(attention_dropout_prob) + self.dense = Linear2p5D( + hidden_size, + hidden_size, + dtype=dtype, + ) + self.dropout = nn.Dropout(hidden_dropout_prob) + + def _forward(self, hidden_states: Tensor) -> Tensor: + query_key_value = self.query_key_value(hidden_states) + new_qkv_shape = query_key_value.shape[:-1] + \ + (self.num_attention_heads, 3 * self.attention_head_size) + query_key_value = query_key_value.view(new_qkv_shape) + query_key_value = query_key_value.permute((0, 2, 1, 3)) + query_layer, 
key_layer, value_layer = torch.chunk( + query_key_value, 3, dim=-1) + + attention_scores = torch.matmul( + query_layer, key_layer.transpose(-1, -2)) + attention_scores = attention_scores / \ + math.sqrt(self.attention_head_size) + + attention_probs = nn.Softmax(dim=-1)(attention_scores) + attention_probs = self.attention_dropout(attention_probs) + + context_layer = torch.matmul(attention_probs, value_layer) + context_layer = context_layer.transpose(1, 2) + new_context_layer_shape = context_layer.size()[ + :-2] + (self.all_head_size,) + context_layer = context_layer.reshape(new_context_layer_shape) + + output = self.dense(context_layer) + output = self.dropout(output) + return output + + +@LAYERS.register_module +class ViTHead2p5D(nn.Module): + """Output layer for 2.5D parallel Vision Transformer + + :param hidden_size: hidden size + :type hidden_size: int + :param num_classes: number of classes + :type num_classes: int + :param dtype: dtype of parameters, defaults to None + :type dtype: torch.dtype, optional + """ + + def __init__(self, + hidden_size, + num_classes, + dtype=None, + ): + super().__init__() + assert_tesseract_initialization() + self.linear = Linear2p5D( + hidden_size, + num_classes, + dtype=dtype, + ) + + def forward(self, x: Tensor) -> Tensor: + x = x[:, 0] + x = self.linear(x) + return x + + +@LAYERS.register_module +class ViTPatchEmbedding2p5D(nn.Module): + """ 2.5D Image to Patch Embedding + + :param img_size: iamge size + :type img_size: int + :param patch_size: patch size + :type patch_size: int + :param embed_dim: dimension of embedding + :type embed_dim: int + :param in_chans: number of channels of input image, defaults to 3 + :type in_chans: int, optional + :param flatten: whether to flatten output tensor, defaults to True + :type flatten: bool, optional + """ + + def __init__(self, + img_size, + patch_size, + embed_dim, + in_chans=3, + flatten=True): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + + assert_tesseract_initialization() + self.tesseract_dim, self.tesseract_dep = get_tesseract_dim_dep_from_env() + self.img_size = img_size + self.patch_size = patch_size + self.grid_size = (img_size[0] // patch_size[0], + img_size[1] // patch_size[1]) + self.num_patches = self.grid_size[0] * self.grid_size[1] + self.flatten = flatten + self.embed_dim = embed_dim // self.tesseract_dim # * + + self.proj = nn.Conv2d(in_chans, + self.embed_dim, + kernel_size=patch_size, + stride=patch_size, + ) + + # move self to cuda before sync + self.to(get_current_device()) + + # sync + self._broadcast_conv_params() + self.proj.weight.register_hook(self._sync_grad_during_backward) + self.proj.bias.register_hook(self._sync_grad_during_backward) + + def _broadcast_conv_params(self) -> None: + xz_rank = gpc.get_ranks_in_group(ParallelMode.PARALLEL_2P5D_XZ) + dist.broadcast(self.proj.weight, src=xz_rank[0], + group=gpc.get_group(ParallelMode.PARALLEL_2P5D_XZ)) + dist.broadcast(self.proj.bias, src=xz_rank[0], + group=gpc.get_group(ParallelMode.PARALLEL_2P5D_XZ)) + + def _sync_grad_during_backward(self, grad: Tensor) -> None: + dist.all_reduce(grad, group=gpc.get_group( + ParallelMode.PARALLEL_2P5D_XZ)) + grad = grad / self.tesseract_dim / self.tesseract_dep # * + return grad + + def forward(self, x: Tensor) -> Tensor: + B, C, H, W = x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
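+ # proj maps [B, C, H, W] -> [B, embed_dim / q, H / p, W / p]; flattening below
+ # gives [B, num_patches, embed_dim / q]. For example (illustrative values, not
+ # taken from any config in this patch): a 224x224 image, 16x16 patches,
+ # embed_dim=768 and tesseract_dim q=2 yield a per-rank tensor [B, 196, 384].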
+ x = self.proj(x) + if self.flatten: + x = x.flatten(2).transpose(1, 2) # BCHW -> BNC + return x + + +@LAYERS.register_module +class ViTTokenFuser2p5D(nn.Module): + """ + Fuse cls token and pos embedding to the input + + :param img_size: image size + :type img_size: int + :param patch_size: patch size + :type patch_size: int + :param embed_dim: dimension of embedding + :type embed_dim: int + :param drop_rate: dropout probability, defaults to 0. + :type drop_rate: float, optional + """ + + def __init__(self, + img_size, + patch_size, + embed_dim, + drop_rate=0. + ): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + + assert_tesseract_initialization() + self.tesseract_dim, self.tesseract_dep = get_tesseract_dim_dep_from_env() + self.img_size = img_size + self.patch_size = patch_size + self.grid_size = (img_size[0] // patch_size[0], + img_size[1] // patch_size[1]) + self.num_patches = self.grid_size[0] * self.grid_size[1] + self.embed_dim = embed_dim + + self.cls_token = nn.Parameter(torch.zeros( + 1, 1, self.embed_dim // self.tesseract_dim)) # * + self.pos_embed = nn.Parameter(torch.zeros( + 1, self.num_patches + 1, self.embed_dim // self.tesseract_dim)) # * + + # move to cuda before broadcast + self.to(get_current_device()) + + self._broadcast_params() + self.cls_token.register_hook(self._sync_grad_hook) + self.pos_embed.register_hook(self._sync_grad_hook) + self.pos_drop = nn.Dropout(p=drop_rate) + self._set_tensor_parallel_attribute() + + def _set_tensor_parallel_attribute(self): + set_tensor_parallel_attribute(self.cls_token) + set_tensor_parallel_attribute(self.pos_embed) + + def _broadcast_params(self) -> None: + " broadcast to all column ranks for data consistency " + xz_rank = gpc.get_ranks_in_group(ParallelMode.PARALLEL_2P5D_XZ) + dist.broadcast(self.cls_token, src=xz_rank[0], + group=gpc.get_group(ParallelMode.PARALLEL_2P5D_XZ)) + dist.broadcast(self.pos_embed, src=xz_rank[0], + group=gpc.get_group(ParallelMode.PARALLEL_2P5D_XZ)) + + def _sync_grad_hook(self, grad) -> None: + dist.all_reduce(grad, group=gpc.get_group( + ParallelMode.PARALLEL_2P5D_XZ)) + grad = grad / self.tesseract_dim / self.tesseract_dep # * + return grad + + def forward(self, x: Tensor) -> Tensor: + # stole cls_tokens impl from Phil Wang, thanks + cls_token = self.cls_token.expand(x.shape[0], -1, -1) + x = torch.cat((cls_token, x), dim=1) + x = self.pos_drop(x + self.pos_embed) + return x + + +@LAYERS.register_module +class ViTInputSplitter2p5D(nn.Module): + + def __init__(self): + super().__init__() + assert_tesseract_initialization() + self.tesseract_dim, self.tesseract_dep = get_tesseract_dim_dep_from_env() + + def forward(self, x: Tensor) -> Tensor: + batch_size = x.size(0) + return _ViT_Split_2p5D.apply( + x, + batch_size, + self.tesseract_dim, + self.tesseract_dep, + ParallelMode.PARALLEL_2P5D_XZ, + ) diff --git a/colossalai/nn/layer/parallel_2p5d/layers.py b/colossalai/nn/layer/parallel_2p5d/layers.py new file mode 100644 index 000000000..63b0f2a5c --- /dev/null +++ b/colossalai/nn/layer/parallel_2p5d/layers.py @@ -0,0 +1,266 @@ +import math + +import torch +from torch import Tensor +from torch.nn import Parameter, init as init + +from colossalai.context import seed, ParallelMode +from colossalai.core import global_context as gpc +from colossalai.registry import LAYERS +from colossalai.utils import get_current_device +from ._operation import Matmul_AB_2p5D, Add_Bias_2p5D, _LayerNorm_2p5D +from ._utils import get_tesseract_dim_dep_from_env, 
assert_tesseract_initialization +from .._common_utils import divide, set_tensor_parallel_attribute +from ..base_layer import ParallelLayer + + +@LAYERS.register_module +class Linear2p5D(ParallelLayer): + """Linear layer for 2.5D parallelism + + :param in_features: size of each input sample + :type in_features: int + :param out_features: size of each output sample + :type out_features: int + :param bias: If set to ``False``, the layer will not learn an additive bias, defaults to True + :type bias: bool, optional + :param dtype: The dtype of parameters, defaults to None + :type dtype: torch.dtype, optional + """ + + def __init__(self, + in_features: int, + out_features: int, + bias: bool = True, + dtype=None, + skip_bias_add: bool = False + ): + super().__init__() + + self.in_features = in_features + self.out_features = out_features + self.skip_bias_add = skip_bias_add + + # parallel setting + assert_tesseract_initialization() + self.row_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL) + self.col_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW) + self.dep_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP) + self.tesseract_dim, self.tesseract_dep = get_tesseract_dim_dep_from_env() + + # partitioning dimension + self.input_size_per_partition = divide(in_features, self.tesseract_dim) + self.hidden_size_per_partition = divide( + out_features, self.tesseract_dim) + + # create weight, shape: [k/q, h/q] + factory_kwargs = {'device': get_current_device(), 'dtype': dtype} + self.weight = Parameter(torch.empty( + self.input_size_per_partition, + self.hidden_size_per_partition, + **factory_kwargs)) + + # create bias, shape: [h/q] + if bias: + self.bias = Parameter(torch.empty( + self.hidden_size_per_partition, + **factory_kwargs)) + else: + self.register_parameter('bias', None) + + # initialize parameters + self.reset_parameters() + self._set_tensor_parallel_attributes() + + def _set_tensor_parallel_attributes(self): + set_tensor_parallel_attribute(self.weight) + if self.bias is not None: + set_tensor_parallel_attribute(self.bias) + + def reset_parameters(self) -> None: + # setting + fan_in = self.in_features + a = math.sqrt(5) + nonlinearity = 'leaky_relu' + + # init weight + std = init.calculate_gain(nonlinearity, a) / math.sqrt(fan_in) + bound = math.sqrt(3.0) * std + with seed(ParallelMode.TENSOR): + init.uniform_(self.weight, -bound, bound) + + # init bias + if self.bias is not None: + bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0 + with seed(ParallelMode.TENSOR): + init.uniform_(self.bias, -bound, bound) + + def forward(self, x: Tensor) -> Tensor: + # input: [m/dq, n/q, k/q] + # output: [m/dq, n/q, h/q] + out_shape = x.shape[:-1] + (self.hidden_size_per_partition,) + output = Matmul_AB_2p5D.apply( + x, + self.weight, + self.tesseract_dim, + self.tesseract_dep, + out_shape, + self.row_rank, self.col_rank, self.dep_rank, + ParallelMode.PARALLEL_2P5D_ROW, + ParallelMode.PARALLEL_2P5D_COL, + ParallelMode.PARALLEL_2P5D_DEP, + self.data_parallel_rank, + self.pipeline_parallel_rank, + self.pipeline_parallel_size, + self.tensor_parallel_size, + ) + + if self.bias is not None: + if self.skip_bias_add: + bias = Add_Bias_2p5D.apply( + None, + self.bias, + self.hidden_size_per_partition, + self.tesseract_dim, self.tesseract_dep, + self.row_rank, self.col_rank, self.dep_rank, + ParallelMode.PARALLEL_2P5D_ROW, + ParallelMode.PARALLEL_2P5D_COL, + ParallelMode.PARALLEL_2P5D_DEP, + True, + self.data_parallel_rank, + self.pipeline_parallel_rank, + self.pipeline_parallel_size, + 
self.tensor_parallel_size + ) + return output, bias + else: + output = Add_Bias_2p5D.apply( + output, + self.bias, + self.hidden_size_per_partition, + self.tesseract_dim, self.tesseract_dep, + self.row_rank, self.col_rank, self.dep_rank, + ParallelMode.PARALLEL_2P5D_ROW, + ParallelMode.PARALLEL_2P5D_COL, + ParallelMode.PARALLEL_2P5D_DEP, + False, + self.data_parallel_rank, + self.pipeline_parallel_rank, + self.pipeline_parallel_size, + self.tensor_parallel_size + ) + return output + else: + return output + + +@LAYERS.register_module +class LayerNorm2p5D(ParallelLayer): + r"""Layer Normalization for 2.5D parallelism + + :param normalized_shape: input shape from an expected input + of size. :math:`[* \times \text{normalized_shape}[0] \times \text{normalized_shape}[1] \times \ldots \times \text{normalized_shape}[-1]]` + If a single integer is used, it is treated as a singleton list, and this module will + normalize over the last dimension which is expected to be of that specific size. + :type normalized_shape: int + :param eps: a value added to the denominator for numerical stability, defaults to 1e-05 + :type eps: float, optional + :param dtype: The dtype of parameters, defaults to None + :type dtype: torch.dtype, optional + """ + def __init__(self, + normalized_shape: int, + eps: float = 1e-05, + dtype=None + ): + super().__init__() + + # layer norm config + self.normalized_shape = normalized_shape + self.variance_epsilon = eps + + # parallel setting + assert_tesseract_initialization() + self.row_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL) + self.col_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW) + self.dep_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP) + self.tesseract_dim, self.tesseract_dep = get_tesseract_dim_dep_from_env() + + # partitioning dimension + self.partitioned_partition = divide( + normalized_shape, self.tesseract_dim) # * + + # create parameters + factory_kwargs = {'device': get_current_device(), 'dtype': dtype} + + if self.row_rank == 0: + self.gamma = Parameter(torch.ones( + self.partitioned_partition, + **factory_kwargs)) + self.beta = Parameter(torch.zeros( + self.partitioned_partition, + **factory_kwargs)) + else: + self.gamma = Parameter(torch.tensor( + 1.0, + requires_grad=True, + **factory_kwargs)) + self.beta = Parameter(torch.tensor( + 1.0, + requires_grad=True, + **factory_kwargs)) + self._set_tensor_parallel_attribute() + + def _set_tensor_parallel_attribute(self): + set_tensor_parallel_attribute(self.gamma) + set_tensor_parallel_attribute(self.beta) + + def forward(self, x: Tensor) -> Tensor: + with torch.no_grad(): + E_x = torch.sum(x, dim=-1, keepdim=True) # [b/q, s, 1] + torch.distributed.all_reduce( + E_x, group=gpc.get_group(ParallelMode.PARALLEL_2P5D_ROW)) + E_x /= self.normalized_shape + + # Var_x in the block below is the sum of input^2 + Var_x = torch.sum(x * x, dim=-1, keepdim=True) # [b/q, s, 1] + torch.distributed.all_reduce( + Var_x, group=gpc.get_group(ParallelMode.PARALLEL_2P5D_ROW)) + Var_x /= self.normalized_shape + + Var_x = Var_x - E_x * E_x # variance of x [b/q, s, 1] + # this time 1/sqrt(Var_x + epsilon) + Var_x = 1.0 / torch.sqrt(Var_x + self.variance_epsilon) + + output = _LayerNorm_2p5D.apply(x, E_x, Var_x, self.normalized_shape, + ParallelMode.PARALLEL_2P5D_ROW, + ParallelMode.PARALLEL_2P5D_COL, + ParallelMode.PARALLEL_2P5D_DEP) + bias = Add_Bias_2p5D.apply( + None, self.beta, self.partitioned_partition, + self.tesseract_dim, self.tesseract_dep, + self.row_rank, self.col_rank, self.dep_rank, + 
ParallelMode.PARALLEL_2P5D_ROW, + ParallelMode.PARALLEL_2P5D_COL, + ParallelMode.PARALLEL_2P5D_DEP, + True, + self.data_parallel_rank, + self.pipeline_parallel_rank, + self.pipeline_parallel_size, + self.tensor_parallel_size + ) + scale = Add_Bias_2p5D.apply( + None, self.gamma, self.partitioned_partition, + self.tesseract_dim, self.tesseract_dep, + self.row_rank, self.col_rank, self.dep_rank, + ParallelMode.PARALLEL_2P5D_ROW, + ParallelMode.PARALLEL_2P5D_COL, + ParallelMode.PARALLEL_2P5D_DEP, + True, + self.data_parallel_rank, + self.pipeline_parallel_rank, + self.pipeline_parallel_size, + self.tensor_parallel_size + ) + output = torch.addcmul(bias, scale, output) + return output diff --git a/colossalai/nn/layer/parallel_3d/__init__.py b/colossalai/nn/layer/parallel_3d/__init__.py new file mode 100644 index 000000000..b2d3a2a1a --- /dev/null +++ b/colossalai/nn/layer/parallel_3d/__init__.py @@ -0,0 +1,9 @@ +from ._operation import Matmul_ABT_3D, Matmul_ATB_3D, Matmul_AB_3D, Mul_3D, Sum_3D, Add_3D, Reduce_3D +from ._vit import ViTHead3D, ViTMLP3D, ViTPatchEmbedding3D, ViTSelfAttention3D +from .layers import Linear3D, LayerNorm3D + +__all__ = [ + 'Matmul_ABT_3D', 'Matmul_ATB_3D', 'Matmul_AB_3D', 'Mul_3D', 'Sum_3D', 'Add_3D', 'Reduce_3D', + 'ViTHead3D', 'ViTMLP3D', 'ViTPatchEmbedding3D', 'ViTSelfAttention3D', + 'Linear3D', 'LayerNorm3D' +] diff --git a/colossalai/nn/layer/parallel_3d/_operation.py b/colossalai/nn/layer/parallel_3d/_operation.py new file mode 100644 index 000000000..cb790fb51 --- /dev/null +++ b/colossalai/nn/layer/parallel_3d/_operation.py @@ -0,0 +1,349 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from typing import Any, Tuple + +import torch +import torch.distributed as dist +from colossalai.communication import all_gather, reduce_scatter, scatter +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.utils import empty_cache, get_current_device +from torch import Tensor + + +class Matmul_AB_3D(torch.autograd.Function): + """Matrix multiplication for :math:`C = AB` + """ + @staticmethod + def forward(ctx: Any, + A: Tensor, + B: Tensor, + depth: int, + input_parallel_mode: ParallelMode, + weight_parallel_mode: ParallelMode, + output_parallel_mode: ParallelMode, + input_dim: int = 0, + weight_dim: int = -1, + output_dim: int = 0) -> Tensor: + # A: [m/q^2, n, k/q] + # B: [k/q, h/q^2] + # C: [m/q^2, n, h/q] + empty_cache() + ctx.save_for_backward(A, B) + + assert A.shape[-1] == B.shape[0], \ + 'Invalid shapes: A={}, B={}.'.format(A.shape, B.shape) + + A_temp = all_gather(A, input_dim, input_parallel_mode) + B_temp = all_gather(B, weight_dim, weight_parallel_mode) + + C = torch.matmul(A_temp, B_temp) + out = reduce_scatter(C, output_dim, output_parallel_mode) + + ctx.depth = depth + ctx.A_group_parallel_mode = input_parallel_mode + ctx.B_group_parallel_mode = weight_parallel_mode + ctx.C_group_parallel_mode = output_parallel_mode + ctx.A_dim = input_dim + ctx.B_dim = weight_dim + ctx.C_dim = output_dim + + return out + + @staticmethod + def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: + A, B = ctx.saved_tensors + with torch.no_grad(): + A_grad = Matmul_ABT_3D.apply(output_grad, B, ctx.depth, + ctx.C_group_parallel_mode, + ctx.B_group_parallel_mode, + ctx.A_group_parallel_mode, ctx.C_dim, + ctx.B_dim, ctx.A_dim) + B_grad = Matmul_ATB_3D.apply(A, output_grad, ctx.depth, + ctx.A_group_parallel_mode, + ctx.C_group_parallel_mode, + ctx.B_group_parallel_mode, ctx.A_dim, + ctx.C_dim, 
ctx.B_dim) + return A_grad, B_grad, None, None, None, None, None, None, None + + +class Matmul_ABT_3D(torch.autograd.Function): + """Matrix multiplication for :math:`C = AB^T` + """ + @staticmethod + def forward(ctx: Any, + A: Tensor, + B: Tensor, + depth: int, + input_parallel_mode: ParallelMode, + weight_parallel_mode: ParallelMode, + output_parallel_mode: ParallelMode, + input_dim: int = 0, + weight_dim: int = -1, + output_dim: int = 0) -> Tensor: + # A: [m/q^2, n, h/q] + # B: [k/q, h/q^2] + # C: [m/q^2, n, k/q] + empty_cache() + ctx.save_for_backward(A, B) + + A_temp = all_gather(A, input_dim, input_parallel_mode) + B_temp = all_gather(B, weight_dim, weight_parallel_mode) + + C = torch.matmul(A_temp, B_temp.transpose(0, 1)) + out = reduce_scatter(C, output_dim, output_parallel_mode) + + ctx.depth = depth + ctx.A_group_parallel_mode = input_parallel_mode + ctx.B_group_parallel_mode = weight_parallel_mode + ctx.C_group_parallel_mode = output_parallel_mode + ctx.A_dim = input_dim + ctx.B_dim = weight_dim + ctx.C_dim = output_dim + + return out + + @staticmethod + def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: + A, B = ctx.saved_tensors + with torch.no_grad(): + A_grad = Matmul_AB_3D.apply(output_grad, B, ctx.depth, + ctx.C_group_parallel_mode, + ctx.B_group_parallel_mode, + ctx.A_group_parallel_mode, ctx.C_dim, + ctx.B_dim, ctx.A_dim) + B_grad = Matmul_ATB_3D.apply(output_grad, A, ctx.depth, + ctx.C_group_parallel_mode, + ctx.A_group_parallel_mode, + ctx.B_group_parallel_mode, ctx.C_dim, + ctx.A_dim, ctx.B_dim) + return A_grad, B_grad, None, None, None, None, None, None, None + + +class Matmul_ATB_3D(torch.autograd.Function): + """Matrix multiplication for :math:`C = A^TB` + """ + @staticmethod + def forward(ctx: Any, + A: Tensor, + B: Tensor, + depth: int, + input_parallel_mode: ParallelMode, + weight_parallel_mode: ParallelMode, + output_parallel_mode: ParallelMode, + input_dim: int = 0, + weight_dim: int = 0, + output_dim: int = -1) -> Tensor: + # A: [m/q^2, n, k/q] + # B: [m/q^2, n, h/q] + # C: [k/q, h/q^2] + empty_cache() + ctx.save_for_backward(A, B) + + A_temp = all_gather(A, input_dim, input_parallel_mode) + A_temp = A_temp.reshape(-1, A.shape[-1]) + B_temp = all_gather(B, weight_dim, weight_parallel_mode) + B_temp = B_temp.reshape(-1, B.shape[-1]) + + C = torch.matmul(A_temp.transpose(0, 1), B_temp) + out = reduce_scatter(C, output_dim, output_parallel_mode) + + ctx.depth = depth + ctx.A_group_parallel_mode = input_parallel_mode + ctx.B_group_parallel_mode = weight_parallel_mode + ctx.C_group_parallel_mode = output_parallel_mode + ctx.A_dim = input_dim + ctx.B_dim = weight_dim + ctx.C_dim = output_dim + + return out + + @staticmethod + def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: + A, B = ctx.saved_tensors + with torch.no_grad(): + A_grad = Matmul_ABT_3D.apply(B, output_grad, ctx.depth, + ctx.B_group_parallel_mode, + ctx.C_group_parallel_mode, + ctx.A_group_parallel_mode, ctx.B_dim, + ctx.C_dim, ctx.A_dim) + B_grad = Matmul_AB_3D.apply(A, output_grad, ctx.depth, + ctx.A_group_parallel_mode, + ctx.C_group_parallel_mode, + ctx.B_group_parallel_mode, ctx.A_dim, + ctx.C_dim, ctx.B_dim) + return A_grad, B_grad, None, None, None, None, None, None, None + + +class Add_3D(torch.autograd.Function): + """Matrix add bias: :math:`C = A + b` + """ + @staticmethod + def forward(ctx: Any, input_: Tensor, bias: Tensor, depth: int, + input_parallel_mode: ParallelMode, + weight_parallel_mode: ParallelMode, + output_parallel_mode: ParallelMode) -> Tensor: + 
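+ # The bias is stored as an [h/q^2] shard. It is broadcast inside the input
+ # group from the rank whose position in that group equals this rank's local
+ # rank in the output group, then all-gathered along the weight group so every
+ # rank ends up with the [h/q] slice matching its input partition.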
# input: [m/q^2, n, h/q] + # bias: [h/q^2] + ranks_in_group = gpc.get_ranks_in_group(input_parallel_mode) + src_rank = ranks_in_group[gpc.get_local_rank(output_parallel_mode)] + bias_temp = bias.clone() + dist.broadcast(bias_temp, + src=src_rank, + group=gpc.get_group(input_parallel_mode)) + # [h/q] + bias_temp = all_gather(bias_temp, -1, weight_parallel_mode) + + out = input_ + bias_temp + + ctx.depth = depth + ctx.src_rank = src_rank + ctx.A_group_parallel_mode = input_parallel_mode + ctx.B_group_parallel_mode = weight_parallel_mode + ctx.C_group_parallel_mode = output_parallel_mode + + return out + + @staticmethod + def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: + # output_grad: [m/q^2, n, h/q] + with torch.no_grad(): + # [h/q] + grad = torch.sum(output_grad, + dim=tuple(range(len(output_grad.shape))[:-1])) + bias_grad = reduce_scatter(grad, -1, ctx.B_group_parallel_mode) + dist.reduce(bias_grad, + dst=ctx.src_rank, + group=gpc.get_group(ctx.A_group_parallel_mode)) + if gpc.get_local_rank( + ctx.A_group_parallel_mode) != gpc.get_local_rank( + ctx.C_group_parallel_mode): + bias_grad = None + return output_grad, bias_grad, None, None, None, None + + +class Mul_3D(torch.autograd.Function): + """Matrix multiplication for :math:`C = A * b` + """ + @staticmethod + def forward(ctx: Any, input_: Tensor, bias: Tensor, depth: int, + input_parallel_mode: ParallelMode, + weight_parallel_mode: ParallelMode, + output_parallel_mode: ParallelMode) -> Tensor: + # input: [m/q^2, n, h/q] + # bias: [h/q^2] + ranks_in_group = gpc.get_ranks_in_group(input_parallel_mode) + src_rank = ranks_in_group[gpc.get_local_rank(output_parallel_mode)] + # [h/q^2] + bias_temp = bias.clone() + dist.broadcast(bias_temp, + src=src_rank, + group=gpc.get_group(input_parallel_mode)) + # [h/q] + bias_temp = all_gather(bias_temp, -1, weight_parallel_mode) + + empty_cache() + ctx.save_for_backward(input_, bias_temp) + + out = torch.mul(input_, bias_temp) + + ctx.depth = depth + ctx.src_rank = src_rank + ctx.A_group_parallel_mode = input_parallel_mode + ctx.B_group_parallel_mode = weight_parallel_mode + ctx.C_group_parallel_mode = output_parallel_mode + + return out + + @staticmethod + def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: + # output_grad: [m/q^2, n, h/q] + with torch.no_grad(): + input_, bias = ctx.saved_tensors + # [m/q^2, n, h/q] + input_grad = torch.mul(output_grad, bias) + # [h/q] + grad = torch.mul(output_grad, input_) + grad = torch.sum(grad, + dim=tuple(range(len(output_grad.shape))[:-1])) + bias_grad = reduce_scatter(grad, -1, ctx.B_group_parallel_mode) + dist.reduce(bias_grad, + dst=ctx.src_rank, + group=gpc.get_group(ctx.A_group_parallel_mode)) + if gpc.get_local_rank( + ctx.A_group_parallel_mode) != gpc.get_local_rank( + ctx.C_group_parallel_mode): + bias_grad = None + return input_grad, bias_grad, None, None, None, None + + +class Sum_3D(torch.autograd.Function): + """Compute the sum of input tensors + """ + @staticmethod + def forward(ctx: Any, + input_: Tensor, + dim: int, + depth: int, + parallel_mode: ParallelMode, + keepdim: bool = False) -> Tensor: + # input: [m/q^2, n, h/q] + out = torch.sum(input_, dim=dim, keepdim=keepdim) + dist.all_reduce(out, group=gpc.get_group(parallel_mode)) + + ctx.input_shape = input_.shape + ctx.depth = depth + ctx.group = parallel_mode + ctx.dim = dim + return out + + @staticmethod + def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: + with torch.no_grad(): + output_grad = output_grad.contiguous() + 
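+ # Backward of a sum that was all-reduced in forward: combine the incoming
+ # gradients across the same group, then expand them back over the reduced
+ # dimension (unsqueeze at ctx.dim and repeat up to the saved input shape).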
dist.all_reduce(output_grad, group=gpc.get_group(ctx.group)) + if len(output_grad.shape) < len(ctx.input_shape): + output_grad = torch.unsqueeze(output_grad, ctx.dim) + dims = [1 for _ in range(len(output_grad.shape))] + dims[ctx.dim] = ctx.input_shape[ctx.dim] + input_grad = output_grad.repeat(tuple(dims)) + return input_grad, None, None, None, None, None + + +class Reduce_3D(torch.autograd.Function): + """Reduce input tensors + """ + @staticmethod + def forward(ctx: Any, input_: Tensor, depth: int, + parallel_mode: ParallelMode) -> Tensor: + dist.all_reduce(input_, group=gpc.get_group(parallel_mode)) + return input_.clone() + + @staticmethod + def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: + return output_grad, None, None + + +class Slice_3D(torch.autograd.Function): + """Slice input tensor + """ + @staticmethod + def forward(ctx: Any, input_: Tensor, dim: int, depth: int, + parallel_mode: ParallelMode) -> Tensor: + rank = gpc.get_local_rank(parallel_mode) + out = torch.chunk(input_, depth, dim=dim)[rank].contiguous() + + ctx.depth = depth + ctx.parallel_mode = parallel_mode + ctx.dim = dim + ctx.input_shape = input_.shape + + return out + + @staticmethod + def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: + with torch.no_grad(): + input_grad = all_gather(output_grad, ctx.dim, ctx.parallel_mode) + input_grad.reshape(ctx.input_shape) + return input_grad, None, None, None diff --git a/colossalai/nn/layer/parallel_3d/_utils.py b/colossalai/nn/layer/parallel_3d/_utils.py new file mode 100644 index 000000000..3c9236017 --- /dev/null +++ b/colossalai/nn/layer/parallel_3d/_utils.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import os + +from colossalai.constants import DEPTH_3D +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from torch import Tensor + + +def get_depth_from_env() -> int: + try: + depth = os.environ[DEPTH_3D] + depth = int(depth) + assert depth > 0, 'DEPTH must be greater than zero' + return depth + + except KeyError as e: + raise EnvironmentError( + 'DEPTH is not found in the current environment, ' + 'please make sure that you have used the correct process group initializer' + ) + + +def get_last_group(a, b): + mapping = { + ParallelMode.PARALLEL_3D_INPUT: 'A', + ParallelMode.PARALLEL_3D_WEIGHT: 'B', + ParallelMode.PARALLEL_3D_OUTPUT: 'C', + } + + res = chr( + ord('A') + ord('B') + ord('C') - ord(mapping[a]) - ord(mapping[b])) + + if res == 'A': + return ParallelMode.PARALLEL_3D_INPUT + elif res == 'B': + return ParallelMode.PARALLEL_3D_WEIGHT + elif res == 'C': + return ParallelMode.PARALLEL_3D_OUTPUT + + +def dbg_check_shape(tensor: Tensor, shape: tuple): + rank = gpc.get_global_rank() + if rank == 0: + print(tensor.shape) + assert tensor.shape == shape, \ + '{} does not match {}'.format(tensor.shape, shape) diff --git a/colossalai/nn/layer/parallel_3d/_vit.py b/colossalai/nn/layer/parallel_3d/_vit.py new file mode 100644 index 000000000..ffe7a146a --- /dev/null +++ b/colossalai/nn/layer/parallel_3d/_vit.py @@ -0,0 +1,368 @@ +import math +from typing import Tuple + +import torch +import torch.distributed as dist +from colossalai.context import ParallelMode, seed +from colossalai.core import global_context as gpc +from colossalai.registry import LAYERS +from colossalai.utils import checkpoint, get_current_device +from torch import Tensor, dtype, nn + +from .._common_utils import ACT2FN, divide, set_tensor_parallel_attribute +from 
..vanilla_vision_transformer.layers import to_2tuple +from ._utils import get_depth_from_env +from .layers import Linear3D + + +@LAYERS.register_module +class ViTPatchEmbedding3D(nn.Module): + """ 3D Image to Patch Embedding + + :param img_size: iamge size + :type img_size: int + :param patch_size: patch size + :type patch_size: int + :param in_chans: number of channels of input image + :type in_chans: int + :param embed_size: dimension of embedding + :type embed_size: int + :param drop_prob: dropout probability + :type drop_prob: float + :param flatten: whether to flatten output tensor, defaults to True + :type flatten: bool, optional + """ + def __init__(self, + img_size: int, + patch_size: int, + in_chans: int, + embed_size: int, + drop_prob: float, + flatten: bool = True): + super().__init__() + self.depth = get_depth_from_env() + self.input_parallel_mode = ParallelMode.PARALLEL_3D_INPUT + self.weight_parallel_mode = ParallelMode.PARALLEL_3D_WEIGHT + self.output_parallel_mode = ParallelMode.PARALLEL_3D_OUTPUT + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.grid_size = (img_size[0] // patch_size[0], + img_size[1] // patch_size[1]) + self.embed_size = embed_size + self.embed_size_per_partition = divide(self.embed_size, self.depth) + self.num_patches = self.grid_size[0] * self.grid_size[1] + self.flatten = flatten + + with seed(ParallelMode.TENSOR): + self.proj = nn.Conv2d(in_chans, + self.embed_size_per_partition, + kernel_size=patch_size, + stride=patch_size) + + self.cls_token = nn.Parameter( + torch.zeros(1, 1, self.embed_size_per_partition)) + self.pos_embed = nn.Parameter( + torch.zeros(1, self.num_patches + 1, + self.embed_size_per_partition)) + self.pos_drop = nn.Dropout(drop_prob) + + self._sync_parameters() + self.proj.weight.register_hook(self._sync_grad_hook) + self.proj.bias.register_hook(self._sync_grad_hook) + self.cls_token.register_hook(self._sync_grad_hook) + self.pos_embed.register_hook(self._sync_grad_hook) + self._set_tensor_parallel_attribute() + + def _set_tensor_parallel_attribute(self): + set_tensor_parallel_attribute(self.proj.weight) + set_tensor_parallel_attribute(self.proj.bias) + set_tensor_parallel_attribute(self.cls_token) + set_tensor_parallel_attribute(self.pos_embed) + + def groups_for_next_layer(self) -> Tuple[ParallelMode, ParallelMode]: + return self.input_parallel_mode, self.weight_parallel_mode + + def _sync_parameters(self): + self.to(get_current_device()) + weight_src_rank = gpc.get_ranks_in_group(self.weight_parallel_mode)[0] + dist.broadcast(self.proj.weight, + src=weight_src_rank, + group=gpc.get_group(self.weight_parallel_mode)) + dist.broadcast(self.proj.bias, + src=weight_src_rank, + group=gpc.get_group(self.weight_parallel_mode)) + input_src_rank = gpc.get_ranks_in_group(self.input_parallel_mode)[0] + dist.broadcast(self.proj.weight, + src=input_src_rank, + group=gpc.get_group(self.input_parallel_mode)) + dist.broadcast(self.proj.bias, + src=input_src_rank, + group=gpc.get_group(self.input_parallel_mode)) + set_tensor_parallel_attribute(self.proj.weight) + set_tensor_parallel_attribute(self.proj.bias) + set_tensor_parallel_attribute(self.cls_token) + set_tensor_parallel_attribute(self.pos_embed) + + def _sync_grad_hook(self, grad) -> None: + dist.all_reduce(grad, group=gpc.get_group(self.input_parallel_mode)) + dist.all_reduce(grad, group=gpc.get_group(self.weight_parallel_mode)) + return grad + + def forward(self, x: Tensor) -> Tensor: + B, C, H, W = 
x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." + x = self.proj(x) + if self.flatten: + x = x.flatten(2).transpose(1, 2) # BCHW -> BNC + + # split a partition from embedded states + x = torch.chunk(x, self.depth, dim=0)[gpc.get_local_rank( + self.weight_parallel_mode)].contiguous() + x = torch.chunk(x, self.depth, dim=0)[gpc.get_local_rank( + self.input_parallel_mode)].contiguous() + + # add cls token & pos embedding + # [b/q^2,s,h/q] --> [b/q^2, 1+s, h/q] + cls_token = self.cls_token.expand(x.shape[0], -1, -1) + x = torch.cat((cls_token, x), dim=1) + + with seed(ParallelMode.TENSOR): + x = self.pos_drop(x + self.pos_embed) + + return x + + +@LAYERS.register_module +class ViTSelfAttention3D(nn.Module): + """Self-attention layer for 3D parallel Vision Transformer + + :param hidden_size: hidden size + :type hidden_size: int + :param num_attention_heads: number of attention heads + :type num_attention_heads: int + :param attention_probs_dropout_prob: dropout probability for attention layers + :type attention_probs_dropout_prob: bool + :param hidden_dropout_prob: dropout probability for hidden layers + :type hidden_dropout_prob: bool + :param depth: the 3D parallelism depth + :type depth: int + :param input_parallel_mode: parallel mode of input tensor + :type input_parallel_mode: ParallelMode + :param weight_parallel_mode: parallel mode of weight + :type weight_parallel_mode: ParallelMode + :param dtype: dtype of parameters, defaults to None + :type dtype: dtype, optional + :param bias: whether to add bias, defaults to True + :type bias: bool, optional + """ + def __init__(self, + hidden_size: int, + num_attention_heads: int, + attention_probs_dropout_prob: float, + hidden_dropout_prob: float, + dtype: dtype = None, + bias: bool = True, + checkpoint: bool = False): + super().__init__() + self.depth = get_depth_from_env() + self.input_parallel_mode = ParallelMode.PARALLEL_3D_INPUT + self.weight_parallel_mode = ParallelMode.PARALLEL_3D_WEIGHT + self.output_parallel_mode = ParallelMode.PARALLEL_3D_OUTPUT + self.hidden_size = hidden_size + self.num_attention_heads = divide(num_attention_heads, self.depth) + self.attention_head_size = divide(hidden_size, num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + self.checkpoint = checkpoint + + self.query_key_value = Linear3D(self.hidden_size, + 3 * self.hidden_size, + self.input_parallel_mode, + self.weight_parallel_mode, + dtype=dtype, + bias=bias) + self.attention_dropout = nn.Dropout(attention_probs_dropout_prob) + self.dense = Linear3D(self.hidden_size, + self.hidden_size, + self.output_parallel_mode, + self.weight_parallel_mode, + dtype=dtype, + bias=bias) + self.dropout = nn.Dropout(hidden_dropout_prob) + self.softmax = nn.Softmax(dim=-1) + + def groups_for_next_layer(self) -> Tuple[ParallelMode, ParallelMode]: + return self.input_parallel_mode, self.weight_parallel_mode + + def _forward(self, hidden_states: Tensor) -> Tensor: + query_key_value = self.query_key_value(hidden_states) + new_qkv_shape = query_key_value.shape[:-1] + \ + (self.num_attention_heads, 3 * self.attention_head_size) + query_key_value = query_key_value.view(new_qkv_shape) + query_key_value = query_key_value.permute((0, 2, 1, 3)) + query_layer, key_layer, value_layer = torch.chunk(query_key_value, + 3, + dim=-1) + + attention_scores = torch.matmul(query_layer, + key_layer.transpose(-1, -2)) + attention_scores = 
attention_scores / math.sqrt( + self.attention_head_size) + attention_probs = self.softmax(attention_scores) + with seed(ParallelMode.TENSOR): + attention_probs = self.attention_dropout(attention_probs) + + context_layer = torch.matmul(attention_probs, value_layer) + context_layer = context_layer.transpose(1, 2) + new_context_layer_shape = context_layer.size()[:-2] + ( + self.all_head_size, ) + context_layer = context_layer.reshape(new_context_layer_shape) + + output = self.dense(context_layer) + with seed(ParallelMode.TENSOR): + output = self.dropout(output) + + return output + + def _checkpoint_forward(self, hidden_states: Tensor) -> Tensor: + return checkpoint(self._forward, hidden_states) + + def forward(self, hidden_states: Tensor) -> Tensor: + if self.checkpoint: + return self._checkpoint_forward(hidden_states) + else: + return self._forward(hidden_states) + + +@LAYERS.register_module +class ViTMLP3D(nn.Module): + """[summary] + + :param hidden_size: hidden size + :type hidden_size: int + :param mlp_ratio: hidden size of MLP divided by embedding dim + :type mlp_ratio: int + :param hidden_dropout_prob: dropout probability for hidden layers + :type hidden_dropout_prob: float + :param hidden_act: activation function for hidden layers + :type hidden_act: str + :param depth: the 3D parallelism depth + :type depth: int + :param input_parallel_mode: parallel mode of input tensor + :type input_parallel_mode: ParallelMode + :param weight_parallel_mode: parallel mode of weight + :type weight_parallel_mode: ParallelMode + :param dtype: dtype of parameters, defaults to None + :type dtype: dtype, optional + :param bias: whether to add bias, defaults to True + :type bias: bool, optional + """ + def __init__(self, + hidden_size: int, + mlp_ratio: int, + hidden_dropout_prob: float, + hidden_act: str = 'gelu', + dtype: dtype = None, + bias: bool = True, + checkpoint: bool = False): + super().__init__() + self.depth = get_depth_from_env() + self.input_parallel_mode = ParallelMode.PARALLEL_3D_INPUT + self.weight_parallel_mode = ParallelMode.PARALLEL_3D_WEIGHT + self.output_parallel_mode = ParallelMode.PARALLEL_3D_OUTPUT + self.hidden_size = hidden_size + self.mlp_ratio = mlp_ratio + self.checkpoint = checkpoint + + self.dense_1 = Linear3D(self.hidden_size, + self.mlp_ratio * self.hidden_size, + self.input_parallel_mode, + self.weight_parallel_mode, + dtype=dtype, + bias=bias) + self.activation_func = ACT2FN[hidden_act] + self.dense_2 = Linear3D(self.mlp_ratio * self.hidden_size, + self.hidden_size, + self.output_parallel_mode, + self.weight_parallel_mode, + dtype=dtype, + bias=bias) + self.dropout = nn.Dropout(hidden_dropout_prob) + + def groups_for_next_layer(self) -> Tuple[ParallelMode, ParallelMode]: + return self.input_parallel_mode, self.weight_parallel_mode + + def _forward(self, hidden_states: Tensor) -> Tensor: + intermediate_output = self.dense_1(hidden_states) + intermediate_output = self.activation_func(intermediate_output) + output = self.dense_2(intermediate_output) + with seed(ParallelMode.TENSOR): + output = self.dropout(output) + return output + + def _checkpoint_forward(self, hidden_states: Tensor) -> Tensor: + return checkpoint(self._forward, hidden_states) + + def forward(self, hidden_states: Tensor) -> Tensor: + if self.checkpoint: + return self._checkpoint_forward(hidden_states) + else: + return self._forward(hidden_states) + + +@LAYERS.register_module +class ViTHead3D(nn.Module): + """Output layer for 3D parallel Vision Transformer + + :param in_features: size of input tensor + 
:type in_features: int + :param num_classes: number of classes + :type num_classes: int + :param depth: the 3D parallelism depth + :type depth: int + :param input_parallel_mode: parallel mode of input tensor + :type input_parallel_mode: ParallelMode + :param weight_parallel_mode: parallel mode of weight + :type weight_parallel_mode: ParallelMode + :param dtype: dtype of parameters, defaults to None + :type dtype: dtype, optional + :param bias: whether to add bias, defaults to True + :type bias: bool, optional + """ + def __init__(self, + in_features: int, + num_classes: int, + dtype: dtype = None, + bias: bool = True): + super().__init__() + self.depth = get_depth_from_env() + self.input_parallel_mode = ParallelMode.PARALLEL_3D_INPUT + self.weight_parallel_mode = ParallelMode.PARALLEL_3D_WEIGHT + self.output_parallel_mode = ParallelMode.PARALLEL_3D_OUTPUT + self.in_features = in_features + self.num_classes = num_classes + out_features = math.ceil(self.num_classes / + (self.depth**2)) * (self.depth**2) + self.num_classes_per_partition = divide(self.num_classes, self.depth) + self.linear = Linear3D(self.in_features, + out_features, + self.input_parallel_mode, + self.weight_parallel_mode, + dtype=dtype, + bias=bias) + + def groups_for_next_layer(self) -> Tuple[ParallelMode, ParallelMode]: + return self.linear.groups_for_next_layer() + + def forward(self, x: Tensor) -> Tensor: + # [b/q^2, s, h/q] --> [b/q^2, h/q] + x = x[:, 0] + # [b/q^2, h/q] --> [b/q^2, c/q] + x = self.linear(x) + return x[:, :self.num_classes_per_partition] + + def extra_repr(self): + return 'in_features={}, num_classes={}'.format(self.in_features, + self.num_classes) diff --git a/colossalai/nn/layer/parallel_3d/layers.py b/colossalai/nn/layer/parallel_3d/layers.py new file mode 100644 index 000000000..c6d631008 --- /dev/null +++ b/colossalai/nn/layer/parallel_3d/layers.py @@ -0,0 +1,172 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import math +from typing import Tuple + +import torch +import torch.nn as nn +from colossalai.context import ParallelMode, seed +from colossalai.registry import LAYERS +from colossalai.utils import get_current_device +from torch import Tensor, dtype +from torch.nn import Parameter + +from .._common_utils import divide, set_tensor_parallel_attribute +from ._operation import Add_3D, Matmul_AB_3D, Mul_3D, Sum_3D +from ._utils import get_depth_from_env, get_last_group + + +@LAYERS.register_module +class LayerNorm3D(nn.Module): + def __init__( + self, + normalized_shape: int, + input_parallel_mode: ParallelMode, + weight_parallel_mode: ParallelMode, + eps: float = 1e-12, + dtype: dtype = None, + ): + super().__init__() + self.input_parallel_mode = input_parallel_mode + self.weight_parallel_mode = weight_parallel_mode + self.output_parallel_mode = get_last_group(self.input_parallel_mode, + self.weight_parallel_mode) + self.depth = get_depth_from_env() + self.normalized_shape = normalized_shape + self.normalized_shape_per_partition = divide(normalized_shape, + self.depth**2) + + self.weight = Parameter( + torch.ones(self.normalized_shape_per_partition, + device=get_current_device(), + dtype=dtype)) + self.bias = Parameter( + torch.zeros(self.normalized_shape_per_partition, + device=get_current_device(), + dtype=dtype)) + self.variance_epsilon = eps + self._set_tensor_parallel_attributes() + + def _set_tensor_parallel_attributes(self): + set_tensor_parallel_attribute(self.weight) + set_tensor_parallel_attribute(self.bias) + + def groups_for_next_layer(self) -> Tuple[ParallelMode, 
ParallelMode]: + return self.input_parallel_mode, self.weight_parallel_mode + + def reset_parameters(self): + nn.init.zeros_(self.bias) + nn.init.ones_(self.weight) + + def forward(self, input_: Tensor) -> Tensor: + '''x = weight * (x - mean) / sqrt(var + eps) + bias''' + # input: [m/q^2, n, h/q] + # [m/q^2, n, 1] + mean = Sum_3D.apply(input_, -1, self.depth, self.output_parallel_mode, + True) / self.normalized_shape + # [m/q^2, n, 1] + var = (input_ - mean).pow(2) + var = Sum_3D.apply(var, -1, self.depth, self.output_parallel_mode, + True) / self.normalized_shape + + output = (input_ - mean) / torch.sqrt(var + self.variance_epsilon) + output = Mul_3D.apply(output, self.weight, self.depth, + self.input_parallel_mode, + self.weight_parallel_mode, + self.output_parallel_mode) + output = Add_3D.apply(output, self.bias, self.depth, + self.input_parallel_mode, + self.weight_parallel_mode, + self.output_parallel_mode) + return output + + def extra_repr(self): + return '{}, eps={}'.format(self.normalized_shape, + self.variance_epsilon) + + +@LAYERS.register_module +class Linear3D(nn.Module): + def __init__(self, + in_features: int, + out_features: int, + input_parallel_mode: ParallelMode, + weight_parallel_mode: ParallelMode, + bias: bool = True, + dtype: dtype = None): + super().__init__() + self.in_features = in_features + self.out_features = out_features + self.input_parallel_mode = input_parallel_mode + self.weight_parallel_mode = weight_parallel_mode + self.output_parallel_mode = get_last_group(self.input_parallel_mode, + self.weight_parallel_mode) + self.with_bias = bias + self.depth = get_depth_from_env() + self.in_features_per_partition = divide(in_features, self.depth) + self.out_features_per_partition = divide(out_features, self.depth**2) + + # [k/q, h/q^2] + self.weight = Parameter( + torch.empty(self.in_features_per_partition, + self.out_features_per_partition, + device=get_current_device(), + dtype=dtype)) + + # [h/q^2] + if bias: + self.bias = Parameter( + torch.zeros(self.out_features_per_partition, + device=get_current_device(), + dtype=dtype)) + else: + self.register_parameter('bias', None) + + self.reset_parameters() + self._set_tensor_parallel_attributes() + + def _set_tensor_parallel_attributes(self): + set_tensor_parallel_attribute(self.weight) + if self.bias is not None: + set_tensor_parallel_attribute(self.bias) + + def groups_for_next_layer(self) -> Tuple[ParallelMode, ParallelMode]: + return self.output_parallel_mode, self.weight_parallel_mode + + def reset_parameters(self): + # setting + fan_in = self.in_features + a = math.sqrt(5) + nonlinearity = 'leaky_relu' + + # init weight + std = nn.init.calculate_gain(nonlinearity, a) / math.sqrt(fan_in) + bound = math.sqrt(3.0) * std + with seed(ParallelMode.TENSOR): + nn.init.uniform_(self.weight, -bound, bound) + + # init bias + if self.with_bias: + bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0 + with seed(ParallelMode.TENSOR): + nn.init.uniform_(self.bias, -bound, bound) + + def forward(self, input_: Tensor) -> Tensor: + # input: [m/q^2, n, k/q] + # output: [m/q^2, n, h/q] + output = Matmul_AB_3D.apply(input_, self.weight, self.depth, + self.input_parallel_mode, + self.weight_parallel_mode, + self.output_parallel_mode) + + if self.with_bias: + output = Add_3D.apply(output, self.bias, self.depth, + self.output_parallel_mode, + self.weight_parallel_mode, + self.input_parallel_mode) + return output + + def extra_repr(self): + return 'in_features={}, out_features={}, bias={}'.format( + self.in_features, self.out_features, 
self.with_bias) diff --git a/colossalai/nn/layer/parallel_sequence/__init__.py b/colossalai/nn/layer/parallel_sequence/__init__.py new file mode 100644 index 000000000..4fa9eed6f --- /dev/null +++ b/colossalai/nn/layer/parallel_sequence/__init__.py @@ -0,0 +1,4 @@ +from ._operation import RingQK, RingAV +from .layers import TransformerSelfAttentionRing + +__all__ = ['TransformerSelfAttentionRing', 'RingAV', 'RingQK'] diff --git a/colossalai/nn/layer/parallel_sequence/_operation.py b/colossalai/nn/layer/parallel_sequence/_operation.py new file mode 100644 index 000000000..d5f65d5d8 --- /dev/null +++ b/colossalai/nn/layer/parallel_sequence/_operation.py @@ -0,0 +1,169 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch +from torch import distributed as dist + +from colossalai.communication import ring_forward +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.nn.layer.parallel_sequence._utils import _calc_incoming_device_range, _calc_current_device_range +from colossalai.utils import get_current_device + + +class RingQK(torch.autograd.Function): + """ + Calculate QK in a ring-exchange style + """ + + @staticmethod + def forward(ctx, + sub_q, + sub_k, + batch_size, + num_attention_heads, + sub_seq_length): + # save tensor for backward + ctx.save_for_backward(sub_q, sub_k) + ctx.sub_seq_length = sub_seq_length + + # create local segment of attention score + attention_score = torch.empty( + batch_size * num_attention_heads, + sub_seq_length, + sub_seq_length * gpc.get_world_size(ParallelMode.SEQUENCE), + dtype=sub_q.dtype, + device=get_current_device() + ) + + # compute local QK^T + part_a = torch.matmul(sub_q, sub_k.transpose(2, 1)) + local_rank = gpc.get_local_rank(ParallelMode.SEQUENCE) + local_world_size = gpc.get_world_size(ParallelMode.SEQUENCE) + start_idx = local_rank * sub_seq_length + end_idx = (local_rank + 1) * sub_seq_length + attention_score[:, :, start_idx: end_idx] = part_a + + # compute QK^T in ring-all-reduce style + for i in range(local_world_size - 1): + sub_k = ring_forward(sub_k, ParallelMode.SEQUENCE) + start_idx, end_idx = _calc_incoming_device_range(i, local_rank, local_world_size, sub_seq_length) + part_a = torch.matmul(sub_q, sub_k.transpose(2, 1)) + attention_score[:, :, start_idx:end_idx] = part_a + + return attention_score + + @staticmethod + def backward(ctx, grad_output): + sub_q, sub_k, = ctx.saved_tensors + local_rank = gpc.get_local_rank(ParallelMode.SEQUENCE) + local_world_size = gpc.get_world_size(ParallelMode.SEQUENCE) + + # calculate gradient of sub_k + grad_k = torch.matmul( + grad_output.transpose(2, 1), + sub_q + ) + dist.all_reduce(grad_k, group=gpc.get_group(ParallelMode.SEQUENCE)) + grad_k = grad_k[:, local_rank * ctx.sub_seq_length: (local_rank + 1) * ctx.sub_seq_length] + grad_k /= local_world_size + + # calculate gradient for sub_q + grad_q = torch.zeros_like(sub_q, + dtype=sub_q.dtype, + device=get_current_device(), ) + + # compute with local sub_k + start_idx, end_idx = _calc_current_device_range(local_rank, ctx.sub_seq_length) + grad_q += torch.matmul(grad_output[:, :, start_idx:end_idx], sub_k) + + # compute QK^T in ring-all-reduce style + for i in range(local_world_size - 1): + sub_k = ring_forward(sub_k, ParallelMode.SEQUENCE) + start_idx, end_idx = _calc_incoming_device_range(i, local_rank, local_world_size, ctx.sub_seq_length) + grad_q += torch.matmul(grad_output[:, :, start_idx: end_idx], sub_k) + + grad_q /= local_world_size + + return grad_q, 
grad_k, None, None, None + + +class RingAV(torch.autograd.Function): + """ + Calculate AV in a ring-exchange style + """ + + @staticmethod + def forward(ctx, + attention_score, + sub_v, + batch_size, + num_attention_heads, + attention_head_size, + sub_seq_length): + local_rank = gpc.get_local_rank(ParallelMode.SEQUENCE) + local_world_size = gpc.get_world_size(ParallelMode.SEQUENCE) + local_start_idx, local_end_idx = _calc_current_device_range(local_rank, sub_seq_length) + + sub_attention_result = torch.zeros( + batch_size * num_attention_heads, + sub_seq_length, + attention_head_size, + device=get_current_device(), + dtype=attention_score.dtype) + + # save tensors for backward + ctx.save_for_backward(attention_score, sub_v) + ctx.sub_seq_length = sub_seq_length + + # compute local AV + part_av = torch.matmul(attention_score[:, :, local_start_idx:local_end_idx], sub_v) + sub_attention_result += part_av + + # compute AV in ring - all - reduce style + for i in range(local_world_size - 1): + sub_v = ring_forward(sub_v, ParallelMode.SEQUENCE) + start_idx, end_idx = _calc_incoming_device_range(i, local_rank, local_world_size, sub_seq_length) + + # compute QK^T + part_av = torch.matmul(attention_score[:, :, start_idx:end_idx], sub_v) + sub_attention_result += part_av + return sub_attention_result + + @staticmethod + def backward(ctx, grad_output): + local_rank = gpc.get_local_rank(ParallelMode.SEQUENCE) + local_world_size = gpc.get_world_size(ParallelMode.SEQUENCE) + local_start_idx, local_end_idx = _calc_current_device_range(local_rank, ctx.sub_seq_length) + attention_scores, sub_v = ctx.saved_tensors + + # calculate gradient of v + grad_v = torch.matmul( + attention_scores.transpose(2, 1), + grad_output + ) + dist.all_reduce(grad_v, group=gpc.get_group(ParallelMode.SEQUENCE)) + grad_v = grad_v[:, local_start_idx:local_end_idx] + grad_v /= local_world_size + + # calculate gradient for attention score + grad_attention_score = torch.zeros_like(attention_scores, + dtype=grad_output.dtype, + device=get_current_device()) + + # compute with local sub_k + grad_attention_score[:, :, local_start_idx:local_end_idx] += torch.matmul( + grad_output, + sub_v.transpose(2, 1)) + + # compute QK^T in ring-all-reduce style + for i in range(local_world_size - 1): + sub_v = ring_forward(sub_v, ParallelMode.SEQUENCE) + start_idx, end_idx = _calc_incoming_device_range(i, local_rank, local_world_size, ctx.sub_seq_length) + + # compute grad_q + grad_attention_score[:, :, start_idx:end_idx] += torch.matmul( + grad_output, + sub_v.transpose(2, 1)) + + return grad_attention_score, grad_v, None, None, None, None diff --git a/colossalai/nn/layer/parallel_sequence/_utils.py b/colossalai/nn/layer/parallel_sequence/_utils.py new file mode 100644 index 000000000..9fad8fab2 --- /dev/null +++ b/colossalai/nn/layer/parallel_sequence/_utils.py @@ -0,0 +1,15 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + + +def _calc_incoming_device_range(i, rank, world_size, sub_seq_length): + device_of_incoming_k = (rank - i - 1) % world_size + start_idx = sub_seq_length * device_of_incoming_k + end_idx = sub_seq_length * (device_of_incoming_k + 1) + return start_idx, end_idx + + +def _calc_current_device_range(rank, sub_seq_length): + start_idx = sub_seq_length * rank + end_idx = sub_seq_length * (rank + 1) + return start_idx, end_idx diff --git a/colossalai/nn/layer/parallel_sequence/layers.py b/colossalai/nn/layer/parallel_sequence/layers.py new file mode 100644 index 000000000..132fc3dcc --- /dev/null +++ 
b/colossalai/nn/layer/parallel_sequence/layers.py @@ -0,0 +1,188 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.nn.layer.parallel_sequence._operation import RingQK, RingAV +from colossalai.registry import LAYERS + + +@LAYERS.register_module +class TransformerSelfAttentionRing(nn.Module): + """Parallel self-attention layer abstract class. + Self-attention layer takes input with size [b, s, h] + and returns output of the same size. + + :param hidden_size: hidden size + :type hidden_size: int + :param kv_channels: channels of key/value tensor + :type kv_channels: int + :param num_attention_heads: number of attention heads + :type num_attention_heads: int + :param attention_dropout: dropout probability for attention layer + :type attention_dropout: float + """ + + def __init__(self, + hidden_size, + kv_channels, + num_attention_heads, + attention_dropout, + ): + super().__init__() + + self.hidden_size = hidden_size + self.num_attention_heads = num_attention_heads + + projection_size = kv_channels * num_attention_heads + self.hidden_size_per_attention_head = projection_size // num_attention_heads + + self.world_size = gpc.get_world_size(ParallelMode.SEQUENCE) + + # Strided linear layer. + self.query_key_value = nn.Linear( + hidden_size, + 3 * projection_size, + ) + + # coeff = None + self.norm_factor = math.sqrt(self.hidden_size) + + # TODO: add apply_query_key_layer_scaling when we have the kernel module + # if self.apply_query_key_layer_scaling: + # coeff = self.layer_number + # self.norm_factor *= coeff + + # TODO: add fused scale mask softmax kernel when we have the kernel module + # self.scale_mask_softmax = FusedScaleMaskSoftmax( + # self.fp16, self.bf16, + # self.attn_mask_type, + # masked_softmax_fusion, + # attention_mask_func, + # self.attention_softmax_in_fp32, + # coeff) + + self.attention_dropout = nn.Dropout(attention_dropout) + + # Output. + self.dense = nn.Linear( + projection_size, + hidden_size, + bias=True) + + def forward(self, hidden_states, attention_mask): + # hidden_states: [sq, b, h] + + sub_seq_length, batch_size, hidden_size = hidden_states.size() + + # ===================== + # Query, Key, and Value + # ===================== + + # Attention heads [sq, b, h] --> [sq, b, (3 * hn * num_heads)] + mixed_x_layer = self.query_key_value(hidden_states) + + # [sq, b, num_heads, 3 * hn] --> 3 [sq, b, num_heads, hn] + new_tensor_shape = mixed_x_layer.size()[:-1] + (self.num_attention_heads, + 3 * self.hidden_size_per_attention_head) + mixed_x_layer = mixed_x_layer.view(*new_tensor_shape) + + # split into query, key and value + last_dim = mixed_x_layer.dim() - 1 + last_dim_value = mixed_x_layer.size()[-1] + assert last_dim_value % 3 == 0, 'the last dimension is not a multiple of 3, ' \ + 'cannot be divided into query, key and value' + partition_size = last_dim_value // 3 + (query_layer, key_layer, value_layer) = torch.split( + mixed_x_layer, partition_size, dim=last_dim) + + # =================================== + # Raw attention scores. 
[b, num_heads, s, s] + # =================================== + + # [b, num_heads, sq, sk] + output_size = (query_layer.size(1), + query_layer.size(2), + query_layer.size(0), + key_layer.size(0) * self.world_size) + + # [sq, b, num_heads, hn] -> [sq, b * num_heads, hn] + query_layer = query_layer.view(output_size[2], + output_size[0] * output_size[1], -1) + # [sk, b, num_heads, hn] -> [sk, b * num_heads, hn] + key_layer = key_layer.view(key_layer.size(0), + output_size[0] * output_size[1], -1) + + # [b, sq, sk] + attention_scores = RingQK.apply( + # [b * num_heads, sq, hn] + query_layer.transpose(0, 1).contiguous(), + key_layer.transpose(0, 1).contiguous(), # [b * num_heads, sk, hn], + batch_size, + self.num_attention_heads, + sub_seq_length + ) + attention_scores /= self.norm_factor + + # change view to [b, num_heads, sq, sk] + attention_scores = attention_scores.view(*output_size) + attention_scores = attention_scores.unsqueeze(1) + + attention_scores = attention_scores + attention_mask + attention_probs = F.softmax(attention_scores, dim=-1) + attention_probs = attention_probs.squeeze(1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + # with mpu.get_cuda_rng_tracker().fork(): + # TODO: check if a rng tracker is needed + attention_probs = self.attention_dropout(attention_probs) + + # context layer shape: [b, num_heads, sq, hn] + output_size = (value_layer.size(1), + value_layer.size(2), + query_layer.size(0), + value_layer.size(3)) + # + # # change view [sk, b * num_heads, hn] + value_layer = value_layer.contiguous().view(value_layer.size(0), + output_size[0] * output_size[1], -1) + + # # change view [b * num_heads, sq, sk] + attention_probs = attention_probs.view(attention_probs.size(0) * attention_probs.size(1), + attention_probs.size(2), + attention_probs.size(3)) + + # matmul: [b*num_heads, sq, hn] + # context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1)) + context_layer = RingAV.apply( + attention_probs, + value_layer.transpose(0, 1).contiguous(), + batch_size, + self.num_attention_heads, + self.hidden_size_per_attention_head, + sub_seq_length + ) + + # # change view [b, num_heads, sq, hn] + context_layer = context_layer.view(*output_size) + + # # [b, np, sq, hn] --> [sq, b, np, hn] + context_layer = context_layer.permute(2, 0, 1, 3).contiguous() + + # # [sq, b, np, hn] --> [sq, b, hp] + new_context_layer_shape = context_layer.size()[:-2] + ( + self.hidden_size_per_attention_head * self.num_attention_heads,) + context_layer = context_layer.view(*new_context_layer_shape) + + # context_layer = context_layer.transpose(1, 0).contiguous() + output = self.dense(context_layer) + bias = self.dense.bias + + return output, bias diff --git a/colossalai/nn/layer/parallel_vision_transformer/__init__.py b/colossalai/nn/layer/parallel_vision_transformer/__init__.py new file mode 100644 index 000000000..8adf9eb30 --- /dev/null +++ b/colossalai/nn/layer/parallel_vision_transformer/__init__.py @@ -0,0 +1,3 @@ +from .layers import ViTBlock + +__all__ = ['ViTBlock'] diff --git a/colossalai/nn/layer/parallel_vision_transformer/layers.py b/colossalai/nn/layer/parallel_vision_transformer/layers.py new file mode 100644 index 000000000..8624f7f66 --- /dev/null +++ b/colossalai/nn/layer/parallel_vision_transformer/layers.py @@ -0,0 +1,59 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from torch import nn as nn + +from colossalai.builder import build_layer +from colossalai.registry 
import LAYERS + + +@LAYERS.register_module +class ViTBlock(nn.Module): + """Vision Transformer block + + :param attention_cfg: config of attention layer + :type attention_cfg: dict + :param droppath_cfg: config of drop path + :type droppath_cfg: dict + :param mlp_cfg: config of MLP layer + :type mlp_cfg: dict + :param norm_cfg: config of normlization layer + :type norm_cfg: dict + """ + + def __init__(self, + attention_cfg: dict, + droppath_cfg: dict, + mlp_cfg: dict, + norm_cfg: dict, + ): + super().__init__() + self.norm1 = build_layer(norm_cfg) + self.attn = build_layer(attention_cfg) + self.drop_path = build_layer( + droppath_cfg) if droppath_cfg['drop_path'] > 0. else nn.Identity() + self.norm2 = build_layer(norm_cfg) + self.mlp = build_layer(mlp_cfg) + + def forward(self, x): + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + # x_ = x + # x_ = self.norm1(x_) + # if self.checkpoint: + # x_ = checkpoint(self.attn, x_) + # else: + # x_ = self.attn(x_) + # x_ = self.drop_path(x_) + # x = x + x_ + # + # x_ = x + # x_ = self.norm2(x_) + # if self.checkpoint: + # x_ = checkpoint(self.mlp, x_) + # else: + # x_ = self.mlp(x_) + # x_ = self.drop_path(x_) + # x = x + x_ + return x diff --git a/colossalai/nn/layer/vanilla_resnet/__init__.py b/colossalai/nn/layer/vanilla_resnet/__init__.py new file mode 100644 index 000000000..289b8749e --- /dev/null +++ b/colossalai/nn/layer/vanilla_resnet/__init__.py @@ -0,0 +1,5 @@ +from .basic_block import ResNetBasicBlock +from .bottleneck import ResNetBottleneck +from .reslayer import ResLayer + +__all__ = ['ResLayer', 'ResNetBottleneck', 'ResNetBasicBlock'] diff --git a/colossalai/nn/layer/vanilla_resnet/basic_block.py b/colossalai/nn/layer/vanilla_resnet/basic_block.py new file mode 100644 index 000000000..320dac2fd --- /dev/null +++ b/colossalai/nn/layer/vanilla_resnet/basic_block.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from typing import Optional, Callable + +import torch.nn as nn +from torch import Tensor + +from colossalai.registry import LAYERS +from .conv import conv3x3 + + +@LAYERS.register_module +class ResNetBasicBlock(nn.Module): + """Basic ResNet block + """ + expansion: int = 1 + + def __init__( + self, + inplanes: int, + planes: int, + stride: int = 1, + downsample: Optional[nn.Module] = None, + groups: int = 1, + base_width: int = 64, + dilation: int = 1, + norm_layer: Optional[Callable[..., nn.Module]] = None + ) -> None: + super().__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + if groups != 1 or base_width != 64: + raise ValueError( + 'BasicBlock only supports groups=1 and base_width=64') + if dilation > 1: + raise NotImplementedError( + "Dilation > 1 not supported in BasicBlock") + # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x: Tensor) -> Tensor: + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out diff --git a/colossalai/nn/layer/vanilla_resnet/bottleneck.py b/colossalai/nn/layer/vanilla_resnet/bottleneck.py new file mode 100644 index 
000000000..d75f9534b --- /dev/null +++ b/colossalai/nn/layer/vanilla_resnet/bottleneck.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from typing import Optional, Callable + +import torch.nn as nn +from torch import Tensor + +from colossalai.registry import LAYERS +from .conv import conv3x3, conv1x1 + + +@LAYERS.register_module +class ResNetBottleneck(nn.Module): + # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2) + # while original implementation places the stride at the first 1x1 convolution(self.conv1) + # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385. + # This variant is also known as ResNet V1.5 and improves accuracy according to + # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch. + + expansion: int = 4 + + def __init__( + self, + inplanes: int, + planes: int, + stride: int = 1, + downsample: Optional[nn.Module] = None, + groups: int = 1, + base_width: int = 64, + dilation: int = 1, + norm_layer: Optional[Callable[..., nn.Module]] = None + ) -> None: + super().__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + width = int(planes * (base_width / 64.)) * groups + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv1x1(inplanes, width) + self.bn1 = norm_layer(width) + self.conv2 = conv3x3(width, width, stride, groups, dilation) + self.bn2 = norm_layer(width) + self.conv3 = conv1x1(width, planes * self.expansion) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x: Tensor) -> Tensor: + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out diff --git a/colossalai/nn/layer/vanilla_resnet/conv.py b/colossalai/nn/layer/vanilla_resnet/conv.py new file mode 100644 index 000000000..c918d94c4 --- /dev/null +++ b/colossalai/nn/layer/vanilla_resnet/conv.py @@ -0,0 +1,15 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch.nn as nn + + +def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d: + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=dilation, groups=groups, bias=False, dilation=dilation) + + +def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d: + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) diff --git a/colossalai/nn/layer/vanilla_resnet/reslayer.py b/colossalai/nn/layer/vanilla_resnet/reslayer.py new file mode 100644 index 000000000..4e1b48c5e --- /dev/null +++ b/colossalai/nn/layer/vanilla_resnet/reslayer.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch.nn as nn + +from colossalai.registry import LAYERS +from .conv import conv1x1 + + +@LAYERS.register_module +class ResLayer(nn.Module): + + def __init__(self, + block_type: str, + norm_layer_type: str, + inplanes: int, + planes: int, + blocks: int, + groups: int, + base_width: int, + stride: int = 1, + dilation: int = 1, + dilate: bool = False, + ): + super().__init__() + self.block = 
LAYERS.get_module(block_type) + self.norm_layer = LAYERS.get_module(norm_layer_type) + self.inplanes = inplanes + self.planes = planes + self.blocks = blocks + self.groups = groups + self.dilation = dilation + self.base_width = base_width + self.dilate = dilate + self.stride = stride + self.layer = self._make_layer() + + def _make_layer(self): + norm_layer = self.norm_layer + downsample = None + previous_dilation = self.dilation + if self.dilate: + self.dilation *= self.stride + self.stride = 1 + if self.stride != 1 or self.inplanes != self.planes * self.block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, self.planes * self.block.expansion, self.stride), + norm_layer(self.planes * self.block.expansion), + ) + + layers = [] + layers.append(self.block(self.inplanes, self.planes, self.stride, downsample, self.groups, + self.base_width, previous_dilation, norm_layer)) + self.inplanes = self.planes * self.block.expansion + for _ in range(1, self.blocks): + layers.append(self.block(self.inplanes, self.planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer)) + + return nn.Sequential(*layers) + + def forward(self, x): + return self.layer(x) diff --git a/colossalai/nn/layer/vanilla_vision_transformer/__init__.py b/colossalai/nn/layer/vanilla_vision_transformer/__init__.py new file mode 100644 index 000000000..90d614e0a --- /dev/null +++ b/colossalai/nn/layer/vanilla_vision_transformer/__init__.py @@ -0,0 +1,7 @@ +from .layers import (VanillaViTBlock, VanillaViTMLP, VanillaViTPatchEmbedding, + VanillaViTAttention, VanillaViTDropPath, VanillaViTHead) + +__all__ = [ + 'VanillaViTBlock', 'VanillaViTMLP', 'VanillaViTPatchEmbedding', + 'VanillaViTAttention', 'VanillaViTDropPath', 'VanillaViTHead' +] diff --git a/colossalai/nn/layer/vanilla_vision_transformer/layers.py b/colossalai/nn/layer/vanilla_vision_transformer/layers.py new file mode 100644 index 000000000..6f7ec4c7c --- /dev/null +++ b/colossalai/nn/layer/vanilla_vision_transformer/layers.py @@ -0,0 +1,244 @@ +import collections.abc +from itertools import repeat + +import torch +from torch import nn as nn + +from colossalai.registry import LAYERS + + +# From PyTorch internals +def _ntuple(n): + def parse(x): + if isinstance(x, collections.abc.Iterable): + return x + return tuple(repeat(x, n)) + + return parse + + +to_2tuple = _ntuple(2) + + +@LAYERS.register_module +class VanillaViTPatchEmbedding(nn.Module): + """ 2D Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True, drop=0.): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.grid_size = (img_size[0] // patch_size[0], + img_size[1] // patch_size[1]) + self.num_patches = self.grid_size[0] * self.grid_size[1] + self.flatten = flatten + + self.proj = nn.Conv2d(in_chans, embed_dim, + kernel_size=patch_size, stride=patch_size) + self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.pos_embed = nn.Parameter(torch.zeros(1, self.num_patches + 1, embed_dim)) + self.pos_drop = nn.Dropout(p=drop) + + def forward(self, x): + B, C, H, W = x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
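+ # Shape walk-through for the default img_size=224, patch_size=16, embed_dim=768
+ # and a batch of B images:
+ #   proj:      [B, 3, 224, 224] -> [B, 768, 14, 14]
+ #   flatten:   [B, 768, 14, 14] -> [B, 196, 768]   (196 = 14 * 14 patches)
+ #   cls_token: [B, 196, 768]    -> [B, 197, 768]
+ # pos_embed has shape [1, 197, 768], so the addition below broadcasts over B.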
+ x = self.proj(x) + if self.flatten: + x = x.flatten(2).transpose(1, 2) # BCHW -> BNC + x = self.norm(x) + cls_token = self.cls_token.expand(x.shape[0], -1, -1) + x = torch.cat((cls_token, x), dim=1) + x = self.pos_drop(x + self.pos_embed) + return x + + +@LAYERS.register_module +class VanillaViTMLP(nn.Module): + """ MLP as used in Vision Transformer, MLP-Mixer and related networks + """ + + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +def drop_path(x, drop_prob: float = 0., training: bool = False): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + + This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, + the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for + changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use + 'survival rate' as the argument. + + """ + if drop_prob == 0. or not training: + return x + keep_prob = 1 - drop_prob + # work with diff dim tensors, not just 2D ConvNets + shape = (x.shape[0],) + (1,) * (x.ndim - 1) + random_tensor = keep_prob + \ + torch.rand(shape, dtype=x.dtype, device=x.device) + random_tensor.floor_() # binarize + output = x.div(keep_prob) * random_tensor + return output + + +@LAYERS.register_module +class VanillaViTDropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + """ + + def __init__(self, drop_prob=0.): + super().__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + +@LAYERS.register_module +class VanillaViTAttention(nn.Module): + """Vanilla attention layer of Vision Transformer + + :param dim: dimension of input tensor + :type dim: int + :param num_heads: number of attention heads, defaults to 8 + :type num_heads: int, optional + :param qkv_bias: enable bias for qkv if True, defaults to False + :type qkv_bias: bool, optional + :param attn_drop: dropout probability for attention layer, defaults to 0. + :type attn_drop: float, optional + :param proj_drop: dropout probability for linear layer, defaults to 0. 
+ :type proj_drop: float, optional + """ + + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim ** -0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // + self.num_heads).permute(2, 0, 3, 1, 4) + # make torchscript happy (cannot use tensor as tuple) + q, k, v = qkv[0], qkv[1], qkv[2] + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +@LAYERS.register_module +class VanillaViTBlock(nn.Module): + + """Vanilla Vision Transformer block + + :param dim: dimension of input tensor + :type dim: int + :param num_heads: number of attention heads + :type num_heads: int + :param mlp_ratio: hidden size of MLP divided by embedding dim, defaults to 4. + :type mlp_ratio: float, optional + :param qkv_bias: enable bias for qkv if True, defaults to False + :type qkv_bias: bool, optional + :param drop: dropout probability, defaults to 0. + :type drop: float, optional + :param attn_drop: dropout probability for attention layer, defaults to 0. + :type attn_drop: float, optional + :param drop_path: drop path probability, defaults to 0. + :type drop_path: float, optional + :param act_layer: activation function, defaults to nn.GELU + :type act_layer: torch.nn.Module, optional + :param norm_layer: normalization layer, defaults to nn.LayerNorm + :type norm_layer: torch.nn.Module, optional + """ + + def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., + drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.norm1 = norm_layer(dim) + self.attn = LAYERS.get_module('VanillaViTAttention')(dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=drop) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = LAYERS.get_module('VanillaViTDropPath')( + drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = LAYERS.get_module('VanillaViTMLP')(in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +@LAYERS.register_module +class VanillaViTHead(nn.Module): + """Output layer of vanilla Vision Transformer + + :param in_features: size of input tensor + :type in_features: int + :param intermediate_features: hidden size + :type intermediate_features: int + :param out_features: size of output tensor + :type out_features: int + :param bias: whether to add bias, defaults to True + :type bias: bool, optional + """ + + def __init__(self, + in_features, + intermediate_features, + out_features, + bias=True + ): + super().__init__() + self.linear_1 = nn.Linear( + in_features, intermediate_features, bias=bias) + self.act = nn.Tanh() + self.linear_2 = nn.Linear( + intermediate_features, out_features, bias=bias) + + def forward(self, x): + x = x[:, 0, :].squeeze(1) + x = self.linear_1(x) + x = self.act(x) + x = self.linear_2(x) + return x diff --git a/colossalai/nn/layer/wrapper/__init__.py b/colossalai/nn/layer/wrapper/__init__.py new file mode 100644 index 000000000..a19f65dcc --- /dev/null +++ b/colossalai/nn/layer/wrapper/__init__.py @@ -0,0 +1,3 @@ +from .lambda_wrapper import LambdaWrapper + +__all__ = ['LambdaWrapper'] diff --git a/colossalai/nn/layer/wrapper/lambda_wrapper.py b/colossalai/nn/layer/wrapper/lambda_wrapper.py new file mode 100644 index 000000000..d2b06f6b4 --- /dev/null +++ b/colossalai/nn/layer/wrapper/lambda_wrapper.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch.nn as nn + +from colossalai.builder import build_layer +from colossalai.registry import LAYERS + + +@LAYERS.register_module +class LambdaWrapper(nn.Module): + """Wrap a function to nn.Module, which takes a config of layers and can fully access them + + :param func: user customed function + :type func: Callable + :param layers_cfg: config of layers, defaults to None + :type layers_cfg: dict, optional + """ + + def __init__(self, func, layers_cfg: dict = None): + super().__init__() + self.func = func + self.layers = self._build_layers(layers_cfg) + + def _build_layers(self, layers_cfg: dict): + if layers_cfg is None: + return None + else: + layers = [] + + for cfg in layers_cfg: + layer = build_layer(cfg) + layers.append(layer) + return layers + + def forward(self, *args, **kwargs): + return self.func(self, *args, **kwargs) diff --git a/colossalai/nn/loss/__init__.py b/colossalai/nn/loss/__init__.py new file mode 100644 index 000000000..6015c55c6 --- /dev/null +++ b/colossalai/nn/loss/__init__.py @@ -0,0 +1,6 @@ +from .base_loss import BaseLoss +from .cross_entropy_2d import CrossEntropyLoss2D +from .cross_entropy_2p5d import CrossEntropyLoss2p5D +from .cross_entropy_3d import CrossEntropyLoss3D + +__all__ = ['CrossEntropyLoss2D', 'CrossEntropyLoss2p5D', 'CrossEntropyLoss3D'] diff --git a/colossalai/nn/loss/base_loss.py b/colossalai/nn/loss/base_loss.py new file mode 100644 index 000000000..bf5bbe6b2 --- /dev/null +++ b/colossalai/nn/loss/base_loss.py @@ -0,0 +1,13 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from abc import ABC, abstractmethod + + +class BaseLoss(ABC): + """Absctract loss class + """ + + @abstractmethod + def calc_loss(self, *args, **kwargs): + pass diff --git 
a/colossalai/nn/loss/cross_entropy_1d.py b/colossalai/nn/loss/cross_entropy_1d.py new file mode 100644 index 000000000..667c00734 --- /dev/null +++ b/colossalai/nn/loss/cross_entropy_1d.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch +import torch.nn.functional as F +from torch.nn.modules.loss import _Loss + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.nn.layer.parallel_1d._utils import vocab_range_from_per_partition_vocab_size + + +class _VocabParallelCrossEntropy_1D(torch.autograd.Function): + + @staticmethod + def forward(ctx, vocab_parallel_logits, target): + # Maximum value along vocab dimension across all GPUs. + logits_max = torch.max(vocab_parallel_logits, dim=-1)[0] + torch.distributed.all_reduce(logits_max, + op=torch.distributed.ReduceOp.MAX, + group=gpc.get_group(ParallelMode.PARALLEL_1D)) + # Subtract the maximum value. + vocab_parallel_logits.sub_(logits_max.unsqueeze(dim=-1)) + + # Get the partition's vocab indecies + partition_vocab_size = vocab_parallel_logits.size()[-1] + rank = gpc.get_local_rank(ParallelMode.PARALLEL_1D) + world_size = gpc.get_world_size(ParallelMode.PARALLEL_1D) + vocab_start_index, vocab_end_index = vocab_range_from_per_partition_vocab_size( + partition_vocab_size, rank, world_size) + + # Create a mask of valid vocab ids (1 means it needs to be masked). + target_mask = (target < vocab_start_index) | (target >= vocab_end_index) + masked_target = target.clone() - vocab_start_index + masked_target[target_mask] = 0 + + # Get predicted-logits = logits[target]. + # For Simplicity, we convert logits to a 2-D tensor with size + # [*, partition-vocab-size] and target to a 1-D tensor of size [*]. + logits_2d = vocab_parallel_logits.view(-1, partition_vocab_size) + masked_target_1d = masked_target.view(-1) + arange_1d = torch.arange(start=0, end=logits_2d.size()[0], + device=logits_2d.device) + predicted_logits_1d = logits_2d[arange_1d, masked_target_1d] + predicted_logits_1d = predicted_logits_1d.clone().contiguous() + predicted_logits = predicted_logits_1d.view_as(target) + predicted_logits[target_mask] = 0.0 + # All reduce is needed to get the chunks from other GPUs. + torch.distributed.all_reduce(predicted_logits, + op=torch.distributed.ReduceOp.SUM, + group=gpc.get_group(ParallelMode.PARALLEL_1D)) + + # Sum of exponential of logits along vocab dimension across all GPUs. + exp_logits = vocab_parallel_logits + torch.exp(vocab_parallel_logits, out=exp_logits) + sum_exp_logits = exp_logits.sum(dim=-1) + torch.distributed.all_reduce(sum_exp_logits, + op=torch.distributed.ReduceOp.SUM, + group=gpc.get_group(ParallelMode.PARALLEL_1D)) + + # Loss = log(sum(exp(logits))) - predicted-logit. + loss = torch.log(sum_exp_logits) - predicted_logits + + # Store softmax, target-mask and masked-target for backward pass. + exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1)) + ctx.save_for_backward(exp_logits, target_mask, masked_target_1d) + + return loss + + @staticmethod + def backward(ctx, grad_output): + # Retreive tensors from the forward path. + softmax, target_mask, masked_target_1d = ctx.saved_tensors + + # All the inputs have softmax as thier gradient. + grad_input = softmax + # For simplicity, work with the 2D gradient. + partition_vocab_size = softmax.size()[-1] + grad_2d = grad_input.view(-1, partition_vocab_size) + + # Add the gradient from matching classes. 
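+ # For softmax cross entropy, d(loss)/d(logits) = softmax(logits) - one_hot(target).
+ # grad_input starts out as the saved softmax, so the update below subtracts 1 at the
+ # target column, and only on the rank whose vocab shard owns that target
+ # (target_mask == 0); all other ranks keep the plain softmax term.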
+ arange_1d = torch.arange(start=0, end=grad_2d.size()[0], + device=grad_2d.device) + grad_2d[arange_1d, masked_target_1d] -= ( + 1.0 - target_mask.view(-1).float()) + + # Finally elementwise multiplication with the output gradients. + grad_input.mul_(grad_output.unsqueeze(dim=-1)) + + return grad_input, None + + +class LmLoss1D(_Loss): + + def forward(self, lm_logits, lm_labels, loss_mask): + lm_loss = _VocabParallelCrossEntropy_1D.apply(lm_logits, lm_labels) + lm_loss = torch.sum( + lm_loss.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum() + return lm_loss + + +class SopLoss1D(_Loss): + + def forward(self, sop_logits, sentence_order): + sop_loss = F.cross_entropy(sop_logits.view(-1, 2).float(), + sentence_order.view(-1), + ignore_index=-1) + return sop_loss + + +class BERTDualHeadLoss(_Loss): + + def __init__(self): + self.lm_loss = LmLoss1D() + self.sop_loss = SopLoss1D() + + def forward(self, lm_logits, sop_logits, lm_labels, loss_mask, sentence_order): + lm_loss = self.lm_loss(lm_logits, lm_labels, loss_mask) + sop_loss = self.sop_loss(sop_logits, sentence_order) + return lm_loss + sop_loss diff --git a/colossalai/nn/loss/cross_entropy_2d.py b/colossalai/nn/loss/cross_entropy_2d.py new file mode 100644 index 000000000..fe7ca6aa8 --- /dev/null +++ b/colossalai/nn/loss/cross_entropy_2d.py @@ -0,0 +1,128 @@ +import torch +import torch.distributed as dist +from torch.nn.modules.loss import _Loss + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.nn.layer.parallel_2d._utils import assert_summa_initialization, get_summa_dim_from_env +from colossalai.registry import LOSSES +from colossalai.utils import get_current_device + + +class _ParallelCrossEntropyLossFunction_2D(torch.autograd.Function): + ### Modified based on megatron.mpu.cross_entropy ### + + @staticmethod + def forward(ctx, logits, targets): + # logits: [b/q, h/q] + # labels: [b/q] + # loss: [b/q] + # vocab_parallel_logits: [b/q, s, v/q] + # target: [b/q, s] + logits_max = torch.max(logits, dim=-1)[0] + torch.distributed.all_reduce( + logits_max, + op=torch.distributed.ReduceOp.MAX, + group=gpc.get_group(ParallelMode.PARALLEL_2D_ROW)) + # Subtract the maximum value. + # vocab_parallel_logits.sub_(logits_max.unsqueeze(dim=-1)) + logits = logits - logits_max.unsqueeze(dim=-1) + + vocab_size = logits.size(-1) + rank = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) + vocab_start = rank * (vocab_size) + vocab_end = (rank + 1) * (vocab_size) - 1 + + target_mask = (targets < vocab_start) | (targets > vocab_end) + + masked_target = targets.clone() - vocab_start + masked_target[target_mask] = 0 + arange_1d = torch.arange( + start=0, end=logits.size()[0], + ) + predicted_logits = logits[arange_1d, masked_target] + predicted_logits[target_mask] = 0. + dist.all_reduce(predicted_logits, group=gpc.get_group( + ParallelMode.PARALLEL_2D_ROW)) + + exp_logits = torch.exp(logits) + sum_exp_logits = exp_logits.sum(dim=1) + dist.all_reduce(sum_exp_logits, group=gpc.get_group( + ParallelMode.PARALLEL_2D_ROW)) + + loss = torch.log(sum_exp_logits) - predicted_logits + + exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1)) + ctx.save_for_backward(exp_logits, target_mask, masked_target) + + return loss + + @staticmethod + def backward(ctx, output_grad): + # Retreive tensors from the forward path. + softmax, target_mask, masked_target = ctx.saved_tensors + + # All the inputs have softmax as their gradient. + grad_input = softmax + + # For simplicity, work with the 2D gradient. 
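+ # Note: the tensor saved as exp_logits in forward() was normalised in place by
+ # exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1)), so the tensor retrieved here
+ # already holds softmax probabilities; flattening it to
+ # [num_samples, vocab_per_rank] just makes the indexed update easier to express.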
+ partition_vocab_size = softmax.size()[-1] + grad_2d = grad_input.view(-1, partition_vocab_size) + + # Add the gradient from matching classes. + arange_1d = torch.arange(start=0, end=grad_2d.size()[0], + device=get_current_device()) + grad_2d[arange_1d, + masked_target] -= (1.0 - target_mask.view(-1).float()) + + # Finally elementwise multiplication with the output gradients. + grad_input.mul_(output_grad.unsqueeze(dim=-1)) + + return grad_input, None + + +class _ReduceByColumn(torch.autograd.Function): + """All-reduce the input from the model parallel region.""" + + @staticmethod + def symbolic(graph, input_): + dist.all_reduce(input_, group=gpc.get_group( + ParallelMode.PARALLEL_2D_COL)) + return input_ + + @staticmethod + def forward(ctx, input_): + dist.all_reduce(input_, group=gpc.get_group( + ParallelMode.PARALLEL_2D_COL)) + return input_ + + @staticmethod + def backward(ctx, grad_output): + return grad_output + + +@LOSSES.register_module +class CrossEntropyLoss2D(_Loss): + """Cross entropy loss for 2D parallelism + + :param reduction: whether to average the loss, defaults to True + :type reduction: bool, optional + """ + + def __init__(self, reduction=True): + super().__init__() + assert_summa_initialization() + self.summa_dim = get_summa_dim_from_env() + self.row_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) + self.reduction_mean = reduction + + def forward(self, logits, targets): + targets = targets.chunk(self.summa_dim, dim=0)[self.row_rank] + loss = _ParallelCrossEntropyLossFunction_2D.apply( + logits, targets, + ) + if self.reduction_mean: + loss = _ReduceByColumn.apply(loss) / self.summa_dim + dist_loss = loss.mean() + + return dist_loss diff --git a/colossalai/nn/loss/cross_entropy_2p5d.py b/colossalai/nn/loss/cross_entropy_2p5d.py new file mode 100644 index 000000000..681c7d2eb --- /dev/null +++ b/colossalai/nn/loss/cross_entropy_2p5d.py @@ -0,0 +1,124 @@ +import torch +import torch.distributed as dist +from torch.nn.modules.loss import _Loss + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.nn.layer.parallel_2p5d._utils import assert_tesseract_initialization, \ + get_tesseract_dim_dep_from_env +from colossalai.registry import LOSSES +from colossalai.utils import get_current_device + + +class _ParallelCrossEntropyLossFunction_2p5D(torch.autograd.Function): + ### Modified based on megatron.mpu.cross_entropy ### + + @staticmethod + def forward(ctx, logits, targets): + # logits: [b/dq, h/q] + # loss: [b/dq] + # targets: [b/dq, h/q] + logits_max = torch.max(logits, dim=-1)[0] + torch.distributed.all_reduce( + logits_max, + op=torch.distributed.ReduceOp.MAX, + group=gpc.get_group(ParallelMode.PARALLEL_2P5D_ROW)) + # Subtract the maximum value. + logits = logits - logits_max.unsqueeze(dim=-1) + + vocab_size = logits.size(-1) + rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW) + vocab_start = rank * (vocab_size) + vocab_end = (rank + 1) * (vocab_size) - 1 + + target_mask = (targets < vocab_start) | (targets > vocab_end) + + masked_target = targets.clone() - vocab_start + masked_target[target_mask] = 0 + arange_1d = torch.arange( + start=0, end=logits.size()[0], + ) + predicted_logits = logits[arange_1d, masked_target] + predicted_logits[target_mask] = 0. 
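+ # Each rank in the row group holds a disjoint vocab shard, so for every sample
+ # exactly one rank contributes logits[target] here and the others contribute 0;
+ # the all-reduce below therefore reconstructs the full predicted logit on all ranks.
+ # For example, with a per-rank vocab_size of 8, rank 0 owns classes 0-7 and
+ # rank 1 owns classes 8-15.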
+ dist.all_reduce(predicted_logits, group=gpc.get_group(ParallelMode.PARALLEL_2P5D_ROW)) + + exp_logits = torch.exp(logits) + sum_exp_logits = exp_logits.sum(dim=1) + dist.all_reduce(sum_exp_logits, group=gpc.get_group(ParallelMode.PARALLEL_2P5D_ROW)) + + loss = torch.log(sum_exp_logits) - predicted_logits + + exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1)) + ctx.save_for_backward(exp_logits, target_mask, masked_target) + + return loss + + @staticmethod + def backward(ctx, output_grad): + # Retreive tensors from the forward path. + softmax, target_mask, masked_target = ctx.saved_tensors + + # All the inputs have softmax as their gradient. + grad_input = softmax + + # For simplicity, work with the 2D gradient. + partition_vocab_size = softmax.size()[-1] + grad_2d = grad_input.view(-1, partition_vocab_size) + + # Add the gradient from matching classes. + arange_1d = torch.arange(start=0, end=grad_2d.size()[0], + device=get_current_device()) + grad_2d[arange_1d, + masked_target] -= (1.0 - target_mask.view(-1).float()) + + # Finally elementwise multiplication with the output gradients. + grad_input.mul_(output_grad.unsqueeze(dim=-1)) + + return grad_input, None + + +class _ReduceByColDep(torch.autograd.Function): + """All-reduce the input from the model parallel region.""" + + @staticmethod + def symbolic(graph, input_): + dist.all_reduce(input_, group=gpc.get_group(ParallelMode.PARALLEL_2P5D_XZ)) + return input_ + + @staticmethod + def forward(ctx, input_): + dist.all_reduce(input_, group=gpc.get_group(ParallelMode.PARALLEL_2P5D_XZ)) + return input_ + + @staticmethod + def backward(ctx, grad_output): + return grad_output + + +@LOSSES.register_module +class CrossEntropyLoss2p5D(_Loss): + """Cross entropy loss for 2.5D parallelism + + :param reduction: whether to average the loss, defaults to True + :type reduction: bool, optional + """ + + def __init__(self, reduction=True): + super().__init__() + assert_tesseract_initialization() + self.xz_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_XZ) + self.tesseract_dim, self.tesseract_dep = get_tesseract_dim_dep_from_env() + self.reduction_mean = reduction + + def forward(self, logits, targets): + targets = targets.chunk(self.tesseract_dim * + self.tesseract_dep, dim=0)[self.xz_rank] + loss = _ParallelCrossEntropyLossFunction_2p5D.apply( + logits, targets, + ) + if self.reduction_mean: + loss = _ReduceByColDep.apply( + loss) / self.tesseract_dim / self.tesseract_dep + dist_loss = loss.mean() + + return dist_loss diff --git a/colossalai/nn/loss/cross_entropy_3d.py b/colossalai/nn/loss/cross_entropy_3d.py new file mode 100644 index 000000000..b1ef7731b --- /dev/null +++ b/colossalai/nn/loss/cross_entropy_3d.py @@ -0,0 +1,193 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch +import torch.distributed as dist +from torch.nn.modules.loss import _Loss + +from colossalai.communication import all_gather +from colossalai.core import global_context as gpc +from colossalai.nn.layer.parallel_3d._operation import Reduce_3D +from colossalai.nn.layer.parallel_3d._utils import get_last_group, get_depth_from_env +from colossalai.registry import LOSSES +from colossalai.utils import get_current_device + + +def accuracy_3d(output, target, input_parallel_mode, weight_parallel_mode): + depth = get_depth_from_env() + output_parallel_mode = get_last_group(input_parallel_mode, + weight_parallel_mode) + j = gpc.get_local_rank(input_parallel_mode) + i = gpc.get_local_rank(weight_parallel_mode) + target = torch.chunk(target, depth, dim=0)[i] + target = 
torch.chunk(target, depth, dim=0)[j] + output = all_gather(output, -1, output_parallel_mode) + prediction = torch.argmax(output, dim=-1) + correct = torch.sum(prediction == target) + dist.all_reduce(correct, group=gpc.get_group(input_parallel_mode)) + dist.all_reduce(correct, group=gpc.get_group(weight_parallel_mode)) + return correct.item() + + +class _ParallelCrossEntropyLossFunction_3D(torch.autograd.Function): + """ + Adapted from megatron.mpu.cross_entropy + loss[i] = -logits[i][targets] + log(sum(exp(logits[i]))) + """ + @staticmethod + def forward(ctx, logits, targets, depth, output_parallel_mode): + # logits: [b/q^2, c/q] + # labels: [b/q^2] + # loss: [b/q^2] + logits_max = torch.max(logits, dim=-1)[0] + dist.all_reduce(logits_max, + op=torch.distributed.ReduceOp.MAX, + group=gpc.get_group(output_parallel_mode)) + # Subtract the maximum value. + logits = logits - logits_max.unsqueeze(dim=-1) + + vocab_size_per_partition = logits.size()[-1] + rank = gpc.get_local_rank(output_parallel_mode) + vocab_start = rank * vocab_size_per_partition + vocab_end = (rank + 1) * vocab_size_per_partition - 1 + + # loss[i] = 0 if targets[i] < vocab_start or targets[i] > vocab_end + target_mask = (targets < vocab_start) | (targets > vocab_end) + masked_target = targets.clone() - vocab_start + masked_target[target_mask] = 0 + arange_1d = torch.arange(start=0, + end=logits.size()[0], + device=get_current_device()) + predicted_logits = logits[arange_1d, masked_target] + predicted_logits = predicted_logits.clone().contiguous().view_as( + targets) + predicted_logits[target_mask] = 0. + dist.all_reduce(predicted_logits, + group=gpc.get_group(output_parallel_mode)) + + # Loss = log(sum(exp(logits))) - predicted-logit. + exp_logits = torch.exp(logits) + sum_exp_logits = exp_logits.sum(dim=-1) + dist.all_reduce(sum_exp_logits, + group=gpc.get_group(output_parallel_mode)) + loss = torch.log(sum_exp_logits) - predicted_logits + + exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1)) + ctx.save_for_backward(exp_logits, target_mask, masked_target) + + return loss + + @staticmethod + def backward(ctx, output_grad): + # Retreive tensors from the forward path. + softmax, target_mask, masked_target = ctx.saved_tensors + + # All the inputs have softmax as thier gradient. + input_grad = softmax + # For simplicity, work with the 2D gradient. + partition_vocab_size = softmax.size()[-1] + grad_2d = input_grad.view(-1, partition_vocab_size) + + # Add the gradient from matching classes. 
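+ # As in the 1D/2D variants above, the gradient is softmax - one_hot(target) on the
+ # shard that owns the target class. The trailing None values returned by this
+ # backward correspond to targets, depth and output_parallel_mode, which receive
+ # no gradient.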
+ arange_1d = torch.arange(start=0, + end=grad_2d.size()[0], + device=get_current_device()) + grad_2d[arange_1d, + masked_target] -= (1.0 - target_mask.view(-1).float()) + input_grad.mul_(output_grad.unsqueeze(dim=-1)) + + return input_grad, None, None, None + + +@LOSSES.register_module +class CrossEntropyLoss3D(_Loss): + """Cross entropy loss for 3D parallelism + + :param depth: depth for 3D parallelism + :type depth: int + :param input_parallel_mode: parallel mode for input tensor + :type input_parallel_mode: ParallelMode + :param weight_parallel_mode: parallel mode for weight + :type weight_parallel_mode: ParallelMode + :param reduction: whether to average the loss, defaults to True + :type reduction: bool, optional + """ + def __init__(self, + input_parallel_mode, + weight_parallel_mode, + reduction=True): + super().__init__() + self.depth = get_depth_from_env() + self.input_parallel_mode = input_parallel_mode + self.weight_parallel_mode = weight_parallel_mode + self.output_parallel_mode = get_last_group(input_parallel_mode, + weight_parallel_mode) + self.input_rank = gpc.get_local_rank(self.input_parallel_mode) + self.weight_rank = gpc.get_local_rank(self.weight_parallel_mode) + self.reduction_mean = reduction + + def forward(self, logits, targets): + # split label partition from the entire batch + batch_size = targets.size(0) + targets = torch.chunk(targets, self.depth, dim=0)[self.weight_rank] + targets = torch.chunk(targets, self.depth, dim=0)[self.input_rank] + loss = _ParallelCrossEntropyLossFunction_3D.apply( + logits, targets, self.depth, self.output_parallel_mode) + if self.reduction_mean: + loss = loss.sum() + loss = Reduce_3D.apply(loss, self.depth, self.input_parallel_mode) + loss = Reduce_3D.apply(loss, self.depth, self.weight_parallel_mode) + loss /= batch_size + return loss + + +@LOSSES.register_module +class LabelSmoothingCrossEntropy3D(_Loss): + """ + NLL loss with label smoothing, adapted from timm.loss.LabelSmoothingCrossEntropy + + :param input_parallel_mode: parallel mode for input tensor + :type input_parallel_mode: ParallelMode + :param weight_parallel_mode: parallel mode for weight + :type weight_parallel_mode: ParallelMode + :param smoothing: label smoothing value, defaults to 0.1 + :type smoothing: float + :param reduction: whether to average the loss, defaults to True + :type reduction: bool, optional + """ + def __init__(self, + input_parallel_mode, + weight_parallel_mode, + smoothing=0.1, + reduction=True): + super().__init__() + assert smoothing < 1.0 + self.smoothing = smoothing + self.confidence = 1. 
- smoothing + self.depth = get_depth_from_env() + self.input_parallel_mode = input_parallel_mode + self.weight_parallel_mode = weight_parallel_mode + self.output_parallel_mode = get_last_group(input_parallel_mode, + weight_parallel_mode) + self.reduction_mean = reduction + + def forward(self, logits, targets): + # split label partition from the entire batch + batch_size = targets.size(0) + j = gpc.get_local_rank(self.input_parallel_mode) + i = gpc.get_local_rank(self.weight_parallel_mode) + targets = torch.chunk(targets, self.depth, dim=0)[i] + targets = torch.chunk(targets, self.depth, dim=0)[j] + # sum exp(logits) over the local class shard, then across the output group + exp_logits = torch.exp(logits) + sum_exp_logits = exp_logits.sum(dim=-1, keepdim=True) + dist.all_reduce(sum_exp_logits, + group=gpc.get_group(self.output_parallel_mode)) + # log_probs is the negative log-softmax, so the smoothing term needs no extra negation + log_probs = torch.log(sum_exp_logits) - logits + nll_loss = _ParallelCrossEntropyLossFunction_3D.apply( + logits, targets, self.depth, self.output_parallel_mode) + smooth_loss = log_probs.mean(dim=-1) + loss = self.confidence * nll_loss + self.smoothing * smooth_loss + if self.reduction_mean: + loss = loss.sum() + loss = Reduce_3D.apply(loss, self.depth, self.input_parallel_mode) + loss = Reduce_3D.apply(loss, self.depth, self.weight_parallel_mode) + loss /= batch_size + return loss diff --git a/colossalai/nn/lr_scheduler/__init__.py b/colossalai/nn/lr_scheduler/__init__.py new file mode 100644 index 000000000..82e28ff88 --- /dev/null +++ b/colossalai/nn/lr_scheduler/__init__.py @@ -0,0 +1,13 @@ +from .cosine import CosineAnnealingLR, CosineAnnealingWarmupLR, FlatAnnealingLR, FlatAnnealingWarmupLR +from .linear import LinearWarmupLR, LinearWarmupDecay +from .multistep import MultiStepLR, MultiStepWarmupLR +from .onecycle import OneCycleLR +from .poly import PolynomialLR, PolynomialWarmupLR +from .torch import LambdaLR, MultiplicativeLR, StepLR, ExponentialLR + +__all__ = [ + 'CosineAnnealingLR', 'CosineAnnealingWarmupLR', 'FlatAnnealingLR', 'FlatAnnealingWarmupLR', 'LinearWarmupLR', + 'LinearWarmupDecay', 'MultiStepLR', 'MultiStepWarmupLR', 'OneCycleLR', 'PolynomialLR', 'PolynomialWarmupLR', 'LambdaLR', + 'MultiplicativeLR', 'StepLR', + 'ExponentialLR' +] diff --git a/colossalai/nn/lr_scheduler/cosine.py b/colossalai/nn/lr_scheduler/cosine.py new file mode 100644 index 000000000..067636a3d --- /dev/null +++ b/colossalai/nn/lr_scheduler/cosine.py @@ -0,0 +1,129 @@ +from torch.optim.lr_scheduler import CosineAnnealingLR as _CosineAnnealingLR + +from colossalai.registry import LR_SCHEDULERS +from .delayed import DelayerScheduler, WarmupDelayerScheduler, WarmupScheduler + + +@LR_SCHEDULERS.register_module +class CosineAnnealingLR(_CosineAnnealingLR): + r"""Set the learning rate of each parameter group using a cosine annealing + schedule, where :math:`\eta_{max}` is set to the initial lr and + :math:`T_{cur}` is the number of epochs since the last restart in SGDR: + + .. math:: + \begin{aligned} + \eta_t & = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 + + \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right), + & T_{cur} \neq (2k+1)T_{max}; \\ + \eta_{t+1} & = \eta_{t} + \frac{1}{2}(\eta_{max} - \eta_{min}) + \left(1 - \cos\left(\frac{1}{T_{max}}\pi\right)\right), + & T_{cur} = (2k+1)T_{max}. + \end{aligned} + + When last_epoch=-1, sets initial lr as lr. Notice that because the schedule + is defined recursively, the learning rate can be simultaneously modified + outside this scheduler by other operators. If the learning rate is set + solely by this scheduler, the learning rate at each step becomes: + + ..
math:: + \eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 + + \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right) + + It has been proposed in + `SGDR: Stochastic Gradient Descent with Warm Restarts`_. Note that this only + implements the cosine annealing part of SGDR, and not the restarts. + + .. _SGDR\: Stochastic Gradient Descent with Warm Restarts: + https://arxiv.org/abs/1608.03983 + + :param optimizer: Wrapped optimizer + :type optimizer: torch.optim.Optimizer + :param total_steps: number of total training steps + :type total_steps: int + :param eta_min: Minimum learning rate, defaults to 0 + :type eta_min: int, optional + :param last_epoch: The index of last epoch, defaults to -1 + :type last_epoch: int, optional + """ + + def __init__(self, optimizer, total_steps: int, eta_min: int = 0, last_epoch: int = -1, **kwargs): + super().__init__(optimizer, total_steps, eta_min=eta_min, last_epoch=last_epoch) + + +@LR_SCHEDULERS.register_module +class CosineAnnealingWarmupLR(WarmupScheduler): + """Cosine annealing learning rate scheduler with learning rate warmup. A linear warmup schedule will be applied. + + :param optimizer: Wrapped optimizer + :type optimizer: torch.optim.Optimizer + :param total_steps: number of total training steps + :type total_steps: int + :param warmup_steps: number of warmup steps, defaults to 0 + :type warmup_steps: int, optional + :param eta_min: Minimum learning rate, defaults to 0 + :type eta_min: int, optional + :param last_epoch: The index of last epoch, defaults to -1 + :type last_epoch: int, optional + """ + + def __init__(self, optimizer, total_steps: int, warmup_steps: int = 0, eta_min: int = 0, last_epoch: int = -1, + **kwargs): + base_scheduler = _CosineAnnealingLR( + optimizer, total_steps - warmup_steps, eta_min=eta_min) + super().__init__(optimizer, warmup_steps, base_scheduler, last_epoch=last_epoch) + + +@LR_SCHEDULERS.register_module +class FlatAnnealingLR(DelayerScheduler): + """Flat and cosine annealing learning rate scheduler. The learning rate will be a fixed value before starting decay. + + :param optimizer: Wrapped optimizer + :type optimizer: torch.optim.Optimizer + :param total_steps: number of total training steps + :type total_steps: int + :param pct_start: percent of steps before starting learning rate decay + :type pct_start: float + :param last_epoch: The index of last epoch, defaults to -1 + :type last_epoch: int, optional + """ + + def __init__(self, optimizer, total_steps: int, pct_start: float = 0.72, last_epoch: int = -1, **kwargs): + if not (0.0 <= pct_start <= 1.0): + raise ValueError( + f'pct_start must >= 0.0 and <= 1.0, got {pct_start}') + flat_steps = int(total_steps * pct_start) + anneal_steps = total_steps - flat_steps + base_scheduler = _CosineAnnealingLR( + optimizer, anneal_steps) + super().__init__(optimizer, flat_steps, base_scheduler, last_epoch=last_epoch) + + +@LR_SCHEDULERS.register_module +class FlatAnnealingWarmupLR(WarmupDelayerScheduler): + """Flat and cosine annealing learning rate scheduler with learning rate warmup. A linear warmup schedule will be applied, and then the learning rate will be a fixed value before starting decay. 
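+ For example, with total_steps=100, warmup_steps=10 and pct_start=0.72, the schedule
+ warms up for 10 steps, stays flat for int((100 - 10) * 0.72) = 64 steps, and then
+ cosine-anneals over the remaining 26 steps.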
+ + :param optimizer: Wrapped optimizer + :type optimizer: torch.optim.Optimizer + :param total_steps: number of total training steps + :type total_steps: int + :param warmup_steps: number of warmup steps, defaults to 0 + :type warmup_steps: int, optional + :param pct_start: percent of steps before starting learning rate decay + :type pct_start: float + :param eta_min: Minimum learning rate, defaults to 0 + :type eta_min: int, optional + :param last_epoch: The index of last epoch, defaults to -1 + :type last_epoch: int, optional + """ + + def __init__(self, optimizer, total_steps: int, warmup_steps: int = 0, pct_start: float = 0.72, eta_min: int = 0, + last_epoch: int = -1, **kwargs): + if not (0.0 <= pct_start <= 1.0): + raise ValueError( + f'pct_start must >= 0.0 and <= 1.0, got {pct_start}') + flat_steps = int((total_steps - warmup_steps) * pct_start) + anneal_steps = total_steps - warmup_steps - flat_steps + base_scheduler = _CosineAnnealingLR( + optimizer, anneal_steps, eta_min=eta_min) + super().__init__(optimizer, warmup_steps, flat_steps, + base_scheduler, last_epoch=last_epoch) diff --git a/colossalai/nn/lr_scheduler/delayed.py b/colossalai/nn/lr_scheduler/delayed.py new file mode 100644 index 000000000..c8972c922 --- /dev/null +++ b/colossalai/nn/lr_scheduler/delayed.py @@ -0,0 +1,149 @@ +from torch.optim.lr_scheduler import _LRScheduler + + +class _enable_get_lr_call: + def __init__(self, o): + self.o = o + + def __enter__(self): + self.o._get_lr_called_within_step = True + return self + + def __exit__(self, type, value, traceback): + self.o._get_lr_called_within_step = False + + +class DelayerScheduler(_LRScheduler): + """ Starts with a flat lr schedule until it reaches N epochs the applies a scheduler + + :param optimizer: Wrapped optimizer. + :type optimizer: torch.optim.Optimizer + :param delay_epochs: number of epochs to keep the initial lr until starting aplying the scheduler + :type delay_epochs: int + :param after_scheduler: after target_epoch, use this scheduler(eg. ReduceLROnPlateau) + :type after_scheduler: torch.optim.lr_scheduler + :param last_epoch: The index of last epoch, defaults to -1 + :type last_epoch: int, optional + """ + + def __init__(self, optimizer, delay_epochs, after_scheduler, last_epoch=-1): + if delay_epochs < 0: + raise ValueError(f'delay_epochs must >= 0, got {delay_epochs}') + self.delay_epochs = delay_epochs + self.after_scheduler = after_scheduler + self.finished = False + super().__init__(optimizer, last_epoch) + + def get_lr(self): + if self.last_epoch >= self.delay_epochs: + if not self.finished: + self.after_scheduler.base_lrs = self.base_lrs + self.finished = True + with _enable_get_lr_call(self.after_scheduler): + return self.after_scheduler.get_lr() + + return self.base_lrs + + def step(self, epoch=None): + if self.finished: + if epoch is None: + self.after_scheduler.step(None) + else: + self.after_scheduler.step(epoch - self.delay_epochs) + else: + return super(DelayerScheduler, self).step(epoch) + + +class WarmupScheduler(_LRScheduler): + """ Starts with a linear warmup lr schedule until it reaches N epochs the applies a scheduler + + :param optimizer: Wrapped optimizer. + :type optimizer: torch.optim.Optimizer + :param warmup_epochs: number of epochs to linearly warmup lr until starting aplying the scheduler + :type warmup_epochs: int + :param after_scheduler: after target_epoch, use this scheduler(eg. 
ReduceLROnPlateau) + :type after_scheduler: torch.optim.lr_scheduler + :param last_epoch: The index of last epoch, defaults to -1 + :type last_epoch: int, optional + """ + + def __init__(self, optimizer, warmup_epochs, after_scheduler, last_epoch=-1): + if warmup_epochs < 0: + raise ValueError(f'warmup_epochs must >= 0, got {warmup_epochs}') + self.warmup_epochs = warmup_epochs + self.after_scheduler = after_scheduler + self.finished = False + super().__init__(optimizer, last_epoch) + + def get_lr(self): + if self.last_epoch >= self.warmup_epochs: + if not self.finished: + self.after_scheduler.base_lrs = self.base_lrs + # reset lr to base_lr + for group, base_lr in zip(self.optimizer.param_groups, self.base_lrs): + group['lr'] = base_lr + self.finished = True + with _enable_get_lr_call(self.after_scheduler): + return self.after_scheduler.get_lr() + + return [(self.last_epoch + 1) / (self.warmup_epochs + 1) * lr for lr in self.base_lrs] + + def step(self, epoch=None): + if self.finished: + if epoch is None: + self.after_scheduler.step(None) + else: + self.after_scheduler.step(epoch - self.warmup_epochs) + else: + return super().step(epoch) + + +class WarmupDelayerScheduler(_LRScheduler): + """ Starts with a linear warmup lr schedule until it reaches N epochs and a flat lr schedule until it reaches M epochs the applies a scheduler + + :param optimizer: Wrapped optimizer. + :type optimizer: torch.optim.Optimizer + :param warmup_epochs: number of epochs to linearly warmup lr until starting aplying the scheduler + :type warmup_epochs: int + :param delay_epochs: number of epochs to keep the initial lr until starting aplying the scheduler + :type delay_epochs: int + :param after_scheduler: after target_epoch, use this scheduler(eg. ReduceLROnPlateau) + :type after_scheduler: torch.optim.lr_scheduler + :param last_epoch: The index of last epoch, defaults to -1 + :type last_epoch: int, optional + """ + + def __init__(self, optimizer, warmup_epochs, delay_epochs, after_scheduler, last_epoch=-1): + if delay_epochs < 0: + raise ValueError(f'delay_epochs must >= 0, got {delay_epochs}') + if warmup_epochs < 0: + raise ValueError(f'warmup_epochs must >= 0, got {warmup_epochs}') + self.warmup_epochs = warmup_epochs + self.delay_epochs = delay_epochs + self.after_scheduler = after_scheduler + self.finished = False + super().__init__(optimizer, last_epoch) + + def get_lr(self): + if self.last_epoch >= self.warmup_epochs + self.delay_epochs: + if not self.finished: + self.after_scheduler.base_lrs = self.base_lrs + # reset lr to base_lr + for group, base_lr in zip(self.optimizer.param_groups, self.base_lrs): + group['lr'] = base_lr + self.finished = True + with _enable_get_lr_call(self.after_scheduler): + return self.after_scheduler.get_lr() + elif self.last_epoch >= self.warmup_epochs: + return self.base_lrs + + return [(self.last_epoch + 1) / self.warmup_epochs * lr for lr in self.base_lrs] + + def step(self, epoch=None): + if self.finished: + if epoch is None: + self.after_scheduler.step(None) + else: + self.after_scheduler.step(epoch - self.warmup_epochs) + else: + return super().step(epoch) diff --git a/colossalai/nn/lr_scheduler/linear.py b/colossalai/nn/lr_scheduler/linear.py new file mode 100644 index 000000000..afc68c5a7 --- /dev/null +++ b/colossalai/nn/lr_scheduler/linear.py @@ -0,0 +1,45 @@ +from torch.optim.lr_scheduler import _LRScheduler + +from colossalai.registry import LR_SCHEDULERS + + +@LR_SCHEDULERS.register_module +class LinearWarmupLR(_LRScheduler): + """Linearly warmup learning rate 
and then linearly decay + + :param optimizer: Wrapped optimizer + :type optimizer: torch.optim.Optimizer + :param total_steps: number of total training steps + :type total_steps: int + :param warmup_steps: number of warmup steps, defaults to 0 + :type warmup_steps: int, optional + :param last_epoch: The index of last epoch, defaults to -1 + :type last_epoch: int, optional + """ + + def __init__(self, optimizer, total_steps: int, warmup_steps: int = 0, last_epoch: int = -1, **kwargs): + self.warmup_steps = warmup_steps + self.total_steps = total_steps + super().__init__(optimizer, last_epoch=last_epoch) + + def get_lr(self): + if self.last_epoch < self.warmup_steps: + return [(self.last_epoch + 1) / (self.warmup_steps + 1) * lr for lr in self.base_lrs] + else: + return [(self.total_steps - self.last_epoch) / (self.total_steps - self.warmup_steps) * lr for lr in + self.base_lrs] + + +@LR_SCHEDULERS.register_module +class LinearWarmupDecay(_LRScheduler): + def __init__(self, optimizer, total_steps: int, warmup_steps: int = 0, last_epoch: int = -1, **kwargs): + self.warmup_steps = int(warmup_steps) + self.total_steps = total_steps + super().__init__(optimizer, last_epoch=last_epoch) + + def get_lr(self): + if self.last_epoch < self.warmup_steps: + return [(self.last_epoch + 1) / self.warmup_steps * lr for lr in self.base_lrs] + else: + return [(self.total_steps - self.last_epoch - 1) / (self.total_steps - self.warmup_steps) * lr for lr in + self.base_lrs] diff --git a/colossalai/nn/lr_scheduler/multistep.py b/colossalai/nn/lr_scheduler/multistep.py new file mode 100644 index 000000000..46420765c --- /dev/null +++ b/colossalai/nn/lr_scheduler/multistep.py @@ -0,0 +1,70 @@ +from typing import List + +from torch.optim.lr_scheduler import MultiStepLR as _MultiStepLR + +from colossalai.registry import LR_SCHEDULERS +from .delayed import WarmupScheduler + + +@LR_SCHEDULERS.register_module +class MultiStepLR(_MultiStepLR): + """Decays the learning rate of each parameter group by gamma once the + number of epoch reaches one of the milestones. Notice that such decay can + happen simultaneously with other changes to the learning rate from outside + this scheduler. When last_epoch=-1, sets initial lr as lr. + + :param optimizer: Wrapped optimizer + :type optimizer: torch.optim.Optimizer + :param total_steps: number of total training steps + :type total_steps: int + :param milestones: List of epoch indices. Must be increasing, defaults to None + :type milestones: List[int], optional + :param gamma: Multiplicative factor of learning rate decay, defaults to 0.1 + :type gamma: float, optional + :param num_steps_per_epoch: number of steps per epoch, defaults to -1 + :type num_steps_per_epoch: int, optional + :param last_epoch: The index of last epoch, defaults to -1 + :type last_epoch: int, optional + """ + + def __init__(self, optimizer, total_steps: int, milestones: List[int] = None, gamma: float = 0.1, + num_steps_per_epoch: int = -1, last_epoch: int = -1, **kwargs): + if num_steps_per_epoch <= 0: + raise ValueError( + f'num_steps_per_epoch must > 0, got {num_steps_per_epoch}') + milestones = [v * num_steps_per_epoch for v in milestones] + super().__init__(optimizer, milestones, gamma=gamma, last_epoch=last_epoch) + + +@LR_SCHEDULERS.register_module +class MultiStepWarmupLR(WarmupScheduler): + """Multi-step laerning rate scheduler with warmup. 
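The delayed and warmup wrappers above all share one mechanism: emit a simple schedule until a threshold step, then hand control to the wrapped scheduler with the step counter shifted back by that threshold. A small self-contained sketch of the resulting piecewise schedule (plain Python; the function name and constants are illustrative, not part of the patch):

```python
import math

def warmup_then_cosine(step, base_lr, warmup_steps, total_steps, eta_min=0.0):
    """Piecewise schedule mirroring WarmupScheduler wrapping CosineAnnealingLR."""
    if step < warmup_steps:
        # linear warmup, matching (last_epoch + 1) / (warmup_epochs + 1) * base_lr
        return (step + 1) / (warmup_steps + 1) * base_lr
    # after the hand-off, the wrapped scheduler effectively sees step - warmup_steps
    t, t_max = step - warmup_steps, total_steps - warmup_steps
    return eta_min + 0.5 * (base_lr - eta_min) * (1 + math.cos(math.pi * t / t_max))

print([round(warmup_then_cosine(s, 0.1, warmup_steps=5, total_steps=20), 4) for s in range(20)])
```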
+ + :param optimizer: Wrapped optimizer + :type optimizer: torch.optim.Optimizer + :param total_steps: number of total training steps + :type total_steps: int + :param warmup_steps: number of warmup steps, defaults to 0 + :type warmup_steps: int, optional + :param milestones: List of epoch indices. Must be increasing, defaults to None + :type milestones: List[int], optional + :param gamma: Multiplicative factor of learning rate decay, defaults to 0.1 + :type gamma: float, optional + :param num_steps_per_epoch: number of steps per epoch, defaults to -1 + :type num_steps_per_epoch: int, optional + :param last_epoch: The index of last epoch, defaults to -1 + :type last_epoch: int, optional + """ + + def __init__(self, optimizer, total_steps: int, warmup_steps: int = 0, milestones: List[int] = None, + gamma: float = 0.1, num_steps_per_epoch: int = -1, last_epoch: int = -1, **kwargs): + if len(milestones) == 0: + raise ValueError('milestones cannot be empty') + if num_steps_per_epoch <= 0: + raise ValueError( + f'num_steps_per_epoch must > 0, got {num_steps_per_epoch}') + milestones = [v * num_steps_per_epoch - warmup_steps for v in milestones if v * + num_steps_per_epoch >= warmup_steps] + base_scheduler = _MultiStepLR(optimizer, milestones=milestones, + gamma=gamma) + super().__init__(optimizer, warmup_steps, base_scheduler, last_epoch=last_epoch) diff --git a/colossalai/nn/lr_scheduler/onecycle.py b/colossalai/nn/lr_scheduler/onecycle.py new file mode 100644 index 000000000..743855470 --- /dev/null +++ b/colossalai/nn/lr_scheduler/onecycle.py @@ -0,0 +1,97 @@ +from torch.optim.lr_scheduler import OneCycleLR as _OneCycleLR + +from colossalai.registry import LR_SCHEDULERS + + +@LR_SCHEDULERS.register_module +class OneCycleLR(_OneCycleLR): + r"""Sets the learning rate of each parameter group according to the + 1cycle learning rate policy. The 1cycle policy anneals the learning + rate from an initial learning rate to some maximum learning rate and then + from that maximum learning rate to some minimum learning rate much lower + than the initial learning rate. + This policy was initially described in the paper `Super-Convergence: + Very Fast Training of Neural Networks Using Large Learning Rates`_. + + The 1cycle learning rate policy changes the learning rate after every batch. + `step` should be called after a batch has been used for training. + + This scheduler is not chainable. + + Note also that the total number of steps in the cycle can be determined in one + of two ways (listed in order of precedence): + + #. A value for total_steps is explicitly provided. + #. A number of epochs (epochs) and a number of steps per epoch + (steps_per_epoch) are provided. + In this case, the number of total steps is inferred by + total_steps = epochs * steps_per_epoch + + You must either provide a value for total_steps or provide a value for both + epochs and steps_per_epoch. + + The default behaviour of this scheduler follows the fastai implementation of 1cycle, which + claims that "unpublished work has shown even better results by using only two phases". To + mimic the behaviour of the original paper instead, set ``three_phase=True``. 
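The milestone handling in `MultiStepWarmupLR` above is easy to misread: epoch-based milestones are first converted to step indices, milestones that fall inside the warmup window are dropped, and the survivors are shifted into the wrapped scheduler's frame. A short illustration of that arithmetic with made-up numbers:

```python
warmup_steps = 500
num_steps_per_epoch = 100
milestones_in_epochs = [3, 30, 60, 80]  # the first milestone falls inside warmup

milestones_in_steps = [
    e * num_steps_per_epoch - warmup_steps
    for e in milestones_in_epochs
    if e * num_steps_per_epoch >= warmup_steps
]
print(milestones_in_steps)  # [2500, 5500, 7500], counted from the end of warmup
```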
+ + :param optimizer: Wrapped optimizer + :type optimizer: torch.optim.Optimizer + :param total_steps: number of total training steps + :type total_steps: int + :param pct_start: The percentage of the cycle (in number of steps) spent increasing the learning rate, defaults to 0.3 + :type pct_start: float, optional + :param anneal_strategy: {'cos', 'linear'} + Specifies the annealing strategy: "cos" for cosine annealing, "linear" for + linear annealing, defaults to 'cos' + :type anneal_strategy: str, optional + :param cycle_momentum: If ``True``, momentum is cycled inversely + to learning rate between 'base_momentum' and 'max_momentum', defaults to True + :type cycle_momentum: bool, optional + :param base_momentum: Lower momentum boundaries in the cycle + for each parameter group. Note that momentum is cycled inversely + to learning rate; at the peak of a cycle, momentum is + 'base_momentum' and learning rate is 'max_lr', defaults to 0.85 + :type base_momentum: float, optional + :param max_momentum: Upper momentum boundaries in the cycle + for each parameter group. Functionally, + it defines the cycle amplitude (max_momentum - base_momentum). + Note that momentum is cycled inversely + to learning rate; at the start of a cycle, momentum is 'max_momentum' + and learning rate is 'base_lr', defaults to 0.95 + :type max_momentum: float, optional + :param div_factor: Determines the initial learning rate via + initial_lr = max_lr/div_factor, defaults to 25.0 + :type div_factor: float, optional + :param final_div_factor: Determines the minimum learning rate via + min_lr = initial_lr/final_div_factor, defaults to 10000.0 + :type final_div_factor: float, optional + :param last_epoch: The index of the last batch. This parameter is used when + resuming a training job. Since `step()` should be invoked after each + batch instead of after each epoch, this number represents the total + number of *batches* computed, not the total number of epochs computed. + When last_epoch=-1, the schedule is started from the beginning, defaults to -1 + :type last_epoch: int, optional + + .. _Super-Convergence\: Very Fast Training of Neural Networks Using Large Learning Rates: + https://arxiv.org/abs/1708.07120 + """ + + def __init__(self, optimizer, total_steps: int, + pct_start=0.3, + anneal_strategy='cos', + cycle_momentum=True, + base_momentum=0.85, + max_momentum=0.95, + div_factor=25.0, + final_div_factor=10000.0, + last_epoch=-1, **kwargs): + max_lrs = list(map(lambda group: group['lr'], optimizer.param_groups)) + super().__init__(optimizer, max_lrs, total_steps=total_steps, + pct_start=pct_start, + anneal_strategy=anneal_strategy, + cycle_momentum=cycle_momentum, + base_momentum=base_momentum, + max_momentum=max_momentum, + div_factor=div_factor, + final_div_factor=final_div_factor, + last_epoch=last_epoch) diff --git a/colossalai/nn/lr_scheduler/poly.py b/colossalai/nn/lr_scheduler/poly.py new file mode 100644 index 000000000..ee77b2f9b --- /dev/null +++ b/colossalai/nn/lr_scheduler/poly.py @@ -0,0 +1,65 @@ +from torch.optim.lr_scheduler import _LRScheduler + +from colossalai.registry import LR_SCHEDULERS +from .delayed import WarmupScheduler + + +@LR_SCHEDULERS.register_module +class PolynomialLR(_LRScheduler): + """Polynomial learning rate scheduler. 
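One detail of the `OneCycleLR` wrapper above worth calling out: unlike `torch.optim.lr_scheduler.OneCycleLR`, which requires `max_lr` explicitly, the wrapper reuses each param group's configured `lr` as that group's `max_lr`. A hypothetical usage sketch, assuming the patched package is importable:

```python
import torch
from colossalai.nn.lr_scheduler import OneCycleLR  # assumed import path from this patch

model = torch.nn.Linear(4, 4)
# the lr configured here is treated as max_lr; the cycle starts at max_lr / div_factor
optimizer = torch.optim.SGD(model.parameters(), lr=0.5)
scheduler = OneCycleLR(optimizer, total_steps=100, pct_start=0.3, div_factor=25.0)

print(optimizer.param_groups[0]['lr'])  # 0.02 == 0.5 / 25 at the start of the cycle
for _ in range(100):
    optimizer.step()
    scheduler.step()
```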
+ + :param optimizer: Wrapped optimizer + :type optimizer: torch.optim.Optimizer + :param total_steps: number of total training steps + :type total_steps: int + :param end_lr: Minimum learning rate, defaults to 0.0001 + :type end_lr: float, optional + :param power: the power of polynomial, defaults to 1.0 + :type power: float, optional + :param last_epoch: The index of last epoch, defaults to -1 + :type last_epoch: int, optional + """ + + def __init__(self, optimizer, total_steps: int, end_lr: float = 0.0001, power: float = 1.0, last_epoch: int = -1, + **kwargs): + if end_lr < 0: + raise ValueError(f'end_lr must >= 0, got {end_lr}') + self.total_steps = total_steps + self.end_lr = end_lr + self.power = power + super().__init__(optimizer, last_epoch=last_epoch) + + def get_lr(self): + return self._get_closed_form_lr() + + def _get_closed_form_lr(self): + return [ + (base_lr - self.end_lr) * ((1 - min(self.last_epoch, self.total_steps) / + self.total_steps) ** self.power) + self.end_lr + for base_lr in self.base_lrs + ] + + +@LR_SCHEDULERS.register_module +class PolynomialWarmupLR(WarmupScheduler): + """Polynomial learning rate scheduler with warmup. + + :param optimizer: Wrapped optimizer + :type optimizer: torch.optim.Optimizer + :param total_steps: number of total training steps + :type total_steps: int + :param warmup_steps: number of warmup steps, defaults to 0 + :type warmup_steps: int, optional + :param end_lr: Minimum learning rate, defaults to 0.0001 + :type end_lr: float, optional + :param power: the power of polynomial, defaults to 1.0 + :type power: float, optional + :param last_epoch: The index of last epoch, defaults to -1 + :type last_epoch: int, optional + """ + + def __init__(self, optimizer, total_steps: int, warmup_steps: int = 0, end_lr: float = 0.0001, power: float = 1.0, + last_epoch: int = -1, **kwargs): + base_scheduler = PolynomialLR( + optimizer, total_steps - warmup_steps, end_lr=end_lr, power=power) + super().__init__(optimizer, warmup_steps, base_scheduler, last_epoch=last_epoch) diff --git a/colossalai/nn/lr_scheduler/torch.py b/colossalai/nn/lr_scheduler/torch.py new file mode 100644 index 000000000..3ac0121ff --- /dev/null +++ b/colossalai/nn/lr_scheduler/torch.py @@ -0,0 +1,122 @@ +from torch.optim.lr_scheduler import LambdaLR as _LambdaLR +from torch.optim.lr_scheduler import MultiplicativeLR as _MultiplicativeLR +from torch.optim.lr_scheduler import StepLR as _StepLR +from torch.optim.lr_scheduler import _LRScheduler + +from colossalai.registry import LR_SCHEDULERS + + +@LR_SCHEDULERS.register_module +class LambdaLR(_LambdaLR): + """Sets the learning rate of each parameter group to the initial lr + times a given function. When last_epoch=-1, sets initial lr as lr. 
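The closed form in `PolynomialLR` above interpolates from `base_lr` down to `end_lr` with exponent `power`; with `power=1.0` it reduces to plain linear decay. A standalone numerical check of that formula (no colossalai imports; the helper name is illustrative):

```python
def poly_lr(step, base_lr, total_steps, end_lr=0.0001, power=2.0):
    step = min(step, total_steps)
    return (base_lr - end_lr) * (1 - step / total_steps) ** power + end_lr

print([round(poly_lr(s, 0.1, total_steps=10), 4) for s in (0, 5, 10)])
# [0.1, 0.0251, 0.0001]: quadratic decay toward end_lr
```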
+ + :param optimizer: Wrapped optimizer + :type optimizer: torch.optim.Optimizer + :param total_steps: number of total training steps + :type total_steps: int + :param lr_lambda: A function which computes a multiplicative + factor given an integer parameter epoch, or a list of such + functions, one for each group in optimizer.param_groups, defaults to None + :type lr_lambda: function or list, optional + :param num_steps_per_epoch: number of steps per epoch, defaults to -1 + :type num_steps_per_epoch: int, optional + :param last_epoch: The index of last epoch, defaults to -1 + :type last_epoch: int, optional + """ + + def __init__(self, optimizer, total_steps, lr_lambda=None, num_steps_per_epoch: int = -1, + last_epoch: int = -1) -> None: + def func(step): return lr_lambda(step // num_steps_per_epoch) + + super().__init__(optimizer, func, last_epoch=last_epoch) + + +@LR_SCHEDULERS.register_module +class MultiplicativeLR(_MultiplicativeLR): + """Multiply the learning rate of each parameter group by the factor given + in the specified function. When last_epoch=-1, sets initial lr as lr + + :param optimizer: Wrapped optimizer + :type optimizer: torch.optim.Optimizer + :param total_steps: number of total training steps + :type total_steps: int + :param lr_lambda: A function which computes a multiplicative + factor given an integer parameter epoch, or a list of such + functions, one for each group in optimizer.param_groups, defaults to None + :type lr_lambda: function or list, optional + :param num_steps_per_epoch: number of steps per epoch, defaults to -1 + :type num_steps_per_epoch: int, optional + :param last_epoch: The index of last epoch, defaults to -1 + :type last_epoch: int, optional + """ + + def __init__(self, optimizer, total_steps, lr_lambda=None, num_steps_per_epoch: int = -1, + last_epoch: int = -1) -> None: + def func(step): return lr_lambda(step // num_steps_per_epoch) + + super().__init__(optimizer, func, last_epoch=last_epoch) + + +@LR_SCHEDULERS.register_module +class StepLR(_StepLR): + """Decays the learning rate of each parameter group by gamma every + step_size epochs. Notice that such decay can happen simultaneously with + other changes to the learning rate from outside this scheduler. When + last_epoch=-1, sets initial lr as lr + + :param optimizer: Wrapped optimizer + :type optimizer: torch.optim.Optimizer + :param total_steps: number of total training steps + :type total_steps: int + :param step_size: Period of learning rate decay, defaults to 1 + :type step_size: int, optional + :param gamma: Multiplicative factor of learning rate decay, defaults to 0.1 + :type gamma: float, optional + :param num_steps_per_epoch: number of steps per epoch, defaults to -1 + :type num_steps_per_epoch: int, optional + :param last_epoch: The index of last epoch, defaults to -1 + :type last_epoch: int, optional + """ + + def __init__(self, optimizer, total_steps, step_size: int = 1, gamma: float = 0.1, num_steps_per_epoch: int = -1, + last_epoch: int = -1) -> None: + super().__init__(optimizer, step_size * num_steps_per_epoch, + gamma=gamma, last_epoch=last_epoch) + + +@LR_SCHEDULERS.register_module +class ExponentialLR(_LRScheduler): + """Decays the learning rate of each parameter group by gamma every epoch. 
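These torch wrappers keep the epoch-indexed semantics of the underlying PyTorch schedulers while being stepped once per batch: the user-supplied lambda only ever sees `step // num_steps_per_epoch`. A tiny illustration of that conversion (plain Python, illustrative values):

```python
num_steps_per_epoch = 4

def decay_per_epoch(epoch):   # what a user would pass as lr_lambda
    return 0.95 ** epoch

def per_step(step):           # what the wrapper effectively hands to torch's LambdaLR
    return decay_per_epoch(step // num_steps_per_epoch)

print([round(per_step(s), 4) for s in range(8)])
# [1.0, 1.0, 1.0, 1.0, 0.95, 0.95, 0.95, 0.95]: constant within an epoch
```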
+ When last_epoch=-1, sets initial lr as lr + + :param optimizer: Wrapped optimizer + :type optimizer: torch.optim.Optimizer + :param total_steps: number of total training steps + :type total_steps: int + :param gamma: Multiplicative factor of learning rate decay, defaults to 1.0 + :type gamma: float, optional + :param num_steps_per_epoch: number of steps per epoch, defaults to -1 + :type num_steps_per_epoch: int, optional + :param last_epoch: The index of last epoch, defaults to -1 + :type last_epoch: int, optional + """ + + def __init__(self, optimizer, total_steps, gamma: float = 1.0, num_steps_per_epoch: int = -1, + last_epoch: int = -1) -> None: + self.gamma = gamma + self.num_steps_per_epoch = num_steps_per_epoch + super().__init__(optimizer, last_epoch=last_epoch) + + def get_lr(self): + if self.last_epoch == 0: + return self.base_lrs + elif (self.last_epoch + 1) % self.num_steps_per_epoch == 0: + return [group['lr'] * self.gamma + for group in self.optimizer.param_groups] + return [group['lr'] + for group in self.optimizer.param_groups] + + def _get_closed_form_lr(self): + return [base_lr * self.gamma ** (self.last_epoch // self.num_steps_per_epoch) + for base_lr in self.base_lrs] diff --git a/colossalai/nn/model/__init__.py b/colossalai/nn/model/__init__.py new file mode 100644 index 000000000..5d5ccd96e --- /dev/null +++ b/colossalai/nn/model/__init__.py @@ -0,0 +1,3 @@ +from .base_model import BaseModel +from .vanilla_resnet import VanillaResNet +from .vision_transformer import * diff --git a/colossalai/nn/model/base_model.py b/colossalai/nn/model/base_model.py new file mode 100644 index 000000000..cbe38fefa --- /dev/null +++ b/colossalai/nn/model/base_model.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from abc import ABC, abstractmethod + +import torch.nn as nn + +from colossalai.builder import build_layer + + +class BaseModel(nn.Module, ABC): + + def __init__(self): + super(BaseModel, self).__init__() + self.layers = nn.ModuleList() + self.layers_cfg = [] + + def build_from_cfg(self, start=None, end=None): + assert hasattr(self, 'layers_cfg'), 'Cannot find attribute layers_cfg from the module, please check the ' \ + 'spelling and if you have initialized this variable' + if start is None: + start = 0 + if end is None: + end = len(self.layers_cfg) + for cfg in self.layers_cfg[start: end]: + layer = build_layer(cfg) + self.layers.append(layer) + + @abstractmethod + def init_weights(self): + pass + + def state_dict_for_save_checkpoint(self, destination=None, prefix='', + keep_vars=False): + + """Use this function to override the state dict for + saving checkpoints.""" + return self.state_dict(destination, prefix, keep_vars) diff --git a/colossalai/nn/model/vanilla_resnet/__init__.py b/colossalai/nn/model/vanilla_resnet/__init__.py new file mode 100644 index 000000000..1740de7dc --- /dev/null +++ b/colossalai/nn/model/vanilla_resnet/__init__.py @@ -0,0 +1,3 @@ +from .resnet import VanillaResNet + +__all__ = ['VanillaResNet'] diff --git a/colossalai/nn/model/vanilla_resnet/resnet.py b/colossalai/nn/model/vanilla_resnet/resnet.py new file mode 100644 index 000000000..905889649 --- /dev/null +++ b/colossalai/nn/model/vanilla_resnet/resnet.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from typing import List, Optional + +import torch +import torch.nn as nn +from torch import Tensor + +from colossalai.registry import LAYERS +from colossalai.registry import MODELS +from ..base_model import BaseModel + + +@MODELS.register_module 
+class VanillaResNet(BaseModel): + """ResNet from + `"Deep Residual Learning for Image Recognition" `_. + """ + + def __init__( + self, + num_cls: int, + block_type: str, + layers: List[int], + norm_layer_type: str = 'BatchNorm2d', + in_channels: int = 3, + groups: int = 1, + width_per_group: int = 64, + zero_init_residual: bool = False, + replace_stride_with_dilation: Optional[List[bool]] = None, + dilations=(1, 1, 1, 1) + ) -> None: + super().__init__() + + self.inplanes = 64 + self.zero_init_residual = zero_init_residual + self.blocks = layers + self.block_expansion = LAYERS.get_module(block_type).expansion + self.dilations = dilations + self.reslayer_common_cfg = dict( + type='ResLayer', + block_type=block_type, + norm_layer_type=norm_layer_type, + groups=groups, + base_width=width_per_group + ) + + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + + self.layers_cfg = [ + # conv1 + dict(type='Conv2d', + in_channels=in_channels, + out_channels=self.inplanes, + kernel_size=7, + stride=2, + padding=3, + bias=False), + # bn1 + dict( + type=norm_layer_type, + num_features=self.inplanes + ), + # relu + dict( + type='ReLU', + inplace=True + ), + # maxpool + dict( + type='MaxPool2d', + kernel_size=3, + stride=2, + padding=1 + ), + # layer 1 + dict( + inplanes=self.inplanes, + planes=64, + blocks=self.blocks[0], + dilation=self.dilations[0], + **self.reslayer_common_cfg + ), + # layer 2 + dict( + inplanes=64 * self.block_expansion, + planes=128, + blocks=self.blocks[1], + stride=2, + dilate=replace_stride_with_dilation[0], + dilation=self.dilations[1], + **self.reslayer_common_cfg + ), + # layer 3 + dict( + inplanes=128 * self.block_expansion, + planes=256, + blocks=layers[2], + stride=2, + dilate=replace_stride_with_dilation[1], + dilation=self.dilations[2], + **self.reslayer_common_cfg + ), + # layer 4 + dict( + inplanes=256 * self.block_expansion, + planes=512, + blocks=layers[3], stride=2, + dilate=replace_stride_with_dilation[2], + dilation=self.dilations[3], + **self.reslayer_common_cfg + ), + # avg pool + dict( + type='AdaptiveAvgPool2d', + output_size=(1, 1) + ), + # flatten + dict( + type='LambdaWrapper', + func=lambda mod, x: torch.flatten(x, 1) + ), + # linear + dict( + type='Linear', + in_features=512 * self.block_expansion, + out_features=num_cls + ) + ] + + def forward(self, x: Tensor): + for layer in self.layers: + x = layer(x) + return x, + + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
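The comment above refers to the standard "zero-gamma" trick (Goyal et al., https://arxiv.org/abs/1706.02677): if the last BatchNorm scale in a residual branch starts at zero, the branch contributes nothing at initialization and the block reduces to the identity. A minimal standalone illustration with generic torch modules (not the registry-built layers in this file):

```python
import torch
import torch.nn as nn

branch = nn.Sequential(nn.Conv2d(8, 8, 3, padding=1, bias=False), nn.BatchNorm2d(8))
nn.init.constant_(branch[1].weight, 0)  # zero the final BN scale ("gamma")
nn.init.constant_(branch[1].bias, 0)    # beta is zero too, so the branch output is exactly zero

x = torch.randn(2, 8, 16, 16)
out = x + branch(x)                     # residual block = identity + branch
print(torch.allclose(out, x))           # True: the block starts as an identity mapping
```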
+ # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + if self.zero_init_residual: + for m in self.modules(): + if isinstance(m, LAYERS.get_module('ResNetBottleneck')): + # type: ignore[arg-type] + nn.init.constant_(m.bn3.weight, 0) + elif isinstance(m, LAYERS.get_module('ResNetBasicBlock')): + # type: ignore[arg-type] + nn.init.constant_(m.bn2.weight, 0) diff --git a/colossalai/nn/model/vision_transformer/__init__.py b/colossalai/nn/model/vision_transformer/__init__.py new file mode 100644 index 000000000..ab9d7e640 --- /dev/null +++ b/colossalai/nn/model/vision_transformer/__init__.py @@ -0,0 +1,3 @@ +from .vision_transformer import VisionTransformerFromConfig + +__all__ = ['VisionTransformerFromConfig'] diff --git a/colossalai/nn/model/vision_transformer/vision_transformer.py b/colossalai/nn/model/vision_transformer/vision_transformer.py new file mode 100644 index 000000000..98f5cae55 --- /dev/null +++ b/colossalai/nn/model/vision_transformer/vision_transformer.py @@ -0,0 +1,87 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch + +from colossalai.registry import MODELS +from ..base_model import BaseModel + + +@MODELS.register_module +class VisionTransformerFromConfig(BaseModel): + """Vision Transformer from + `"An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale" `_. + + """ + + def __init__(self, + embedding_cfg: dict, + norm_cfg: dict, + block_cfg: dict, + head_cfg: dict, + token_fusion_cfg: dict = None, + embed_dim=768, + depth=12, + drop_path_rate=0., + tensor_splitting_cfg: dict = None): + super().__init__() + self.embed_dim = embed_dim + self.num_tokens = 1 + self.tensor_splitting_cfg = tensor_splitting_cfg + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth) + ] # stochastic depth decay rule + if token_fusion_cfg is None: + token_fusion_cfg = [] + else: + token_fusion_cfg = [token_fusion_cfg] + + self.layers_cfg = [ + embedding_cfg, + + # input tensor splitting + *self._generate_tensor_splitting_cfg(), + *token_fusion_cfg, + + # blocks + *self._generate_block_cfg( + dpr=dpr, block_cfg=block_cfg, depth=depth), + + # norm + norm_cfg, + + # head + head_cfg + ] + + def _fuse_tokens(self, x): + cls_token = self.cls_token.expand(x.shape[0], -1, -1) + x = torch.cat((cls_token, x), dim=1) + return x + + def _generate_block_cfg(self, dpr, depth, block_cfg): + blocks_cfg = [] + + for i in range(depth): + _cfg = block_cfg.copy() + _cfg['droppath_cfg']['drop_path'] = dpr[i] + blocks_cfg.append(_cfg) + + return blocks_cfg + + def _generate_tensor_splitting_cfg(self): + if self.tensor_splitting_cfg: + return [self.tensor_splitting_cfg] + else: + return [] + + def forward(self, x): # [512, 3, 32, 32] + for layer in self.layers: + if isinstance(x, tuple): + x = layer(*x) + else: + x = layer(x) + return x # [256, 5] + + def init_weights(self): + # TODO: add init weights + pass diff --git a/colossalai/nn/multi_tensor_apply/__init__.py b/colossalai/nn/multi_tensor_apply/__init__.py new file mode 100644 index 000000000..94d13b339 --- /dev/null +++ b/colossalai/nn/multi_tensor_apply/__init__.py @@ -0,0 +1,3 @@ +from .multi_tensor_apply import MultiTensorApply + +multi_tensor_applier = MultiTensorApply(2048 * 32) diff --git a/colossalai/nn/multi_tensor_apply/multi_tensor_apply.py b/colossalai/nn/multi_tensor_apply/multi_tensor_apply.py new file mode 100644 index 000000000..b81fb5f1c --- /dev/null +++ b/colossalai/nn/multi_tensor_apply/multi_tensor_apply.py @@ -0,0 +1,31 @@ +# modified from 
https://github.com/NVIDIA/apex/blob/master/apex/multi_tensor_apply/multi_tensor_apply.py + + +class MultiTensorApply(object): + available = False + warned = False + + def __init__(self, chunk_size): + try: + import colossal_C + MultiTensorApply.available = True + self.chunk_size = chunk_size + except ImportError as err: + MultiTensorApply.available = False + MultiTensorApply.import_err = err + + def check_avail(self): + if MultiTensorApply.available == False: + raise RuntimeError( + "Attempted to call MultiTensorApply method, but MultiTensorApply " + "is not available, possibly because Apex was installed without " + "--cpp_ext --cuda_ext. Original import error message:", + MultiTensorApply.import_err) + + def __call__(self, op, noop_flag_buffer, tensor_lists, *args): + self.check_avail() + + return op(self.chunk_size, + noop_flag_buffer, + tensor_lists, + *args) diff --git a/colossalai/nn/optimizer/__init__.py b/colossalai/nn/optimizer/__init__.py new file mode 100644 index 000000000..f9993c470 --- /dev/null +++ b/colossalai/nn/optimizer/__init__.py @@ -0,0 +1,14 @@ +from .fp16_optimizer import FP16Optimizer +from .fused_adam import FusedAdam +from .fused_lamb import FusedLAMB +from .fused_sgd import FusedSGD +from .lamb import Lamb +from .lars import Lars +from .zero_redundancy_optimizer_level_1 import ZeroRedundancyOptimizer_Level_1 +from .zero_redundancy_optimizer_level_2 import ZeroRedundancyOptimizer_Level_2 +from .zero_redundancy_optimizer_level_3 import ZeroRedundancyOptimizer_Level_3 + +__all__ = [ + 'ZeroRedundancyOptimizer_Level_1', 'ZeroRedundancyOptimizer_Level_2', 'ZeroRedundancyOptimizer_Level_3', + 'FusedLAMB', 'FusedAdam', 'FusedSGD', 'Lamb', 'FP16Optimizer', 'Lars' +] diff --git a/colossalai/nn/optimizer/_utils.py b/colossalai/nn/optimizer/_utils.py new file mode 100644 index 000000000..1be8ffc1b --- /dev/null +++ b/colossalai/nn/optimizer/_utils.py @@ -0,0 +1,168 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch +from torch._six import inf + +try: + import colossal_C +except: + print('Colossalai should be built with cuda extension to use the FP16 optimizer') + +from ..multi_tensor_apply import multi_tensor_applier + +from colossalai.constants import IS_TENSOR_PARALLEL, TENSOR_PARALLEL_ATTRIBUTES +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc + + +def is_model_parallel_parameter(p): + return hasattr(p, IS_TENSOR_PARALLEL) and getattr(p, IS_TENSOR_PARALLEL) + + +def _calc_l2_norm(grads): + norm = 0.0 + if len(grads) > 0: + dummy_overflow_buf = torch.cuda.IntTensor([0]) + norm, _ = multi_tensor_applier( + colossal_C.multi_tensor_l2norm, + dummy_overflow_buf, + [grads], + False # no per-parameter norm + ) + return norm + + +def _calc_lp(grads, norm_type): + norm = 0.0 + for grad in grads: + grad_norm = torch.norm(grad, norm_type) + norm += grad_norm ** norm_type + return norm + +# ======== Gradient Clipping ========= + + +def clip_grad_norm_fp32(parameters, max_norm, norm_type=2): + """Clips gradient norm of an iterable of parameters whose gradients + are in fp32. + + This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and + added functionality to handle model parallel parameters. Note that + the gradients are modified in place. + + Arguments: + parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a + single Tensor that will have gradients normalized + max_norm (float or int): max norm of the gradients + norm_type (float or int): type of the used p-norm. 
Can be ``'inf'`` for + infinity norm. + + Returns: + Total norm of the parameters (viewed as a single vector). + """ + + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + + # Filter parameters based on: + # - grad should not be none + # - parameter should not be shared + # - should not be a replica due to tensor model parallelism + params = [] + for param in parameters: + if param.grad is not None: + # Make sure the grads are in fp32 + assert param.grad.type() == 'torch.cuda.FloatTensor' + params.append(param) + # Norm parameters. + max_norm = float(max_norm) + norm_type = float(norm_type) + + # Calculate norm. + if norm_type == inf: + total_norm = max(p.grad.data.abs().max() for p in params) + total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)]) + if gpc.is_initialized(ParallelMode.TENSOR): + # Take max across all model-parallel GPUs. + torch.distributed.all_reduce(total_norm_cuda, + op=torch.distributed.ReduceOp.MAX, + group=gpc.get_group(ParallelMode.TENSOR)) + total_norm = total_norm_cuda[0].item() + else: + tensor_parallel_grads = [] + no_tensor_parallel_grads = [] + for p in params: + if is_model_parallel_parameter(p): + tensor_parallel_grads.append(p.grad.data) + else: + no_tensor_parallel_grads.append(p.grad.data) + if norm_type == 2.0: + tensor_parallel_norm = _calc_l2_norm( + tensor_parallel_grads) ** norm_type + no_tensor_parallel_norm = _calc_l2_norm( + no_tensor_parallel_grads) ** norm_type + else: + tensor_parallel_norm = _calc_lp(tensor_parallel_grads, norm_type) + no_tensor_parallel_norm = _calc_lp( + no_tensor_parallel_grads, norm_type) + if gpc.is_initialized(ParallelMode.TENSOR): + # Sum across all model-parallel GPUs. + torch.distributed.all_reduce(tensor_parallel_norm, + op=torch.distributed.ReduceOp.SUM, + group=gpc.get_group(ParallelMode.TENSOR)) + total_norm = (tensor_parallel_norm + + no_tensor_parallel_norm) ** (1.0 / norm_type) + if isinstance(total_norm, torch.Tensor): + total_norm = total_norm.item() + + # Scale. + clip_coeff = max_norm / (total_norm + 1.0e-6) + if clip_coeff < 1.0: + grads = [p.grad.detach() for p in params] + dummy_overflow_buf = torch.cuda.IntTensor([0]) + multi_tensor_applier(colossal_C.multi_tensor_scale, + dummy_overflow_buf, + [grads, grads], + clip_coeff) + + return total_norm + + +def count_zeros_fp32(parameters): + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + + # Filter parameters based on: + # - grad should not be none + # - parameter should not be shared + # - should not be a replica due to tensor model parallelism + total_num_zeros = 0.0 + for param in parameters: + grad_not_none = param.grad is not None + is_not_tp_duplicate = param_is_not_tensor_parallel_duplicate(param) + if grad_not_none and is_not_tp_duplicate: + grad = param.grad.detach() + num_zeros = grad.numel() - torch.count_nonzero(grad) + total_num_zeros = num_zeros + total_num_zeros + + # Sum across all model-parallel GPUs.
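The norm combination above is the usual pattern for gradient clipping under tensor parallelism: each rank accumulates `norm ** p` of its shard-local gradients, that scalar is all-reduced over the tensor-parallel group, the norm of the replicated gradients is added once, and only then is the p-th root taken. A single-process sketch of just that math (the all-reduce is simulated by summing over hypothetical ranks; no distributed setup needed):

```python
import torch

def combined_grad_norm(shard_grads_per_rank, replicated_grads, p=2.0):
    """Simulate the model-parallel norm combination on one process."""
    # each rank contributes the sum of ||g||_p ** p over its local shards
    per_rank = [sum(g.norm(p) ** p for g in shard) for shard in shard_grads_per_rank]
    tensor_parallel_norm = torch.stack(per_rank).sum()   # stands in for the all-reduce
    replicated_norm = sum(g.norm(p) ** p for g in replicated_grads)
    return (tensor_parallel_norm + replicated_norm) ** (1.0 / p)

shards = [[torch.randn(10)] for _ in range(4)]           # four "ranks", one shard each
full = torch.cat([s[0] for s in shards])
print(torch.allclose(combined_grad_norm(shards, []), full.norm(2)))  # True
```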
+ torch.distributed.all_reduce(total_num_zeros, + op=torch.distributed.ReduceOp.SUM, + group=gpc.get_group(ParallelMode.TENSOR)) + total_num_zeros = total_num_zeros.item() + + return total_num_zeros + + +def copy_tensor_parallel_attributes(src_tensor, dst_tensor): + for attr in TENSOR_PARALLEL_ATTRIBUTES: + if hasattr(src_tensor, attr): + val = getattr(src_tensor, attr) + setattr(dst_tensor, attr, val) + + +def param_is_not_tensor_parallel_duplicate(param): + return (hasattr(param, IS_TENSOR_PARALLEL) and + getattr(param, IS_TENSOR_PARALLEL)) or ( + gpc.get_local_rank(ParallelMode.TENSOR) == 0) diff --git a/colossalai/nn/optimizer/fp16_optimizer.py b/colossalai/nn/optimizer/fp16_optimizer.py new file mode 100644 index 000000000..c64a732c9 --- /dev/null +++ b/colossalai/nn/optimizer/fp16_optimizer.py @@ -0,0 +1,507 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch + +try: + import colossal_C +except: + print('Colossalai should be built with cuda extension to use the FP16 optimizer') + +from torch.optim import Optimizer + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.logging import get_global_dist_logger +from colossalai.registry import OPTIMIZER_WRAPPERS +from colossalai.utils import print_rank_0 +from ._utils import copy_tensor_parallel_attributes, clip_grad_norm_fp32, count_zeros_fp32 +from ..multi_tensor_apply import multi_tensor_applier + + +def _zero_grad_group_helper(group, set_to_none): + """Zero out the gradient for a group of parameters. + Note: copied from torch.optim.optimizer.""" + for param in group: + if param.grad is not None: + if set_to_none: + param.grad = None + else: + if param.grad.grad_fn is not None: + param.grad.detach_() + else: + param.grad.requires_grad_(False) + param.grad.zero_() + + +def _multi_tensor_copy_this_to_that(this, that, overflow_buf=None): + """Use multi-tensor-applier to copy values from one list to another. + We don't have a blfoat16 implementation so for now if the overflow_buf + is not provided, we default back to simple loop copy to be compatible + with bfloat16.""" + if overflow_buf: + overflow_buf.fill_(0) + # Scaling with factor `1.0` is equivalent to copy. + multi_tensor_applier(colossal_C.multi_tensor_scale, + overflow_buf, + [this, that], + 1.0) + else: + for this_, that_ in zip(this, that): + that_.copy_(this_) + + +class DynamicGradScaler: + + def __init__(self, + initial_scale, + min_scale, + growth_factor, + backoff_factor, + growth_interval, + hysteresis, + max_scale: int = None): + """"Grad scaler with dynamic scale that gets adjusted + during training.""" + assert initial_scale > 0.0 + self._scale = torch.cuda.FloatTensor([initial_scale]) + + # Lower bound on the scale. + assert min_scale > 0.0 + assert min_scale <= initial_scale + self.min_scale = torch.cuda.FloatTensor([min_scale]) + # Growth and backoff factors for the scale. + assert growth_factor > 1.0 + self.growth_factor = torch.cuda.FloatTensor([growth_factor]) + assert backoff_factor < 1.0 + assert backoff_factor > 0.0 + self.backoff_factor = torch.cuda.FloatTensor([backoff_factor]) + # Interval over which if we don't see any inf/nan, + # we will scale the grad scale by the growth factor. + assert growth_interval > 0 + self.growth_interval = growth_interval + # Number of inf/nans we should see before scaling down + # the grad scale by the backoff factor. 
+ assert hysteresis > 0 + self.hysteresis = hysteresis + if max_scale is not None: + assert max_scale > 1 and initial_scale <= max_scale + self._max_scale = max_scale + + # Trackers. + self._growth_tracker = 0 + self._hysteresis_tracker = self.hysteresis + + self._logger = get_global_dist_logger() + + @property + def scale(self): + return self._scale + + @property + def inv_scale(self): + return self._scale.double().reciprocal().float() + + def update(self, found_inf): + + # If we have an inf/nan, growth tracker is set to 0 + # and hysterisis tracker is reduced by 1. + if found_inf: + self._growth_tracker = 0 + self._hysteresis_tracker -= 1 + # Now if we are out of hysteresis count, scale down the loss. + if self._hysteresis_tracker <= 0: + self._scale = torch.max(self._scale * self.backoff_factor, + self.min_scale) + self._logger.info(f'overflow occurs, loss scale is adjusted to {self._scale}') + else: + # If there is no nan/inf, increment the growth tracker. + self._growth_tracker += 1 + # If we have had enough consequitive intervals with no nan/inf: + if self._growth_tracker == self.growth_interval: + # Reset the tracker and hysteresis trackers, + self._growth_tracker = 0 + self._hysteresis_tracker = self.hysteresis + # and scale up the loss scale. + if self._max_scale is not None and self._scale >= self._max_scale: + self._logger.info( + f'Current loss scale {self._scale} has reached the max scale {self._max_scale} allowed') + else: + self._scale = self._scale * self.growth_factor + self._logger.info(f'no consecutive overflow, loss scale is adjusted to {self._scale}') + + def state_dict(self): + state_dict = {} + state_dict['max_scale'] = self._max_scale + state_dict['scale'] = self._scale + state_dict['growth_tracker'] = self._growth_tracker + state_dict['hysteresis_tracker'] = self._hysteresis_tracker + return state_dict + + def load_state_dict(self, state_dict): + self._scale = state_dict['scale'].cuda(torch.cuda.current_device()) + self._growth_tracker = state_dict['growth_tracker'] + self._hysteresis_tracker = state_dict['hysteresis_tracker'] + self._max_scale = state_dict['max_scale'] + + +@OPTIMIZER_WRAPPERS.register_module +class FP16Optimizer(Optimizer): + """Float16 optimizer for fp16 and bf16 data types. + + Arguments: + optimizer: base optimizer such as Adam or SGD + clip_grad: clip gradeints with this global L2 norm. Note + that clipping is ignored if clip_grad == 0 + log_num_zeros_in_grad: return number of zeros in the gradients. + params_have_main_grad: flag indicating if parameters have + a `main_grad` field. If this is set, we are assuming + that the model parameters are store in the `main_grad` + field instead of the typical `grad` field. This happens + for the DDP cases where there is a contihuous buffer + holding the gradients. For example for bfloat16, we want + to do gradient accumulation and all-reduces in float32 + and as a result we store those gradients in the main_grad. + Note that main grad is not necessarily in float32. + bf16: if true, the model is running in bfloat16. + grad_scaler: used for scaling gradients. Note that this can be + None. This case happens when `bf16 = True` and we don't + use any loss scale. Note that for `bf16 = True`, we can have + a constnat gradient scaler. Also for `bf16 = False`, we + always require a grad scaler. 
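The dynamic loss-scaling policy described here is implemented by `DynamicGradScaler` above: back off only once the overflow counter (`hysteresis`) is exhausted, and grow the scale after `growth_interval` clean steps in a row. A simplified, self-contained mimic of that update rule (plain floats, no logging; the constants match the defaults used in this file):

```python
def update_scale(scale, found_inf, state, growth_factor=2.0, backoff_factor=0.5,
                 growth_interval=1000, hysteresis=2, min_scale=1.0):
    """Rough mimic of DynamicGradScaler.update, for illustration only."""
    if found_inf:
        state['growth'] = 0
        state['hysteresis'] -= 1
        if state['hysteresis'] <= 0:             # back off only after repeated overflows
            scale = max(scale * backoff_factor, min_scale)
    else:
        state['growth'] += 1
        if state['growth'] == growth_interval:   # enough clean steps: grow the scale
            state['growth'] = 0
            state['hysteresis'] = hysteresis
            scale = scale * growth_factor
    return scale

state = {'growth': 0, 'hysteresis': 2}
scale = 2.0 ** 16
scale = update_scale(scale, found_inf=True, state=state)  # first overflow: no change yet
scale = update_scale(scale, found_inf=True, state=state)  # second overflow: scale halves
print(scale)  # 32768.0
```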
+ """ + + def __init__(self, + optimizer, + clip_grad=0, + log_num_zeros_in_grad=False, + initial_scale=2 ** 32, + min_scale=1, + growth_factor=2, + backoff_factor=0.5, + growth_interval=1000, + hysteresis=2, + max_scale: int = 2 ** 32): + # default args for compatibility + bf16 = False + params_have_main_grad = False + + # have a defaults for compatibility with pytorch optim + self.defaults = optimizer.defaults + + # log config + self._logger = get_global_dist_logger() + self._logger.info(f"\n========= FP16 Optimizer Config =========\n" + f"Optimizer: {optimizer.__class__.__name__}\n" + f"clip_grad = {clip_grad}\n" + f"log_num_zeros_in_grad = {log_num_zeros_in_grad}\n" + f"initial_scale = {initial_scale}\n" + f"min_scale = {min_scale}\n" + f"growth_factor = {growth_factor}\n" + f"backoff_factor = {backoff_factor}\n" + f"growth_interval = {growth_interval}\n" + f"hysteresis = {hysteresis}\n" + f"==========================================", ranks=[0]) + + """Input optimizer is the base optimizer for example Adam.""" + self.optimizer = optimizer + assert self.optimizer, 'no optimizer is provided.' + # Set gradient clipping and logging params. + self.clip_grad = clip_grad + self.log_num_zeros_in_grad = log_num_zeros_in_grad + self.params_have_main_grad = params_have_main_grad + + self.bf16 = bf16 + self.grad_scaler = DynamicGradScaler( + initial_scale=initial_scale, + min_scale=min_scale, + growth_factor=growth_factor, + backoff_factor=backoff_factor, + growth_interval=growth_interval, + hysteresis=hysteresis, + max_scale=max_scale + ) + + # None grad scaler is only supported for bf16. + if self.grad_scaler is None: + assert self.bf16, 'fp16 expects a grad scaler.' + + # Tensor used to determine if a nan/if has happend. + # Any non-zero value indicates inf/nan. + # Note that we keep this for the cases that grad scaler is none. + # We still record nan/inf if we have a bfloat16 with a grad scaler. + if self.grad_scaler: + self.found_inf = torch.cuda.FloatTensor([0.0]) + + # Dummy tensor needed for apex multi-apply tensor. + # For bfloat, we don't have multi-tensor apply and for now + # we set it to none so the multi-tensor apply gets ignored. + if bf16: + self._dummy_overflow_buf = None + else: + self._dummy_overflow_buf = torch.cuda.IntTensor([0]) + + # In case grad scaler is not passed, define the unity scale. + if self.grad_scaler is None: + self._scale_one = torch.cuda.FloatTensor([1.0]) + + # ====================== + # main parameter stuff + # ====================== + + # Three groups of parameters: + # float16_groups: original float16 parameters + # fp32_from_float16_groups: fp32 copy of float16 parameters + # fp32_from_fp32_groups: original fp32 parameters + self.float16_groups = [] + self.fp32_from_float16_groups = [] + self.fp32_from_fp32_groups = [] + + # For all the groups in the original optimizer: + for param_group in self.optimizer.param_groups: + float16_params_this_group = [] + fp32_params_this_group = [] + fp32_from_float16_params_this_group = [] + # For all the parameters in this group: + for i, param in enumerate(param_group['params']): + if param.requires_grad: + # float16 params: + if param.type() in ['torch.cuda.HalfTensor', + 'torch.cuda.BFloat16Tensor']: + float16_params_this_group.append(param) + # Create a copy + main_param = param.detach().clone().float() + # Copy tensor model parallel attributes. 
+ copy_tensor_parallel_attributes(param, main_param) + + # if hasattr(param, 'shared'): + # main_param.shared = param.shared + + # Replace the optimizer params with the new fp32 copy. + param_group['params'][i] = main_param + fp32_from_float16_params_this_group.append(main_param) + # Reset existing state dict key to the new main param. + if param in self.optimizer.state: + self.optimizer.state[main_param] \ + = self.optimizer.state.pop(param) + + # fp32 params. + elif param.type() == 'torch.cuda.FloatTensor': + fp32_params_this_group.append(param) + param_group['params'][i] = param + else: + raise TypeError('Wrapped parameters must be one of ' + 'torch.cuda.FloatTensor, ' + 'torch.cuda.HalfTensor, or ' + 'torch.cuda.BFloat16Tensor. ' + 'Received {}'.format(param.type())) + + self.float16_groups.append(float16_params_this_group) + self.fp32_from_float16_groups.append( + fp32_from_float16_params_this_group) + self.fp32_from_fp32_groups.append(fp32_params_this_group) + + # Leverage state_dict() and load_state_dict() to + # recast preexisting per-param state tensors + self.optimizer.load_state_dict(self.optimizer.state_dict()) + + def zero_grad(self, set_to_none=False): + """We only need to zero the model related parameters, i.e., + float16_groups & fp32_from_fp32_groups.""" + for group in self.float16_groups: + _zero_grad_group_helper(group, set_to_none) + for group in self.fp32_from_fp32_groups: + _zero_grad_group_helper(group, set_to_none) + + def get_loss_scale(self): + if self.grad_scaler is None: + return self._scale_one + return self.grad_scaler.scale + + def _copy_model_grads_to_main_grads(self): + # This only needs to be done for the float16 group. + for model_group, main_group in zip(self.float16_groups, + self.fp32_from_float16_groups): + for model_param, main_param in zip(model_group, main_group): + if self.params_have_main_grad: + main_param.grad = model_param.main_grad.float() + else: + if model_param.grad is not None: + main_param.grad = model_param.grad.float() + # For fp32 grads, we need to reset the grads to main grad. + if self.params_have_main_grad: + for model_group in self.fp32_from_fp32_groups: + for model_param in model_group: + model_param.grad = model_param.main_grad + + def _unscale_main_grads_and_check_for_nan(self): + main_grads = [] + # fp32 params fromm float16 ones. + for main_group in self.fp32_from_float16_groups: + for main_param in main_group: + if main_param.grad is not None: + main_grads.append(main_param.grad.data) + # Append fp32 parameters. + for main_group in self.fp32_from_fp32_groups: + for main_param in main_group: + if main_param.grad is not None: + main_grads.append(main_param.grad.data) + # Reset found inf. + self.found_inf.fill_(0.0) + # Unscale and set found inf/nan + torch._amp_foreach_non_finite_check_and_unscale_( + main_grads, self.found_inf, self.grad_scaler.inv_scale) + # Update across all model parallel instances. + torch.distributed.all_reduce(self.found_inf, + op=torch.distributed.ReduceOp.MAX, + group=gpc.get_group(ParallelMode.TENSOR)) + + # Check for nan. 
+ found_inf_flag = (self.found_inf.item() > 0) + return found_inf_flag + + def _get_model_and_main_params_data_float16(self): + model_data = [] + main_data = [] + for model_group, main_group in zip(self.float16_groups, + self.fp32_from_float16_groups): + for model_param, main_param in zip(model_group, main_group): + model_data.append(model_param.data) + main_data.append(main_param.data) + return model_data, main_data + + def _copy_main_params_to_model_params(self): + # Only needed for the float16 params. + model_data, main_data = self._get_model_and_main_params_data_float16() + _multi_tensor_copy_this_to_that(this=main_data, that=model_data, + overflow_buf=self._dummy_overflow_buf) + + def _copy_model_params_to_main_params(self): + # Only needed for the float16 params. + model_data, main_data = self._get_model_and_main_params_data_float16() + _multi_tensor_copy_this_to_that(this=model_data, that=main_data, + overflow_buf=self._dummy_overflow_buf) + + def reload_model_params(self): + self._copy_model_params_to_main_params() + + @torch.no_grad() + def step(self): + # for param_group in self.float16_groups: + # for param in param_group: + # print(param.grad is None) + + # Copy gradients from model params to main params. + self._copy_model_grads_to_main_grads() + + # Do unscale, check for inf, and update grad scaler only for + # the case that grad scaler is provided. + if self.grad_scaler: + + # Unscale and check for inf/nan. + found_inf_flag = self._unscale_main_grads_and_check_for_nan() + + # We are done with scaling gradients + # so we can update the loss scale. + self.grad_scaler.update(found_inf_flag) + + # If we found inf/nan, skip the update. + if found_inf_flag: + return False, None, None + + # Clip the main gradients. + grad_norm = None + if self.clip_grad > 0.0: + grad_norm = self.clip_grad_norm(self.clip_grad) + + # count the zeros in the grads + num_zeros_in_grad = self.count_zeros() if \ + self.log_num_zeros_in_grad else None + + # Step the optimizer. + self.optimizer.step() + + # Update params from main params. + self._copy_main_params_to_model_params() + + # Successful update. + return True, grad_norm, num_zeros_in_grad + + def state_dict(self): + state_dict = {} + state_dict['optimizer'] = self.optimizer.state_dict() + if self.grad_scaler: + state_dict['grad_scaler'] = self.grad_scaler.state_dict() + state_dict['fp32_from_fp16_params'] = self.fp32_from_float16_groups + return state_dict + + def load_state_dict(self, state_dict): + # Optimizer. + optimizer_key = 'optimizer' + if optimizer_key not in state_dict: + optimizer_key = 'optimizer_state_dict' + print_rank_0('***WARNING*** loading optimizer from ' + 'an old checkpoint ...') + self.optimizer.load_state_dict(state_dict[optimizer_key]) + + # Grad scaler. + if 'grad_scaler' not in state_dict: + print_rank_0('***WARNING*** found an old checkpoint, will not ' + 'load grad scaler ...') + else: + if self.grad_scaler: + self.grad_scaler.load_state_dict(state_dict['grad_scaler']) + else: + print_rank_0('***WARNING*** fould the grad scaler in the ' + 'checkpoint but it is None in the class. ' + 'Skipping loading grad scaler ...') + + # Copy data for the main params. 
+ fp32_from_float16_params_key = 'fp32_from_fp16_params' + if fp32_from_float16_params_key not in state_dict: + fp32_from_float16_params_key = 'fp32_from_fp16' + for current_group, saved_group in zip( + self.fp32_from_float16_groups, + state_dict[fp32_from_float16_params_key]): + for current_param, saved_param in zip(current_group, saved_group): + current_param.data.copy_(saved_param.data) + + def get_parameters(self): + params = [] + for param_group in self.optimizer.param_groups: + for param in param_group['params']: + params.append(param) + return params + + def clip_grad_norm(self, clip_grad): + params = self.get_parameters() + return clip_grad_norm_fp32(params, clip_grad) + + def count_zeros(self): + params = self.get_parameters() + return count_zeros_fp32(params) + + def scale_loss(self, loss): + """Simple scaling.""" + return self.get_loss_scale() * loss + + # Promote state so it can be retrieved or set via + # "optimizer_instance.state" + def _get_state(self): + return self.optimizer.state + + def _set_state(self, value): + self.optimizer.state = value + + state = property(_get_state, _set_state) + + # Promote param_groups so it can be retrieved or set via + # "optimizer_instance.param_groups" + # (for example, to adjust the learning rate) + def _get_param_groups(self): + return self.optimizer.param_groups + + def _set_param_groups(self, value): + self.optimizer.param_groups = value + + param_groups = property(_get_param_groups, _set_param_groups) diff --git a/colossalai/nn/optimizer/fused_adam.py b/colossalai/nn/optimizer/fused_adam.py new file mode 100644 index 000000000..5ab31b363 --- /dev/null +++ b/colossalai/nn/optimizer/fused_adam.py @@ -0,0 +1,163 @@ +# modified from https://github.com/NVIDIA/apex/blob/master/apex/optimizers/fused_adam.py +import torch + +from colossalai.registry import OPTIMIZERS +from ..multi_tensor_apply import multi_tensor_applier + + +@OPTIMIZERS.register_module +class FusedAdam(torch.optim.Optimizer): + """Implements Adam algorithm. + + Currently GPU-only. Requires ColossalAI to be installed via + ``pip install -v --no-cache-dir --global-option="--cuda_ext" ./``. + + This version of fused Adam implements 2 fusions. + + * Fusion of the Adam update's elementwise operations + * A multi-tensor apply launch that batches the elementwise updates applied to all the model's parameters into one or a few kernel launches. + + :class:`colossalai.nn.optimizer.FusedAdam` may be used as a drop-in replacement for ``torch.optim.AdamW``, + or ``torch.optim.Adam`` with ``adam_w_mode=False`` + + :class:`colossalai.nn.optimizer.FusedAdam` may be used with or without Amp. + + Adam was been proposed in `Adam: A Method for Stochastic Optimization`_. + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups. + lr (float, optional): learning rate. (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square. (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability. (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) NOT SUPPORTED in FusedAdam! 
+ adam_w_mode (boolean, optional): Apply L2 regularization or weight decay + True for decoupled weight decay(also known as AdamW) (default: True) + set_grad_none (bool, optional): whether set grad to None when zero_grad() + method is called. (default: True) + + .. _Adam\: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + .. _On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + """ + + def __init__(self, params, lr=1e-3, bias_correction=True, + betas=(0.9, 0.999), eps=1e-8, adam_w_mode=True, + weight_decay=0., amsgrad=False, set_grad_none=True): + + if amsgrad: + raise RuntimeError( + 'FusedAdam does not support the AMSGrad variant.') + defaults = dict(lr=lr, bias_correction=bias_correction, + betas=betas, eps=eps, weight_decay=weight_decay) + super(FusedAdam, self).__init__(params, defaults) + self.adam_w_mode = 1 if adam_w_mode else 0 + self.set_grad_none = set_grad_none + if multi_tensor_applier.available: + import colossal_C + # Skip buffer + self._dummy_overflow_buf = torch.cuda.IntTensor([0]) + self.multi_tensor_adam = colossal_C.multi_tensor_adam + else: + raise RuntimeError( + 'apex.optimizers.FusedAdam requires cuda extensions') + + def zero_grad(self): + if self.set_grad_none: + for group in self.param_groups: + for p in group['params']: + p.grad = None + else: + super(FusedAdam, self).zero_grad() + + def step(self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + + The remaining arguments are deprecated, and are only retained (for the moment) for error-checking purposes. + """ + if any(p is not None for p in [grads, output_params, scale, grad_norms]): + raise RuntimeError( + 'FusedAdam has been updated. 
Simply initialize it identically to torch.optim.Adam, and call step() with no arguments.') + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + bias_correction = 1 if group['bias_correction'] else 0 + beta1, beta2 = group['betas'] + + # assume same step across group now to simplify things + # per parameter step can be easily support by making it tensor, or pass list into kernel + if 'step' in group: + group['step'] += 1 + else: + group['step'] = 1 + + # create lists for multi-tensor apply + g_16, p_16, m_16, v_16 = [], [], [], [] + g_32, p_32, m_32, v_32 = [], [], [], [] + + for p in group['params']: + if p.grad is None: + continue + if p.grad.data.is_sparse: + raise RuntimeError( + 'FusedAdam does not support sparse gradients, please consider SparseAdam instead') + + state = self.state[p] + # State initialization + if len(state) == 0: + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p.data) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p.data) + + if p.dtype == torch.float16: + g_16.append(p.grad.data) + p_16.append(p.data) + m_16.append(state['exp_avg']) + v_16.append(state['exp_avg_sq']) + elif p.dtype == torch.float32: + g_32.append(p.grad.data) + p_32.append(p.data) + m_32.append(state['exp_avg']) + v_32.append(state['exp_avg_sq']) + else: + raise RuntimeError('FusedAdam only support fp16 and fp32.') + + if (len(g_16) > 0): + multi_tensor_applier(self.multi_tensor_adam, + self._dummy_overflow_buf, + [g_16, p_16, m_16, v_16], + group['lr'], + beta1, + beta2, + group['eps'], + group['step'], + self.adam_w_mode, + bias_correction, + group['weight_decay']) + if (len(g_32) > 0): + multi_tensor_applier(self.multi_tensor_adam, + self._dummy_overflow_buf, + [g_32, p_32, m_32, v_32], + group['lr'], + beta1, + beta2, + group['eps'], + group['step'], + self.adam_w_mode, + bias_correction, + group['weight_decay']) + + return loss diff --git a/colossalai/nn/optimizer/fused_lamb.py b/colossalai/nn/optimizer/fused_lamb.py new file mode 100644 index 000000000..14b1167a9 --- /dev/null +++ b/colossalai/nn/optimizer/fused_lamb.py @@ -0,0 +1,212 @@ +# modified from https://github.com/NVIDIA/apex/blob/master/apex/optimizers/fused_lamb.py +import torch + +from colossalai.registry import OPTIMIZERS +from ..multi_tensor_apply import multi_tensor_applier + + +@OPTIMIZERS.register_module +class FusedLAMB(torch.optim.Optimizer): + """Implements LAMB algorithm. + + Currently GPU-only. Requires ColossalAI to be installed via + ``pip install -v --no-cache-dir --global-option="--cuda_ext" ./``. + + This version of fused LAMB implements 2 fusions. + + * Fusion of the LAMB update's elementwise operations + * A multi-tensor apply launch that batches the elementwise updates applied to all the model's parameters into one or a few kernel launches. + + :class:`colossalai.nn.optimizer.FusedLAMB`'s usage is identical to any ordinary Pytorch optimizer + + :class:`colossalai.nn.optimizer.FusedLAMB` may be used with or without Amp. + + LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_. + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups. + lr (float, optional): learning rate. (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its norm. 
(default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability. (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + NOT SUPPORTED now! (default: False) + adam_w_mode (boolean, optional): Apply L2 regularization or weight decay + True for decoupled weight decay(also known as AdamW) (default: True) + grad_averaging (bool, optional): whether apply (1-beta2) to grad when + calculating running averages of gradient. (default: True) + set_grad_none (bool, optional): whether set grad to None when zero_grad() + method is called. (default: True) + max_grad_norm (float, optional): value used to clip global grad norm + (default: 1.0) + use_nvlamb (boolean, optional): Apply adaptive learning rate to 0.0 + weight decay parameter (default: False) + + .. _Large Batch Optimization for Deep Learning\: Training BERT in 76 minutes: + https://arxiv.org/abs/1904.00962 + .. _On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + """ + + def __init__(self, params, lr=1e-3, bias_correction=True, + betas=(0.9, 0.999), eps=1e-6, weight_decay=0.01, + amsgrad=False, adam_w_mode=True, + grad_averaging=True, set_grad_none=True, + max_grad_norm=1.0, use_nvlamb=False): + if amsgrad: + raise RuntimeError( + 'FusedLAMB does not support the AMSGrad variant.') + defaults = dict(lr=lr, bias_correction=bias_correction, + betas=betas, eps=eps, weight_decay=weight_decay, + grad_averaging=grad_averaging, + max_grad_norm=max_grad_norm) + super(FusedLAMB, self).__init__(params, defaults) + if multi_tensor_applier.available: + import colossal_C + self.multi_tensor_l2norm = colossal_C.multi_tensor_l2norm + # Skip buffer + self._dummy_overflow_buf = torch.tensor( + [0], dtype=torch.int, device=self.param_groups[0]["params"][0].device) + self.multi_tensor_lamb = colossal_C.multi_tensor_lamb + else: + raise RuntimeError( + 'apex.optimizers.FusedLAMB requires cuda extensions') + + self.adam_w_mode = 1 if adam_w_mode else 0 + self.set_grad_none = set_grad_none + self.use_nvlamb = use_nvlamb + + def zero_grad(self): + if self.set_grad_none: + for group in self.param_groups: + for p in group['params']: + p.grad = None + else: + super(FusedLAMB, self).zero_grad() + + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
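Both fused optimizers follow the same pattern in `step()`: bucket gradients, parameters and moment buffers into per-dtype lists, then hand each bucket to one batched kernel launch through `multi_tensor_applier`. The sketch below reproduces only the bucketing and substitutes a plain per-tensor Adam update for the fused `colossal_C.multi_tensor_adam` call, so it runs without the CUDA extension; weight decay and the AdamW mode are omitted, and the toy tensors are invented for illustration.

```python
import torch

# Toy parameters; fp32 is used so the sketch also runs on CPU.
params = [torch.nn.Parameter(torch.randn(4)) for _ in range(3)]
for p in params:
    p.grad = torch.randn_like(p)
state = {p: {'exp_avg': torch.zeros_like(p), 'exp_avg_sq': torch.zeros_like(p)}
         for p in params}

# Bucket grads/params/moments by dtype, as FusedAdam.step() does before
# launching one fused kernel per bucket.
buckets = {}
for p in params:
    g_l, p_l, m_l, v_l = buckets.setdefault(p.dtype, ([], [], [], []))
    g_l.append(p.grad)
    p_l.append(p.data)
    m_l.append(state[p]['exp_avg'])
    v_l.append(state[p]['exp_avg_sq'])

# Reference (unfused) Adam update applied bucket-wise; the real optimizer
# replaces this inner loop with a single multi_tensor_applier launch.
lr, beta1, beta2, eps, step = 1e-3, 0.9, 0.999, 1e-8, 1
for g_l, p_l, m_l, v_l in buckets.values():
    for g, w, m, v in zip(g_l, p_l, m_l, v_l):
        m.mul_(beta1).add_(g, alpha=1 - beta1)
        v.mul_(beta2).addcmul_(g, g, value=1 - beta2)
        denom = (v / (1 - beta2 ** step)).sqrt().add_(eps)
        w.add_(m / (1 - beta1 ** step) / denom, alpha=-lr)
```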
+ """ + loss = None + if closure is not None: + loss = closure() + + # create separate grad lists for fp32 and fp16 params + g_all_32, g_all_16 = [], [] + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + if p.dtype == torch.float32: + g_all_32.append(p.grad.data) + elif p.dtype == torch.float16: + g_all_16.append(p.grad.data) + else: + raise RuntimeError('FusedLAMB only support fp16 and fp32.') + + device = self.param_groups[0]["params"][0].device + g_norm_32, g_norm_16 = torch.zeros( + 1, device=device), torch.zeros(1, device=device) + # compute grad norm for two lists + if len(g_all_32) > 0: + g_norm_32 = multi_tensor_applier(self.multi_tensor_l2norm, + self._dummy_overflow_buf, + [g_all_32], False)[0] + if len(g_all_16) > 0: + g_norm_16 = multi_tensor_applier(self.multi_tensor_l2norm, + self._dummy_overflow_buf, + [g_all_16], False)[0] + + # blend two grad norms to get global grad norm + global_grad_norm = multi_tensor_applier(self.multi_tensor_l2norm, + self._dummy_overflow_buf, + [[g_norm_32, g_norm_16]], + False)[0] + max_grad_norm = self.defaults['max_grad_norm'] + + for group in self.param_groups: + bias_correction = 1 if group['bias_correction'] else 0 + beta1, beta2 = group['betas'] + grad_averaging = 1 if group['grad_averaging'] else 0 + + # assume same step across group now to simplify things + # per parameter step can be easily support by making it tensor, or pass list into kernel + if 'step' in group: + group['step'] += 1 + else: + group['step'] = 1 + + # create lists for multi-tensor apply + g_16, p_16, m_16, v_16 = [], [], [], [] + g_32, p_32, m_32, v_32 = [], [], [], [] + + for p in group['params']: + if p.grad is None: + continue + if p.grad.data.is_sparse: + raise RuntimeError( + 'FusedLAMB does not support sparse gradients, please consider SparseAdam instead') + + state = self.state[p] + # State initialization + if len(state) == 0: + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p.data) + # Exponential moving average of gradient values + state['exp_avg_sq'] = torch.zeros_like(p.data) + + if p.dtype == torch.float16: + g_16.append(p.grad.data) + p_16.append(p.data) + m_16.append(state['exp_avg']) + v_16.append(state['exp_avg_sq']) + elif p.dtype == torch.float32: + g_32.append(p.grad.data) + p_32.append(p.data) + m_32.append(state['exp_avg']) + v_32.append(state['exp_avg_sq']) + else: + raise RuntimeError('FusedLAMB only support fp16 and fp32.') + + if (len(g_16) > 0): + multi_tensor_applier(self.multi_tensor_lamb, + self._dummy_overflow_buf, + [g_16, p_16, m_16, v_16], + group['lr'], + beta1, + beta2, + group['eps'], + group['step'], + bias_correction, + group['weight_decay'], + grad_averaging, + self.adam_w_mode, + global_grad_norm, + max_grad_norm, + self.use_nvlamb) + if (len(g_32) > 0): + multi_tensor_applier(self.multi_tensor_lamb, + self._dummy_overflow_buf, + [g_32, p_32, m_32, v_32], + group['lr'], + beta1, + beta2, + group['eps'], + group['step'], + bias_correction, + group['weight_decay'], + grad_averaging, + self.adam_w_mode, + global_grad_norm, + max_grad_norm, + self.use_nvlamb) + + return loss diff --git a/colossalai/nn/optimizer/fused_sgd.py b/colossalai/nn/optimizer/fused_sgd.py new file mode 100644 index 000000000..3950c40be --- /dev/null +++ b/colossalai/nn/optimizer/fused_sgd.py @@ -0,0 +1,227 @@ +# modified from https://github.com/NVIDIA/apex/blob/master/apex/optimizers/fused_sgd.py +import torch +from torch.optim.optimizer import Optimizer, required + +from 
colossalai.registry import OPTIMIZERS +from ..multi_tensor_apply import multi_tensor_applier + + +@OPTIMIZERS.register_module +class FusedSGD(Optimizer): + r"""Implements stochastic gradient descent (optionally with momentum). + + Currently GPU-only. Requires ColossalAI to be installed via + ``pip install -v --no-cache-dir --global-option="--cuda_ext" ./``. + + This version of fused SGD implements 2 fusions. + + * Fusion of the SGD update's elementwise operations + * A multi-tensor apply launch that batches the elementwise updates applied to all the model's parameters into one or a few kernel launches. + + :class:`colossalai.nn.optimizer.FusedSGD` may be used as a drop-in replacement for ``torch.optim.SGD`` + + :class:`colossalai.nn.optimizer.FusedSGD` may be used with or without Amp. + + Nesterov momentum is based on the formula from + `On the importance of initialization and momentum in deep learning`__. + + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float): learning rate + momentum (float, optional): momentum factor (default: 0) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + dampening (float, optional): dampening for momentum (default: 0) + nesterov (bool, optional): enables Nesterov momentum (default: False) + + __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf + + .. note:: + The implementation of SGD with Momentum/Nesterov subtly differs from + Sutskever et. al. and implementations in some other frameworks. + Considering the specific case of Momentum, the update can be written as + + .. math:: + v = \rho * v + g \\ + p = p - lr * v + + where p, g, v and :math:`\rho` denote the parameters, gradient, + velocity, and momentum respectively. + This is in contrast to Sutskever et. al. and + other frameworks which employ an update of the form + + .. math:: + v = \rho * v + lr * g \\ + p = p - v + + The Nesterov version is analogously modified. 
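The docstring note above is worth making concrete: in the convention used by `torch.optim.SGD` and `FusedSGD`, the learning rate multiplies the accumulated velocity at update time, whereas the Sutskever et al. form folds it into the velocity itself. With a constant learning rate the two produce identical trajectories, but the stored momentum buffers differ by a factor of `lr`, so they diverge as soon as the learning rate changes between steps. A small self-contained comparison (illustrative only):

```python
import torch

def sgd_momentum_fused(p, g, v, lr, rho):
    """FusedSGD / torch.optim.SGD convention: v = rho*v + g; p = p - lr*v."""
    v.mul_(rho).add_(g)
    p.sub_(lr * v)

def sgd_momentum_sutskever(p, g, v, lr, rho):
    """Sutskever et al. convention: v = rho*v + lr*g; p = p - v."""
    v.mul_(rho).add_(g, alpha=lr)
    p.sub_(v)

p1, v1 = torch.ones(1), torch.zeros(1)
p2, v2 = torch.ones(1), torch.zeros(1)
g = torch.full((1,), 0.5)

for _ in range(3):
    sgd_momentum_fused(p1, g, v1, lr=0.1, rho=0.9)
    sgd_momentum_sutskever(p2, g, v2, lr=0.1, rho=0.9)

print(p1.item(), p2.item())  # equal here; they differ once lr varies across steps
```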
+ """ + + def __init__(self, params, lr=required, momentum=0, dampening=0, + weight_decay=0, nesterov=False, + wd_after_momentum=False, + materialize_master_grads=True, + set_grad_none=False): + if lr is not required and lr < 0.0: + raise ValueError("Invalid learning rate: {}".format(lr)) + if momentum < 0.0: + raise ValueError("Invalid momentum value: {}".format(momentum)) + if weight_decay < 0.0: + raise ValueError( + "Invalid weight_decay value: {}".format(weight_decay)) + + defaults = dict(lr=lr, momentum=momentum, dampening=dampening, + weight_decay=weight_decay, nesterov=nesterov) + if nesterov and (momentum <= 0 or dampening != 0): + raise ValueError( + "Nesterov momentum requires a momentum and zero dampening") + super(FusedSGD, self).__init__(params, defaults) + + self.wd_after_momentum = wd_after_momentum + self.materialize_master_grads = materialize_master_grads + self.most_recent_scale = 1.0 + self.scale_set_by_backward = False + self.set_grad_none = set_grad_none + + if multi_tensor_applier.available: + import colossal_C + # Skip buffer + self._dummy_overflow_buf = torch.tensor( + [0], dtype=torch.int, device=self.param_groups[0]["params"][0].device) + self.multi_tensor_sgd = colossal_C.multi_tensor_sgd + else: + raise RuntimeError( + 'apex.optimizers.FusedSGD requires cuda extensions') + + def __setstate__(self, state): + super(FusedSGD, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('nesterov', False) + + def zero_grad(self): + if self.set_grad_none: + for group in self.param_groups: + for p in group['params']: + p.grad = None + else: + super(FusedSGD, self).zero_grad() + + def get_momentums(self, params): + momentums = [] + first_run = True + for p in params: + param_state = self.state[p] + # torch.optim.SGD initializes momentum in the main loop, we have + # to do it here, and track whether or not we've done so, so that + # momentum application can be skipped in the main kernel. + if 'momentum_buffer' not in param_state: + first_run = True + buf = param_state['momentum_buffer'] = torch.zeros_like(p.data) + momentums.append(buf) + else: + first_run = False + momentums.append(param_state['momentum_buffer']) + return momentums, first_run + + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + explicit_master_params = (hasattr(self, "_amp_stash") and + hasattr(self._amp_stash, "fp32_from_fp16_groups")) + + for gid, group in enumerate(self.param_groups): + weight_decay = group['weight_decay'] + momentum = group['momentum'] + dampening = group['dampening'] + nesterov = group['nesterov'] + + # For each group, there are 3 possible combinations we need to consider: + # grad_type, param_to_update_type, momentum_type, requires_fp16_model_copy + # 1. fp16, fp16, fp16, No + # 2. fp32, fp32, fp32, No + # 3. 
fp16, fp32, fp32, Yes + + first_runs = [True, True] + + # I think a bit of code divergence in exchange for naming clarity is worthwhile + if explicit_master_params: + stash = self._amp_stash + + fp32_params = [ + p for p in stash.fp32_from_fp32_groups[gid] if p.grad is not None] + fp32_grads = [ + p.grad for p in stash.fp32_from_fp32_groups[gid] if p.grad is not None] + fp32_momentums, first_runs[1] = self.get_momentums(fp32_params) + + if self.materialize_master_grads: + fp16_model_params = [p for i, p in enumerate( + stash.fp16_groups[gid]) if stash.fp32_from_fp16_groups[gid][i].grad is not None] + fp32_from_fp16_grads = [ + p.grad for p in stash.fp32_from_fp16_groups[gid] if p.grad is not None] + fp32_from_fp16_params = [ + p for p in stash.fp32_from_fp16_groups[gid] if p.grad is not None] + fp32_from_fp16_momentums, first_runs[0] = self.get_momentums( + fp32_from_fp16_params) + + fp16_set = [fp32_from_fp16_grads, fp32_from_fp16_params, + fp32_from_fp16_momentums, fp16_model_params] + else: + fp16_model_params = [ + p for p in stash.fp16_groups[gid] if p.grad is not None] + fp16_model_grads = [ + p.grad for p in stash.fp16_groups[gid] if p.grad is not None] + fp32_from_fp16_params = [p for i, p in enumerate( + stash.fp32_from_fp16_groups[gid]) if stash.fp16_groups[gid][i].grad is not None] + fp32_from_fp16_momentums, first_runs[0] = self.get_momentums( + fp32_from_fp16_params) + + fp16_set = [fp16_model_grads, fp32_from_fp16_params, + fp32_from_fp16_momentums, fp16_model_params] + + launch_sets = [fp16_set, [ + fp32_grads, fp32_params, fp32_momentums]] + else: + fp16_params = [p for p in group['params'] if ( + p.dtype == torch.float16 and p.grad is not None)] + fp16_grads = [p.grad for p in group['params'] if ( + p.dtype == torch.float16 and p.grad is not None)] + fp16_momentums, first_runs[0] = self.get_momentums(fp16_params) + + fp32_params = [p for p in group['params'] if ( + p.dtype == torch.float32 and p.grad is not None)] + fp32_grads = [p.grad for p in group['params'] if ( + p.dtype == torch.float32 and p.grad is not None)] + fp32_momentums, first_runs[1] = self.get_momentums(fp32_params) + + launch_sets = [[fp16_grads, fp16_params, fp16_momentums], + [fp32_grads, fp32_params, fp32_momentums]] + + for s, (launch_set, first_run) in enumerate(zip(launch_sets, first_runs)): + assert len(launch_set[0]) == len(launch_set[1]) + assert len(launch_set[0]) == len(launch_set[2]) + if len(launch_set[0]) > 0: + multi_tensor_applier( + self.multi_tensor_sgd, + self._dummy_overflow_buf, + launch_set, + weight_decay, + momentum, + dampening, + group['lr'], + nesterov, + first_run, + self.wd_after_momentum, + 1.0 / self.most_recent_scale) + + self.most_recent_scale = 1.0 + self.scale_set_by_backward = False + + return loss diff --git a/colossalai/nn/optimizer/lamb.py b/colossalai/nn/optimizer/lamb.py new file mode 100644 index 000000000..077049b48 --- /dev/null +++ b/colossalai/nn/optimizer/lamb.py @@ -0,0 +1,114 @@ +""" +Adapted from the pytorch-lamb library at https://github.com/cybertronai/pytorch-lamb +""" + +import torch +from torch.optim import Optimizer + +from colossalai.registry import OPTIMIZERS + + +@OPTIMIZERS.register_module +class Lamb(Optimizer): + r"""Implements Lamb algorithm. + It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_. 
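For reference, the update that `step()` below implements can be summarized as follows (bias correction intentionally omitted, matching the commented-out debiasing in the code); this restates the algorithm rather than adding behaviour:

```latex
m_t = \beta_1 m_{t-1} + (1-\beta_1)\, g_t, \qquad
v_t = \beta_2 v_{t-1} + (1-\beta_2)\, g_t^2

r_t = \frac{m_t}{\sqrt{v_t} + \epsilon} + \lambda\, w_{t-1}, \qquad
w_t = w_{t-1} - \eta\, \frac{\min(\lVert w_{t-1}\rVert, 10)}{\lVert r_t \rVert}\, r_t
```

with the trust ratio replaced by 1 whenever either norm is zero, or when `adam=True`, which reduces the optimizer to Adam without debiasing.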
+ Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + adam (bool, optional): always use trust ratio = 1, which turns this into + Adam. Useful for comparison purposes. + .. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes: + https://arxiv.org/abs/1904.00962 + """ + + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, + weight_decay=0, adam=False): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError( + "Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError( + "Invalid beta parameter at index 1: {}".format(betas[1])) + defaults = dict(lr=lr, betas=betas, eps=eps, + weight_decay=weight_decay) + self.adam = adam + super(Lamb, self).__init__(params, defaults) + + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.data + if grad.is_sparse: + raise RuntimeError( + 'Lamb does not support sparse gradients, consider SparseAdam instad.') + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p.data) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p.data) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + + state['step'] += 1 + + # Decay the first and second moment running average coefficient + # m_t + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + # v_t + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + + # Paper v3 does not use debiasing. + # bias_correction1 = 1 - beta1 ** state['step'] + # bias_correction2 = 1 - beta2 ** state['step'] + # Apply bias to lr to avoid broadcast. 
+ # * math.sqrt(bias_correction2) / bias_correction1 + step_size = group['lr'] + + weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10) + + adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps']) + if group['weight_decay'] != 0: + adam_step.add_(p.data, alpha=group['weight_decay']) + + adam_norm = adam_step.pow(2).sum().sqrt() + if weight_norm == 0 or adam_norm == 0: + trust_ratio = 1 + else: + trust_ratio = weight_norm / adam_norm + state['weight_norm'] = weight_norm + state['adam_norm'] = adam_norm + state['trust_ratio'] = trust_ratio + if self.adam: + trust_ratio = 1 + + p.data.add_(adam_step, alpha=-step_size * trust_ratio) + + return loss diff --git a/colossalai/nn/optimizer/lars.py b/colossalai/nn/optimizer/lars.py new file mode 100644 index 000000000..977e1d4e3 --- /dev/null +++ b/colossalai/nn/optimizer/lars.py @@ -0,0 +1,99 @@ +"""Adapted from https://github.com/NUS-HPC-AI-Lab/LARS-ImageNet-PyTorch/blob/main/lars.py""" + +from typing import Iterable + +import torch +from torch.optim import Optimizer + +from colossalai.registry import OPTIMIZERS + + +@OPTIMIZERS.register_module +class Lars(Optimizer): + r"""Implements the LARS optimizer from `"Large batch training of convolutional networks" + `_. + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate + momentum (float, optional): momentum factor (default: 0) + eeta (float, optional): LARS coefficient as used in the paper (default: 1e-3) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + """ + + def __init__( + self, + params: Iterable[torch.nn.Parameter], + lr=1e-3, + momentum=0, + eeta=1e-3, + weight_decay=0, + epsilon=0.0 + ) -> None: + if not isinstance(lr, float) or lr < 0.0: + raise ValueError("Invalid learning rate: {}".format(lr)) + if momentum < 0.0: + raise ValueError("Invalid momentum value: {}".format(momentum)) + if weight_decay < 0.0: + raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) + if eeta <= 0 or eeta > 1: + raise ValueError("Invalid eeta value: {}".format(eeta)) + if epsilon < 0: + raise ValueError("Invalid epsilon value: {}".format(epsilon)) + defaults = dict(lr=lr, momentum=momentum, + weight_decay=weight_decay, eeta=eeta, epsilon=epsilon, lars=True) + + super().__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + weight_decay = group['weight_decay'] + momentum = group['momentum'] + eeta = group['eeta'] + lr = group['lr'] + lars = group['lars'] + eps = group['epsilon'] + + for p in group['params']: + if p.grad is None: + continue + decayed_grad = p.grad + scaled_lr = lr + if lars: + w_norm = torch.norm(p) + g_norm = torch.norm(p.grad) + trust_ratio = torch.where( + w_norm > 0 and g_norm > 0, + eeta * w_norm / (g_norm + weight_decay * w_norm + eps), + torch.ones_like(w_norm) + ) + trust_ratio.clamp_(0.0, 50) + scaled_lr *= trust_ratio.item() + if weight_decay != 0: + decayed_grad = decayed_grad.add(p, alpha=weight_decay) + decayed_grad = torch.clamp(decayed_grad, -10.0, 10.0) + + if momentum != 0: + param_state = self.state[p] + if 'momentum_buffer' not in param_state: + buf = param_state['momentum_buffer'] = torch.clone( + decayed_grad).detach() + else: + buf = param_state['momentum_buffer'] + buf.mul_(momentum).add_(decayed_grad) + decayed_grad = buf + + p.add_(decayed_grad, alpha=-scaled_lr) + + return loss diff --git a/colossalai/nn/optimizer/loss_scaler.py b/colossalai/nn/optimizer/loss_scaler.py new file mode 100644 index 000000000..19e124200 --- /dev/null +++ b/colossalai/nn/optimizer/loss_scaler.py @@ -0,0 +1,166 @@ +# Copyright 2019 The Microsoft DeepSpeed Team +# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Taken and modified for DeepSpeed from: +# https://github.com/NVIDIA/Megatron-LM/blob/master/fp16/loss_scaler.py +# Commit: 93ab4bea59dc5cbf97c079d313741866af4deac9 + + +INITIAL_LOSS_SCALE = 'init_scale' +SCALE_WINDOW = 'scale_window' +DELAYED_SHIFT = 'delayed_shift' +MIN_LOSS_SCALE = 'min_scale' + + +# item() is a recent addition, so this helps with backward compatibility. +def to_python_float(t): + if hasattr(t, 'item'): + return t.item() + return t[0] + + +class LossScalerBase: + """LossScalarBase + Base class for a loss scaler + """ + + def __init__(self, cur_scale): + self.cur_scale = cur_scale + + @property + def loss_scale(self): + return self.cur_scale + + def scale_gradient(self, module, grad_in, grad_out): + return tuple(self.loss_scale * g for g in grad_in) + + def update_scale(self, overflow): + pass + + def backward(self, loss, retain_graph=False): + scaled_loss = loss * self.loss_scale + scaled_loss.backward(retain_graph=retain_graph) + + +class LossScaler(LossScalerBase): + """ + Class that manages a static loss scale. This class is intended to interact with + :class:`FP16_Optimizer`, and should not be directly manipulated by the user. + Use of :class:`LossScaler` is enabled via the ``static_loss_scale`` argument to + :class:`FP16_Optimizer`'s constructor. + Args: + scale (float, optional, default=1.0): The loss scale. 
+ """ + + def __init__(self, scale=1): + super(LossScaler, self).__init__(scale) + + # `params` is a list / generator of torch.Variable + def has_overflow(self, params): + return False + + # `x` is a torch.Tensor + def _has_inf_or_nan(x): + return False + + +class DynamicLossScaler(LossScalerBase): + """ + Class that manages dynamic loss scaling. It is recommended to use :class:`DynamicLossScaler` + indirectly, by supplying ``dynamic_loss_scale=True`` to the constructor of + :class:`FP16_Optimizer`. However, it's important to understand how :class:`DynamicLossScaler` + operates, because the default options can be changed using the + the ``dynamic_loss_args`` argument to :class:`FP16_Optimizer`'s constructor. + Loss scaling is designed to combat the problem of underflowing gradients encountered at long + times when training fp16 networks. Dynamic loss scaling begins by attempting a very high loss + scale. Ironically, this may result in OVERflowing gradients. If overflowing gradients are + encountered, :class:`DynamicLossScaler` informs :class:`FP16_Optimizer` that an overflow has + occurred. + :class:`FP16_Optimizer` then skips the update step for this particular iteration/minibatch, + and :class:`DynamicLossScaler` adjusts the loss scale to a lower value. + If a certain number of iterations occur without overflowing gradients detected, + :class:`DynamicLossScaler` increases the loss scale once more. + In this way :class:`DynamicLossScaler` attempts to "ride the edge" of + always using the highest loss scale possible without incurring overflow. + Args: + init_scale (float, optional, default=2**32): Initial loss scale attempted by :class:`DynamicLossScaler.` + scale_factor (float, optional, default=2.0): Factor used when adjusting the loss scale. If an overflow is encountered, the loss scale is readjusted to loss scale/``scale_factor``. If ``scale_window`` consecutive iterations take place without an overflow, the loss scale is readjusted to loss_scale*``scale_factor``. + scale_window (int, optional, default=1000): Number of consecutive iterations without an overflow to wait before increasing the loss scale. + """ + + def __init__(self, + init_scale=2 ** 32, + scale_factor=2., + scale_window=1000, + min_scale=1, + delayed_shift=1, + consecutive_hysteresis=False): + super(DynamicLossScaler, self).__init__(init_scale) + self.cur_iter = 0 + self.last_overflow_iter = -1 + self.scale_factor = scale_factor + self.scale_window = scale_window + self.min_scale = min_scale + self.delayed_shift = delayed_shift + self.cur_hysteresis = delayed_shift + self.consecutive_hysteresis = consecutive_hysteresis + + # `params` is a list / generator of torch.Variable + def has_overflow_serial(self, params): + for p in params: + if p.grad is not None and self._has_inf_or_nan(p.grad.data): + return True + + return False + + # `x` is a torch.Tensor + @staticmethod + def _has_inf_or_nan(x): + try: + # if x is half, the .float() incurs an additional deep copy, but it's necessary if + # Pytorch's .sum() creates a one-element tensor of the same type as x + # (which is true for some recent version of pytorch). + cpu_sum = float(x.float().sum()) + # More efficient version that can be used if .sum() returns a Python scalar + # cpu_sum = float(x.sum()) + except RuntimeError as instance: + # We want to check if inst is actually an overflow exception. + # RuntimeError could come from a different error. + # If so, we still want the exception to propagate. 
+ if "value cannot be converted" not in instance.args[0]: + raise + return True + else: + if cpu_sum in [float('inf'), -float('inf')] or cpu_sum != cpu_sum: + return True + return False + + # `overflow` is boolean indicating whether the gradient overflowed + def update_scale(self, overflow): + if overflow: + # self.cur_scale /= self.scale_factor + if self.delayed_shift == 1 or self.cur_hysteresis == 1: + self.cur_scale = max( + self.cur_scale / self.scale_factor, self.min_scale) + else: + self.cur_hysteresis -= 1 + self.last_overflow_iter = self.cur_iter + else: + if self.consecutive_hysteresis: + self.cur_hysteresis = self.delayed_shift + if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0: + if not self.consecutive_hysteresis: + self.cur_hysteresis = self.delayed_shift + self.cur_scale *= self.scale_factor + self.cur_iter += 1 diff --git a/colossalai/nn/optimizer/zero_redundancy_optimizer_level_1.py b/colossalai/nn/optimizer/zero_redundancy_optimizer_level_1.py new file mode 100644 index 000000000..05848f1dd --- /dev/null +++ b/colossalai/nn/optimizer/zero_redundancy_optimizer_level_1.py @@ -0,0 +1,707 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import math +from collections import defaultdict + +import torch +import torch.distributed as dist +from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors +from torch.optim import Optimizer + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.registry import OPTIMIZER_WRAPPERS +from colossalai.utils import get_current_device, print_rank_0 + + +def get_alignment_padding(flattened_lean_size, sub_partition_id, sub_partition_size): + sub_partition_high_limit = (sub_partition_id + 1) * sub_partition_size + if sub_partition_high_limit <= flattened_lean_size: + return 0 + else: + return min(sub_partition_size, sub_partition_high_limit - flattened_lean_size) + + +def get_group_alignment_padding(tensor_list, sub_partition_size, sub_partition_count): + group_paddings = [] + flattened_size = sum([tensor.numel() for tensor in tensor_list]) + for i in range(sub_partition_count): + padding = get_alignment_padding(flattened_size, i, sub_partition_size) + group_paddings.append(padding) + + return group_paddings + + +def _single_range_check(current_index, start_index, end_index, tensor_size): + offset = 0 + if (current_index >= start_index) and (current_index < end_index): + # Fully inside bounds + return True, offset + elif (start_index > current_index) and (start_index < (current_index + tensor_size)): + # Partially contained, compute offset + offset = start_index - current_index + return True, offset + else: + return False, offset + + +def _range_check(current_index, element_intervals, tensor_size): + results = [] + for comm_idx, interval in enumerate(element_intervals): + start_index, end_index = interval + contained, offset = _single_range_check( + current_index, start_index, end_index, tensor_size) + if contained: + results.append((contained, offset, comm_idx)) + if len(results) == 0: + return [(False, 0, -1)] + return results + + +@OPTIMIZER_WRAPPERS.register_module +class ZeroRedundancyOptimizer_Level_1(Optimizer): + """ + ZeroRedundancyOptimizer_Level_1 designed to reduce the memory footprint + required for training large deep learning models. 
+ + For more details please see ZeRO: Memory Optimization Towards Training A Trillion Parameter Models + https://arxiv.org/abs/1910.02054 + + This version aligns with stage-1 in the paper above. + """ + + def __init__(self, + init_optimizer: Optimizer, + dp_parallel_mode: ParallelMode = ParallelMode.DATA, + max_elements_per_comm=5e8, + verbose=False + ): + # TODO: this class does not work with fp16 AMP_TYPE.PARALLEL, fix it + assert get_current_device() != 'cpu', 'ZeRO optimizer cannot be used on CPU only' + + self.flatten = _flatten_dense_tensors + self.unflatten = _unflatten_dense_tensors + self.optimizer = init_optimizer + self.dp_parallel_mode = dp_parallel_mode + self.verbose = verbose + + # for compatibility with pytorch optim + self.defaults = init_optimizer.defaults + + # param flattened by groups + self._param_groups = [] + self._param_groups_flat = [] + + # parallel_sub_partitioned_fp16_groups[group-idx] -> [comm-ids] -> [rank-ids] + self.parallel_sub_partitioned_groups = [] + # same underlying data as above but viewed as: [groups] -> [rank-ids] -> [comm-ids] + self.parallel_comm_sub_partitioned_groups = [] + + # param partition info + # parameters in each group that will not be updated by this process directly + self.params_not_local = [] + + # parameters that will be updated by this process directly + self.params_in_rank_sub_partitions = [] + + # parameter offsets for parameters in sub-partitions. Parameter + # boundaries may not align with sub-partition boundaries + # so we need to keep track of the offsets + self.params_in_rank_sub_partitions_offsets = [] + + # number of elements per sub-partition in each group + self.sub_partition_sizes = [] + + # number of communication intervals for each group + self.num_comm_intervals_per_group = [] + + self.local_rank = gpc.get_local_rank(self.dp_parallel_mode) + self.partition_count = self.world_size = gpc.get_world_size( + self.dp_parallel_mode) + + self.group_paddings = [] + self.default_device = self.optimizer.param_groups[0]['params'][0].device + + # max elems per param group + self.max_elems_per_comm = [] + + # loop to deal with groups + for i, param_group in enumerate(self.optimizer.param_groups): + # push this group to list before modify + self._param_groups.append(param_group['params']) + + # calculate best max elements per comm based to minimize padding + self.max_elems_per_comm.append( + self.best_max_elems_per_comm( + num_elements=sum(t.numel() for t in self._param_groups[i]), + max_elements_per_comm=max_elements_per_comm + ) + ) + + # flattens all tensors into single 1d tensor aligned with sub-partition size for later dividing + # RS: create aligned sub-partitions + flat_aligned_params = self.flatten_dense_tensors_sub_partition_aligned( + tensor_list=self._param_groups[i], + max_elements_per_comm=self.max_elems_per_comm[i], + ) + self._param_groups_flat.append(flat_aligned_params) + + updated_params = self.unflatten(self._param_groups_flat[i], + self._param_groups[i]) + for p, q in zip(self._param_groups[i], updated_params): + p.data = q.data + + # divide the flat weights into near equal partition equal to the data parallel degree + # each process will compute on a different part of the partition + # RS: split into two layer list -> [comm-id] -> [sub-partitions per rank] + comm_partitions, dp_sub_partitions, element_intervals, sub_partition_size, num_comm_intervals = \ + self.get_data_parallel_sub_partitions( + tensor=self._param_groups_flat[i], + max_elements_per_comm=self.max_elems_per_comm[i], + ) + 
self.parallel_comm_sub_partitioned_groups.append( + comm_partitions) # comm -> rank + self.parallel_sub_partitioned_groups.append( + dp_sub_partitions) # rank -> comm + self.sub_partition_sizes.append(sub_partition_size) + self.num_comm_intervals_per_group.append(num_comm_intervals) + + # Compute sub_partition paddings + sub_partition_paddings = get_group_alignment_padding( + tensor_list=self._param_groups[i], + sub_partition_size=sub_partition_size, + sub_partition_count=num_comm_intervals * self.partition_count) + self.group_paddings.append(sub_partition_paddings) + + # modify optimizer of have flat master weight + param_group['params'] = self.parallel_sub_partitioned_groups[i][self.local_rank] + + # RS: divide up the sub-partitions and keep track of offsets for each param + # partition_size = len(self.fp16_groups_flat[i]) / dist.get_world_size(group=self.dp_process_group) + params_in_rank_sub_partition, params_in_rank_sub_partitions_offsets, params_not_local = self.get_all_sub_partition_info( + tensor_list=self._param_groups[i], + all_element_intervals=element_intervals, + ) + + self.params_in_rank_sub_partitions.append( + params_in_rank_sub_partition) + self.params_not_local.append(params_not_local) + self.params_in_rank_sub_partitions_offsets.append( + params_in_rank_sub_partitions_offsets) + + self.local_sub_partitions_of_groups = [ + group[self.local_rank] for group in self.parallel_sub_partitioned_groups] + self._initialize_optimizer_states() + + @property + def state(self): + return self.optimizer.state + + @state.setter + def state(self, value): + self.optimizer.state = value + + @property + def param_groups(self): + # LSG: return the full param groups instead of local partitions + # of the param groups for compatibility with torch.cuda.amp + param_groups = [] + + for group_id, group in enumerate(self.optimizer.param_groups): + group_containing_all_param = { + 'params': self._param_groups[group_id], + **{k: v for k, v in group.items() if k != 'params'} + } + # LSG: for compatibility with unknown bug with lr scheduler + # TODO: fix this + group_containing_all_param.setdefault('initial_lr', group['lr']) + param_groups.append(group_containing_all_param) + return param_groups + + @param_groups.setter + def param_groups(self, value): + self.optimizer.param_groups = value + + def _initialize_optimizer_states(self): + for group_idx, group in enumerate(self.local_sub_partitions_of_groups): + for idx, sub_partition_param in enumerate(group): + sub_partition_grad = torch.zeros(int( + self.sub_partition_sizes[group_idx]), + dtype=sub_partition_param.dtype).cuda() + sub_partition_param.grad = sub_partition_grad + + self.optimizer.step() + + # LSG: comment out for compatibility with torch.cuda.amp + # for group in self.local_sub_partitions_of_groups: + # for idx, sub_partition_param in enumerate(group): + # sub_partition_param.grad = None + + def best_max_elems_per_comm(self, num_elements, max_elements_per_comm): + # if we use max-elems-per-comm as is, how many comm intervals will there be + max_comm_intervals = math.ceil(num_elements / max_elements_per_comm) + padding_for_max_comm = (max_elements_per_comm * + max_comm_intervals) - num_elements + + # if we use 1 less comm interval how much extra comm padding would be required + min_comm_intervals = num_elements // max_elements_per_comm + if min_comm_intervals == 0: + if self.verbose: + print_rank_0( + f'Using default max_elements_per_comm {max_elements_per_comm}') + return max_elements_per_comm + + padding_for_min_comm = math.ceil( + 
num_elements / (self.world_size * min_comm_intervals)) + + # choose padding that uses least amount of overhead + if padding_for_max_comm > padding_for_min_comm: + new_max_elements_per_comm = padding_for_min_comm + max_elements_per_comm + if self.verbose: + print_rank_0( + f'Updating max_elements_per_comm from {max_elements_per_comm} -> {new_max_elements_per_comm}') + return new_max_elements_per_comm + else: + if self.verbose: + print_rank_0( + f'Using default max_elements_per_comm {max_elements_per_comm}') + return max_elements_per_comm + + def get_data_parallel_sub_partitions(self, + tensor, + max_elements_per_comm, + ): + total_num_elements = tensor.numel() + + # if total elements is less than our max, revert to splitting into dp partitions + max_elements_per_comm = min(total_num_elements, max_elements_per_comm) + sub_partition_size = int(max_elements_per_comm // self.world_size) + + # Ensure partition alignment was done correctly + num_sub_partitions = int(total_num_elements // sub_partition_size) + assert total_num_elements % sub_partition_size == 0, "{} % {} != 0".format(total_num_elements, + sub_partition_size) + + # Ensure comm interval alignment was done correctly. + num_comm_intervals = int(num_sub_partitions // self.world_size) + assert num_sub_partitions % self.world_size == 0, "{} % {} != 0".format( + num_sub_partitions, self.world_size) + + if self.verbose: + print_rank_0("**** partition info:") + print_rank_0(f"\t total_num_elements={total_num_elements}") + print_rank_0(f"\t world_size={self.world_size}") + print_rank_0(f"\t max_elements_per_comm={max_elements_per_comm}") + print_rank_0(f"\t sub_partition_size={sub_partition_size}") + print_rank_0(f"\t num_sub_partitions={num_sub_partitions}") + print_rank_0(f"\t num_comm_intervals={num_comm_intervals}") + print_rank_0("****") + + # [comm_id] -> [rank] + comm_partitions = [] + for _ in range(num_comm_intervals): + comm_partitions.append([]) + + start = 0 + comm_id = 0 + element_intervals = defaultdict( + list) # [rank] -> [(start,end), (start,end), ...] 
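The partition arithmetic above is easier to see with concrete numbers. Assuming the flattened group has already been padded to a multiple of `world_size * sub_partition_size` (which `flatten_dense_tensors_sub_partition_aligned` guarantees), the whole method reduces to carving the flat tensor into round-robin `narrow()` views; the toy sizes below are invented for illustration.

```python
import torch

world_size = 4
max_elements_per_comm = 16                # toy value; the class defaults to 5e8
flat = torch.arange(32.)                  # already padded: 32 == 2 * 4 * 4

sub_partition_size = max_elements_per_comm // world_size     # 4 elements each
num_sub_partitions = flat.numel() // sub_partition_size      # 8 views
num_comm_intervals = num_sub_partitions // world_size        # 2 all-gathers

# comm_partitions[comm_id][rank] and sub_partitions[rank][comm_id] are two
# indexings of the same narrow()-ed views, as in the method above.
comm_partitions = [
    [flat.narrow(0, (c * world_size + r) * sub_partition_size, sub_partition_size)
     for r in range(world_size)]
    for c in range(num_comm_intervals)
]
sub_partitions = [[comm_partitions[c][r] for c in range(num_comm_intervals)]
                  for r in range(world_size)]

assert sub_partitions[1][0].tolist() == [4., 5., 6., 7.]      # rank 1, comm 0
```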
+ for idx in range(num_sub_partitions): + rank_id = idx % self.world_size + sub_partition = tensor.narrow( + 0, start, sub_partition_size).detach() + element_intervals[rank_id].append( + (start, start + sub_partition_size)) + comm_partitions[comm_id].append(sub_partition) + start = start + sub_partition_size + if rank_id == (self.world_size - 1): + comm_id += 1 + + # [rank] -> [comm_id] + sub_partitions = [] + for _ in range(self.world_size): + sub_partitions.append([]) + for comm_id, partitions in enumerate(comm_partitions): + for rank_id, partition in enumerate(partitions): + sub_partitions[rank_id].append(partition) + + return comm_partitions, sub_partitions, element_intervals, sub_partition_size, num_comm_intervals + + def get_all_sub_partition_info(self, + tensor_list, + all_element_intervals, + ): + params_not_local = [] + + # [rank] -> [comm-id] -> [param/offset] + params_in_rank_sub_partition = [] + params_in_rank_sub_partitions_offsets = [] + + for rank in range(self.world_size): + params_in_local_sub_partition = [] + local_sub_partition_offsets = [] + comm_tensor_list = [] + comm_offset_list = [] + current_index = 0 + prev_comm_idx = 0 + for iii, tensor in enumerate(tensor_list): + tensor_size = tensor.numel() + results_list = _range_check(current_index, + all_element_intervals[rank], + tensor_size) + for contained, offset, comm_idx in results_list: + if contained: + if prev_comm_idx != comm_idx: + params_in_local_sub_partition.append( + comm_tensor_list) + comm_tensor_list = [] + local_sub_partition_offsets.append( + comm_offset_list) + comm_offset_list = [] + comm_tensor_list.append(tensor) + comm_offset_list.append(offset) + prev_comm_idx = comm_idx + elif rank == self.local_rank: + params_not_local.append(tensor) + + current_index = current_index + tensor_size + + # assert len(comm_tensor_list) > 0 + # assert len(comm_offset_list) > 0 + params_in_local_sub_partition.append(comm_tensor_list) + local_sub_partition_offsets.append(comm_offset_list) + + params_in_rank_sub_partition.append(params_in_local_sub_partition) + params_in_rank_sub_partitions_offsets.append( + local_sub_partition_offsets) + + return params_in_rank_sub_partition, params_in_rank_sub_partitions_offsets, params_not_local + + def get_flat_sub_partitions(self, + comm_tensor_list, + comm_param_offsets, + sub_partition_size, + dtype, + default_device, + num_comm_intervals=None, + return_partition_params=False): + partition_params = [] + final_param_offsets = [] + flat_sub_partitions = [] + for tensor_list, param_offsets in zip(comm_tensor_list, comm_param_offsets): + flat_tensor_list = [] + current_size = 0 + my_offsets = [] + my_params = [] + + for i, tensor in enumerate(tensor_list): + if tensor.grad is None: + tensor.grad = torch.zeros(tensor.size(), + dtype=tensor.dtype, + device=tensor.device) + param = tensor + tensor = tensor.grad + num_elements = tensor.numel() + tensor_offset = 0 + + # we need to offset to get to the right element + if i == 0 and param_offsets[i] > 0: + tensor_offset = param_offsets[i] + num_elements = num_elements - tensor_offset + + # We don't need all elements of the tensor if this tensor is + # larger than we have space for in our curr sub-partition + if num_elements > (sub_partition_size - current_size): + num_elements = sub_partition_size - current_size + + # we need a narrow view of the tensor based on the tensor offset and number of elements that + # we need from this tensor + if tensor_offset > 0 or num_elements < tensor.numel(): + 
flat_tensor_list.append(tensor.contiguous().view(-1).narrow( + 0, + int(tensor_offset), + int(num_elements)).to(dtype)) + else: + flat_tensor_list.append(tensor.to(dtype)) + my_params.append(param) + + # remember offset into partition and #elems for this tensor + my_offsets.append((current_size, num_elements)) + + current_size = current_size + num_elements + + # this means its the last partition and does not align with the dp boundary. We need to pad before flattening + if current_size < sub_partition_size: + my_offsets.append((None, None)) + my_params.append(None) + if len(tensor_list) == 0: + assert default_device != None + flat_tensor_list.append( + torch.zeros(int(sub_partition_size - current_size), + dtype=dtype, + device=default_device)) + else: + flat_tensor_list.append( + torch.zeros(int(sub_partition_size - current_size), + dtype=dtype, + device=tensor_list[0].device)) + partition_params.append(my_params) # flat_tensor_list) + final_param_offsets.append(my_offsets) + assert len(flat_tensor_list) == len(my_offsets), "{} {}".format( + len(flat_tensor_list), len(my_offsets)) + flat_sub_partitions.append(self.flatten(flat_tensor_list)) + if num_comm_intervals is not None and len( + flat_sub_partitions) < num_comm_intervals: + # print("padding w. sub partitions to ensure uniform communication") + device = flat_sub_partitions[0].device + for _ in range(num_comm_intervals - len(flat_sub_partitions)): + flat_sub_partitions.append( + torch.zeros(int(sub_partition_size), + dtype=dtype, + device=device)) + partition_params.append([None]) + final_param_offsets.append([(None, None)]) + + if return_partition_params: + assert len(flat_sub_partitions) == len(partition_params) + assert len(partition_params) == len(final_param_offsets), "{} {}".format(len(partition_params), + len(final_param_offsets)) + return flat_sub_partitions, partition_params, final_param_offsets + return flat_sub_partitions + + def zero_grad(self, set_grads_to_None=False): + """ + Zero FP16 parameter grads. + """ + # FP32 grad should never exist. 
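Most of `get_flat_sub_partitions` above is bookkeeping for offsets and tensors that straddle partition boundaries; the core idea is simply to concatenate this rank's gradient pieces and zero-pad to the fixed sub-partition size so every rank hands equally sized buffers to the collective. A stripped-down sketch of that core, ignoring offsets and partial overlap:

```python
import torch
from torch._utils import _flatten_dense_tensors  # the helper aliased as self.flatten

def flat_padded_partition(grads, sub_partition_size, dtype=torch.float32):
    """Concatenate a rank's gradients and zero-pad up to sub_partition_size.
    Simplified: assumes everything fits in one sub-partition at offset 0."""
    pieces = [g.contiguous().view(-1).to(dtype) for g in grads]
    used = sum(p.numel() for p in pieces)
    if used < sub_partition_size:
        pieces.append(torch.zeros(sub_partition_size - used,
                                  dtype=dtype, device=pieces[0].device))
    return _flatten_dense_tensors(pieces)

grads = [torch.randn(3, 2), torch.randn(5)]
flat = flat_padded_partition(grads, sub_partition_size=16)
assert flat.numel() == 16 and flat[11:].abs().sum() == 0
```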
+ # For speed, set model fp16 grad to None by default + for group in self._param_groups: + for p in group: + if set_grads_to_None: + p.grad = None + else: + if p.grad is not None: + p.grad.detach_() + p.grad.zero_() + + def free_grad_in_param_list(self, param_list): + for p in param_list: + if isinstance(p, list): + for _p in p: + _p.grad = None + else: + p.grad = None + + def flatten_dense_tensors_sub_partition_aligned(self, + tensor_list, + max_elements_per_comm + ): + assert max_elements_per_comm >= self.world_size, f"max_elements_per_comm {max_elements_per_comm} < dp {self.world_size}" + + num_elements = sum(t.numel() for t in tensor_list) + + # Compute aligned partition size based on parameter count + aligned_param_partition_size = math.ceil( + num_elements / self.world_size) + + # Compute aligned partition size based on communication size + aligned_comm_partition_size = int( + max_elements_per_comm // self.world_size) + + if aligned_param_partition_size <= aligned_comm_partition_size: + sub_partition_count = 1 + sub_partition_size = aligned_param_partition_size + else: + sub_partition_count = math.ceil(aligned_param_partition_size / + aligned_comm_partition_size) + sub_partition_size = aligned_comm_partition_size + + # Compute required padding for alignment to dp and max_elements_per_comm + padding = (sub_partition_count * sub_partition_size * + self.world_size) - num_elements + + if self.verbose: + print_rank_0( + f"sub_partition_count: {sub_partition_count}, sub_partition_size: {sub_partition_size}, padding: {padding}") + print_rank_0( + f"number of elements with padding: {num_elements} + {padding} = {num_elements + padding}") + + if padding == 0: + aligned_tensor_list = tensor_list + else: + pad_tensor = torch.zeros(padding, + device=tensor_list[0].device, + dtype=tensor_list[0].dtype) + aligned_tensor_list = tensor_list + [pad_tensor] + + flat_tensors = self.flatten(aligned_tensor_list) + return flat_tensors + + # def reduce_gradients(self): + # # LSG: this reduce gradients method no longer works + # # after code change, please use DataParallelGradientHandler instead + # + # world_size = gpc.get_world_size(self.parallel_mode) + # local_rank = gpc.get_local_rank(self.parallel_mode) + # + # for i, group in enumerate(self._param_groups): + # num_comm_intervals = self.num_comm_intervals_per_group[i] + # all_sub_partitions = [] + # for rank in range(world_size): + # # gsp is list of partitions indexed by comm_idx + # grad_sub_partitions = self.get_flat_sub_partitions( + # comm_tensor_list=self.params_in_rank_sub_partitions[i][rank], + # comm_param_offsets=self.params_in_rank_sub_partitions_offsets[i][rank], + # dtype=self.local_sub_partitions_of_groups[i][0].dtype, + # default_device=self.default_device, + # sub_partition_size=self.sub_partition_sizes[i], + # num_comm_intervals=self.num_comm_intervals_per_group[i]) + # all_sub_partitions.append(grad_sub_partitions) + # + # assert len(grad_sub_partitions) == num_comm_intervals + # + # local_comm_partitions = [] + # for comm_idx in range(num_comm_intervals): + # single_comm_all_partitions = [] + # for rank in range(world_size): + # single_comm_all_partitions.append(all_sub_partitions[rank][comm_idx]) + # + # for partition in single_comm_all_partitions: + # partition.div_(world_size) + # + # dist.reduce_scatter(output=single_comm_all_partitions[local_rank], + # input_list=single_comm_all_partitions, + # group=gpc.get_group(self.parallel_mode)) + + def step(self, closure=None): + local_sub_partitions_grad_groups = [] + + for i, group in 
enumerate(self._param_groups): + # RS: update free grads w.r.t. sub partitions + # free gradients for all the parameters that are not updated by this process + self.free_grad_in_param_list(self.params_not_local[i]) + + # create flat gradient partitions for parameters updated by this process + local_grad_sub_partitions = self.get_flat_sub_partitions( + comm_tensor_list=self.params_in_rank_sub_partitions[i][self.local_rank], + comm_param_offsets=self.params_in_rank_sub_partitions_offsets[i][self.local_rank], + sub_partition_size=self.sub_partition_sizes[i], + dtype=self.local_sub_partitions_of_groups[i][0].dtype, + num_comm_intervals=self.num_comm_intervals_per_group[i], + default_device=self.default_device) + + # RS: update all our local params with sub-partition grads + for idx, sub_partition_param in enumerate(self.local_sub_partitions_of_groups[i]): + sub_partition_param.grad = local_grad_sub_partitions[idx] + + # RS: update free grads for sub-partitions + # release all the gradient since we have already created a necessary copy in dp_grad_partition + self.free_grad_in_param_list( + self.params_in_rank_sub_partitions[i][self.local_rank]) + + local_sub_partitions_grad_groups.append(local_grad_sub_partitions) + + if closure is None: + loss = self.optimizer.step() + else: + loss = self.optimizer.step(closure=closure) + + # RS: clear our sub partition grads + # LSG: not needed as amp is used instead + # get rid of the fp32 gradients. Not needed anymore + # for group in self.local_sub_partitions_of_groups: + # for idx, sub_partition_param in enumerate(group): + # sub_partition_param.grad = None + + # RS: all_gather/broadcast sub-partitions in separate comm calls + # gather the updated weights from everyone + for all_sub_partitions in self.parallel_comm_sub_partitioned_groups: + for comm_id, sub_partitions in enumerate(all_sub_partitions): + dist.all_gather(sub_partitions, + sub_partitions[self.local_rank], + group=gpc.get_group(self.dp_parallel_mode)) + + # TODO: we probably don't need this? just to be safe + for i in range(len(self._param_groups)): + updated_params = self.unflatten(self._param_groups_flat[i], + self._param_groups[i]) + for p, q in zip(self._param_groups[i], updated_params): + p.data = q.data + + return loss + + def _rigid_state_dict(self): + """Returns a dict that can be loaded for continued training with same DP degree + + Returns a dict containing the current state of this :class:`FP16_Optimizer` instance. + This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict + of the contained Pytorch optimizer. + + Example:: + + checkpoint = {} + checkpoint['model'] = model.state_dict() + checkpoint['optimizer'] = optimizer.state_dict() + torch.save(checkpoint, "saved.pth") + """ + state_dict = {} + for k, v in self.optimizer.state_dict().items(): + state_dict[k] = v + state_dict[ + 'local_sub_partitions_of_groups'] = self.local_sub_partitions_of_groups + return state_dict + + def state_dict(self): + """ + Returns a dict containing the current state of this Optimizer instance. + This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict + of the contained Pytorch optimizer. + + Example:: + + checkpoint = {} + checkpoint['model'] = model.state_dict() + checkpoint['optimizer'] = optimizer.state_dict() + torch.save(checkpoint, "saved.pth") + """ + return self._rigid_state_dict() + + def load_state_dict(self, + state_dict, + load_optimizer_states=True, + ): + """ + Loads a state_dict created by an earlier call to state_dict(). 
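Putting the pieces of `step()` above together: each rank attaches only its own flat gradient sub-partitions, lets the wrapped optimizer update just that shard, and then all-gathers the refreshed shards so every rank sees the full parameters again. A condensed outline of that flow follows; it assumes an already-initialized process group, uses illustrative argument names, and elides the padding/offset bookkeeping.

```python
import torch.distributed as dist

def zero1_step_outline(local_master_sub_partitions, local_grad_sub_partitions,
                       inner_optimizer, comm_sub_partitioned_groups,
                       local_rank, dp_group):
    """Condensed, hedged sketch of ZeroRedundancyOptimizer_Level_1.step()."""
    # 1. Attach this rank's flat gradient sub-partitions to the flat master
    #    sub-partitions it owns.
    for master, grad in zip(local_master_sub_partitions, local_grad_sub_partitions):
        master.grad = grad

    # 2. The wrapped optimizer therefore only updates this rank's shard.
    inner_optimizer.step()

    # 3. Gather every rank's refreshed shard, one all_gather per comm interval,
    #    so all ranks end up with the complete updated parameters.
    for all_sub_partitions in comm_sub_partitioned_groups:
        for sub_partitions in all_sub_partitions:
            dist.all_gather(sub_partitions, sub_partitions[local_rank],
                            group=dp_group)
```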
+ If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``, + whose parameters in turn came from ``model``, it is expected that the user + will call ``model.load_state_dict()`` before + ``fp16_optimizer_instance.load_state_dict()`` is called. + + Example:: + + model = torch.nn.Linear(D_in, D_out).cuda().half() + optimizer = torch.optim.SGD(model.parameters(), lr=1e-3) + optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0) + ... + checkpoint = torch.load("saved.pth") + model.load_state_dict(checkpoint['model']) + optimizer.load_state_dict(checkpoint['optimizer']) + """ + self._rigid_load_state_dict( + state_dict, + load_optimizer_states) + + def _rigid_load_state_dict(self, state_dict, load_optimizer_states=True): + # I think it should actually be ok to reload the optimizer before the model. + state_dict_ = state_dict.copy() + local_sub_partitions_of_groups = state_dict_.pop( + 'local_sub_partitions_of_groups') + + if load_optimizer_states: + self.optimizer.load_state_dict(state_dict_) + + for curr_group, saved_group in zip(self.local_sub_partitions_of_groups, + local_sub_partitions_of_groups): + for curr_param, saved_param in zip(curr_group, saved_group): + curr_param.data.copy_(saved_param.data) diff --git a/colossalai/nn/optimizer/zero_redundancy_optimizer_level_2.py b/colossalai/nn/optimizer/zero_redundancy_optimizer_level_2.py new file mode 100644 index 000000000..17e277843 --- /dev/null +++ b/colossalai/nn/optimizer/zero_redundancy_optimizer_level_2.py @@ -0,0 +1,2343 @@ +''' +Copyright 2019 The Microsoft DeepSpeed Team +''' + +import math + +import torch +import torch.distributed as dist +try: + from deepspeed.git_version_info import version + from deepspeed.moe.utils import is_moe_param + from deepspeed.ops.adam import DeepSpeedCPUAdam + from deepspeed.ops.op_builder import UtilsBuilder + from deepspeed.runtime.zero.config import ZERO_OPTIMIZATION_GRADIENTS +except ImportError: + print('DeepSpeed is required if you want to use ZeRO.') +from packaging import version as pkg_version +from torch._six import inf +from torch.distributed.distributed_c10d import _get_global_rank +from torch.optim import Optimizer + +from colossalai.core import global_context as gpc +from colossalai.registry import OPTIMIZER_WRAPPERS +from colossalai.utils import report_memory_usage +from ._utils import is_model_parallel_parameter +from .loss_scaler import LossScaler, DynamicLossScaler +from ...context.parallel_mode import ParallelMode + +# Toggle this to true to enable correctness test +# with gradient partitioning and without +pg_correctness_test = False + + +def input(msg): + return + + +def split_half_float_double(tensors): + dtypes = [ + "torch.cuda.HalfTensor", + "torch.cuda.FloatTensor", + "torch.cuda.DoubleTensor" + ] + buckets = [] + for i, dtype in enumerate(dtypes): + bucket = [t for t in tensors if t.type() == dtype] + if bucket: + buckets.append(bucket) + return buckets + + +def isclose(a, b, rtol=1e-09, atol=0.0): + return abs(a - b) <= max(rtol * max(abs(a), abs(b)), atol) + + +def lcm(x, y): + from fractions import gcd # or can import gcd from `math` in Python 3 + return x * y // gcd(x, y) + + +def get_alignment_padding(tensor_list, alignment): + num_elements = sum([tensor.numel() for tensor in tensor_list]) + remainder = num_elements % alignment + return (alignment - remainder) if remainder else remainder + + +def move_to_cpu(tensor_list): + for tensor in tensor_list: + tensor.data = tensor.data.cpu() + + +def print_rank_msg(msg): + print(f"rank 
{dist.get_rank()} - {msg}") + + +@OPTIMIZER_WRAPPERS.register_module +class ZeroRedundancyOptimizer_Level_2(Optimizer): + """ + ZeroRedundancyOptimizer_Level_2 is designed to reduce the memory footprint + required for training large deep learning models. + + For more details please see ZeRO: Memory Optimization Towards Training A Trillion Parameter Models + https://arxiv.org/abs/1910.02054 + + """ + + def __init__(self, + init_optimizer, + dp_parallel_mode=ParallelMode.DATA, + static_loss_scale=1.0, + dynamic_loss_scale=False, + dynamic_loss_args=None, + verbose=False, + contiguous_gradients=True, + reduce_bucket_size=500000000, + allgather_bucket_size=5000000000, + reduce_scatter=True, + overlap_comm=False, + cpu_offload=False, + clip_grad=0.0, + allreduce_always_fp32=False, + postscale_gradients=True, + gradient_predivide_factor=1.0, + gradient_accumulation_steps=1, + ignore_unused_parameters=True, + round_robin_gradients=False, + fp16_master_weights_and_gradients=False): + # mpu = None is removed from the parameter list + # tensor parallel will be automatically detected later + + # LSG: default arguments for compatibility + has_moe_layers = False + partition_grads = True + expert_parallel_group = None + expert_data_parallel_group = None + self.timers = None + self.defaults = init_optimizer.defaults + + dp_process_group = gpc.get_group(dp_parallel_mode) + if gpc.get_world_size(dp_parallel_mode) == 1: + partition_grads = False # for compatibility with dp size = 1 + + self.verbose = verbose + + if dist.get_rank() == 0 and self.verbose: + print(f"Reduce bucket size {reduce_bucket_size}") + print(f"Allgather bucket size {allgather_bucket_size}") + print(f"CPU Offload: {cpu_offload}") + print( + f'Round robin gradient partitioning: {round_robin_gradients}') + # The fused optimizer does all the work. We need this layer for two reasons: + # 1. maintain same user API from apex.fp16_utils + # 2. keep common stuff here in case we need to add new fused optimizer later + + # differences from apex.fp16_utils: + # - assume all model params in fp16 + # - assume all params require grad + # - flat by groups, not keeping state. TODO: remove state explicitly? + # - master grad and unflat master weight never exist. TODO: a way to save out unflat master?
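# [Editor's note] The comment block above summarizes the layout this file builds:
# each fp16 param group is flattened into one contiguous buffer, that buffer is
# split across the data-parallel ranks, and only the local shard gets an fp32
# master copy for the inner optimizer to update. A minimal standalone sketch of
# that layout follows; `shard_fp16_group`, `world_size` and `rank` are
# illustrative names, not code from this patch.
import torch


def shard_fp16_group(params, world_size, rank, alignment=2):
    """Flatten fp16 params, pad to a multiple of world_size * alignment,
    and return (flat fp16 buffer, fp32 master copy of the local shard)."""
    flat = torch.cat([p.detach().reshape(-1) for p in params])
    unit = world_size * alignment
    pad = (unit - flat.numel() % unit) % unit
    if pad:
        flat = torch.cat([flat, flat.new_zeros(pad)])
    shard_size = flat.numel() // world_size
    local_fp16 = flat.narrow(0, rank * shard_size, shard_size)
    # Only the local shard is kept in fp32; the other shards are owned
    # (and updated) by the other data-parallel ranks.
    master = local_fp16.clone().float().detach().requires_grad_(True)
    return flat, master


# Example: 8 fp16 elements over 4 ranks -> each rank owns a 2-element fp32 shard.
params = [torch.randn(5, dtype=torch.float16), torch.randn(3, dtype=torch.float16)]
flat, master = shard_fp16_group(params, world_size=4, rank=1)
assert flat.numel() == 8 and master.numel() == 2 and master.dtype == torch.float32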
+ if not torch.cuda.is_available(): + raise SystemError("Cannot use fp16 without CUDA.") + self.optimizer = init_optimizer + + # Load pre-built or JIT compile (un)flatten ops + util_ops = UtilsBuilder().load() + self.flatten = util_ops.flatten + self.unflatten = util_ops.unflatten + + # ZeRO stage 1 (False) or 2 (True) + self.partition_gradients = partition_grads + + self.reduce_scatter = reduce_scatter + + self.overlap_comm = overlap_comm + + self.cpu_offload = cpu_offload + + self.deepspeed_adam_offload = cpu_offload + + self.device = torch.cuda.current_device() if not self.cpu_offload else 'cpu' + + self.dp_process_group = dp_process_group + + # expert parallel group + self.ep_process_group = expert_parallel_group + + # data parallel group for experts + self.expert_dp_process_group = expert_data_parallel_group + + # data parallel size for non-experts + dp_size = dist.get_world_size(group=self.dp_process_group) + + # For MoE models this may be different for different param group + # It will be modified during MoE setup later in the init + self.real_dp_process_group = [ + dp_process_group for i in range(len(self.optimizer.param_groups)) + ] + self.partition_count = [dp_size for i in range( + len(self.optimizer.param_groups))] + + self.is_gradient_accumulation_boundary = True + + # CPU-Offload requires contiguous gradients + self.contiguous_gradients = contiguous_gradients or cpu_offload + + self.has_moe_layers = has_moe_layers + + if self.has_moe_layers: + self._configure_moe_settings() + + if not gpc.is_initialized(ParallelMode.TENSOR) or gpc.get_world_size(ParallelMode.TENSOR) == 1: + self.model_parallel_group = None + self.model_parallel_rank = 0 + else: + self.model_parallel_group = gpc.get_group(ParallelMode.TENSOR) + self.model_parallel_rank = gpc.get_local_rank(ParallelMode.TENSOR) + + self.overflow = False + self.clip_grad = clip_grad + self.allreduce_always_fp32 = allreduce_always_fp32 + self.gradient_predivide_factor = gradient_predivide_factor + self.postscale_gradients = postscale_gradients + self.gradient_accumulation_steps = gradient_accumulation_steps + self.micro_step_id = 0 + self.ignore_unused_parameters = ignore_unused_parameters + self.round_robin_gradients = round_robin_gradients + + self.extra_large_param_to_reduce = None + self.fp16_master_weights_and_gradients = fp16_master_weights_and_gradients + + if self.fp16_master_weights_and_gradients: + assert self.cpu_offload and type(self.optimizer) in [ + DeepSpeedCPUAdam], f"fp16_master_and_gradients requires optimizer to support keeping fp16 master and gradients while keeping the optimizer states in fp32. Currently only supported using ZeRO-Offload with DeepSpeedCPUAdam. But current setting is ZeRO-Offload:{self.cpu_offload} and optimizer type {type(self.optimizer)}.
Either disable fp16_master_weights_and_gradients or enable ZeRO-2 Offload with DeepSpeedCPUAdam" + + if self.reduce_scatter: + assert not self.allreduce_always_fp32, "allreduce_always_fp32 is not yet supported with ZeRO-2 with reduce scatter enabled" + assert self.gradient_predivide_factor == 1.0, "gradient_predivide_factor != 1.0 is not yet supported with ZeRO-2 with reduce scatter enabled" + assert self.postscale_gradients, "pre-scale gradients is not yet supported with ZeRO-2 with reduce scatter enabled" + + # param flattened by groups + self.fp16_groups = [] + self.fp16_groups_flat = [] + + # param partitioned by data parallel degree + # this will contain a list of equal sized tensors + # each of which will be updated by a different process + self.parallel_partitioned_fp16_groups = [] + + # a single 32-bit partition of the parallel partitioned parameters + # that this process will update + self.single_partition_of_fp32_groups = [] + + # param partition info + + # These are the parameters in each group that will not be updated by this process directly + self.params_not_in_partition = [] + + # These are the parameters that will be updated by this process directly + self.params_in_partition = [] + + # Offset from the first paramter in the the self.params_in_partition + # the parameter boundaries may not align with partition boundaries + # so we need to keep track of the offset + self.first_offset = [] + + # number of elements per partition in each group + self.partition_size = [] + + # align nccl all-gather send buffers to 4-bye boundary + # 4-byte alignment/sizeof(fp16) = 2 + self.nccl_start_alignment_factor = 2 + + assert ( + allgather_bucket_size % self.nccl_start_alignment_factor == 0), f"allgather_bucket_size must be a multiple of nccl_start_alignment_factor, {self.nccl_start_alignment_factor} " + + self.all_reduce_print = False + self.dtype = self.optimizer.param_groups[0]['params'][0].dtype + + self.round_robin_fp16_groups = [] + self.round_robin_fp6_indices = [] + + # padding on each partition for alignment purposes + self.groups_padding = [] + # loop to deal with groups + for i, param_group in enumerate(self.optimizer.param_groups): + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + + # push this group to list before modify + # TODO: Explore simplification that avoids the extra book-keeping by pushing the reordered group + self.fp16_groups.append(param_group['params']) + + # Record padding required to align group to world size + if partition_id == dist.get_world_size( + group=self.real_dp_process_group[i]) - 1: + padding = get_alignment_padding(self.fp16_groups[i], + self.partition_count[i]) + else: + padding = 0 + self.groups_padding.append(padding) + + # not sure why apex was cloning the weights before flattening + # removing cloning here + + if self.verbose: + report_memory_usage(f"Before moving param group {i} to CPU") + # move all the parameters to cpu to free up GPU space for creating flat buffer + move_to_cpu(self.fp16_groups[i]) + if self.verbose: + report_memory_usage(f"After moving param group {i} to CPU") + + # Reorder group parameters for load balancing of gradient partitioning during backward among ranks. + # This ensures that gradients are reduced in a fashion such that ownership round robins among the ranks. + # For example, rather than 3 gradients (g_n+2, g_n+1, g_n) that are reduced consecutively belonging + # to the same rank, instead they will belong to 3 ranks (r_m+2, r_m+1, r_m). 
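# [Editor's note] A minimal sketch of the round-robin dealing described in the
# comment above: parameters are dealt into `num_partitions` buckets by index
# modulo and then concatenated, so that consecutive gradients produced during
# backward are owned by different ranks. This is a simplified illustration of
# what `_round_robin_reorder` (defined later in this file) does, not the
# patch's own code.
def round_robin_reorder(items, num_partitions):
    """Return (reordered items, map from original index to new index)."""
    buckets = [[] for _ in range(num_partitions)]
    for idx, item in enumerate(items):
        buckets[idx % num_partitions].append((idx, item))
    reordered, index_map = [], {}
    for bucket in buckets:
        for original_idx, item in bucket:
            index_map[original_idx] = len(reordered)
            reordered.append(item)
    return reordered, index_map


# Example: six params over three partitions. After reordering, a contiguous
# split into three equal partitions assigns original params 0..5 the owners
# 0, 1, 2, 0, 1, 2 instead of 0, 0, 1, 1, 2, 2.
reordered, index_map = round_robin_reorder(list("abcdef"), num_partitions=3)
assert reordered == ["a", "d", "b", "e", "c", "f"]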
+ if self.round_robin_gradients: + round_robin_tensors, round_robin_indices = self._round_robin_reorder( + self.fp16_groups[i], + dist.get_world_size(group=self.real_dp_process_group[i]) + ) + else: + round_robin_tensors = self.fp16_groups[i] + round_robin_indices = list(range(len(self.fp16_groups[i]))) + + self.round_robin_fp16_groups.append(round_robin_tensors) + self.round_robin_fp6_indices.append(round_robin_indices) + + # create flat buffer in CPU and move to GPU + self.fp16_groups_flat.append( + self.flatten_dense_tensors_aligned( + self.round_robin_fp16_groups[i], + self.nccl_start_alignment_factor * + dist.get_world_size(group=self.real_dp_process_group[i])).cuda( + torch.cuda.current_device())) + + if self.verbose: + report_memory_usage( + f"After flattening and moving param group {i} to GPU") + + if dist.get_rank(group=self.real_dp_process_group[i]) == 0: + report_memory_usage( + f"After Flattening and after emptying param group {i} cache") + + # set model fp16 weight to slices of flattened buffer + self._update_model_fp16_weights(i) + + # divide the flat weights into near equal partition equal to the data parallel degree + # each process will compute on a different part of the partition + data_parallel_partitions = self.get_data_parallel_partitions( + self.fp16_groups_flat[i], + i) + self.parallel_partitioned_fp16_groups.append( + data_parallel_partitions) + + # verify that data partition start locations are 4-byte aligned + for partitioned_data in data_parallel_partitions: + assert (partitioned_data.data_ptr() % + (2 * self.nccl_start_alignment_factor) == 0) + + # a partition of the fp32 master weights that will be updated by this process + if not fp16_master_weights_and_gradients: + self.single_partition_of_fp32_groups.append( + self.parallel_partitioned_fp16_groups[i][partition_id].to( + self.device).clone().float().detach()) + else: + self.single_partition_of_fp32_groups.append( + self.parallel_partitioned_fp16_groups[i][partition_id].to( + self.device).clone().half().detach()) + + # modify optimizer of have flat master weight + self.single_partition_of_fp32_groups[ + i].requires_grad = True # keep this in case internal optimizer uses it + param_group['params'] = [self.single_partition_of_fp32_groups[i]] + + partition_size = len(self.fp16_groups_flat[i]) / dist.get_world_size( + group=self.real_dp_process_group[i]) + params_in_partition, params_not_in_partition, first_offset = self.get_partition_info( + self.round_robin_fp16_groups[i], + partition_size, + partition_id) + + self.partition_size.append(partition_size) + self.params_in_partition.append(params_in_partition) + self.params_not_in_partition.append(params_not_in_partition) + self.first_offset.append(first_offset) + + for rank in range(dist.get_world_size()): + if dist.get_rank() == rank and self.verbose: + print( + f"Rank: {rank} partition count {self.partition_count} and sizes{[(p.numel(), self.is_moe_param_group[i] if hasattr(self, 'is_moe_param_group') else False) for i, p in enumerate(self.single_partition_of_fp32_groups)]} " + ) + dist.barrier() + # exit(0) + self.reduce_bucket_size = int(reduce_bucket_size) + self.allgather_bucket_size = int(allgather_bucket_size) + + self.reduction_event = torch.cuda.Event( + enable_timing=False, blocking=False) + self.reduction_stream = torch.cuda.Stream() + self.cpu_computation_stream = torch.cuda.Stream() + self.copy_grad_stream = torch.cuda.Stream() + self.callback_queued = False + + self.param_dict = {} + + # map between param_id and bool to specify if a param is in this 
partition + self.is_param_in_current_partition = {} + + self.grads_in_ipg_bucket = [] + self.params_in_ipg_bucket = [] + self.elements_in_ipg_bucket = 0 + self.params_already_reduced = [] + self._release_ipg_buffers() + self.previous_reduced_grads = None + self.ipg_bucket_has_moe_params = False + + # simplified param id + self.param_id = {} + + largest_param_numel = 0 + count = 0 + for i, params_group in enumerate(self.fp16_groups): + for param in params_group: + unique_id = id(param) + self.param_id[unique_id] = count + self.param_dict[count] = param + self.params_already_reduced.append(False) + if param.numel() > largest_param_numel: + largest_param_numel = param.numel() + count = count + 1 + + for param_group in self.params_in_partition: + for param in param_group: + self.is_param_in_current_partition[self.get_param_id( + param)] = True + + for param_group in self.params_not_in_partition: + for param in param_group: + self.is_param_in_current_partition[self.get_param_id( + param)] = False + + if self.cpu_offload: + self.accumulated_grads_in_cpu = {} + self.norm_for_param_grads = {} + self.local_overflow = False + self.grad_position = {} + self.temp_grad_buffer_for_cpu_offload = torch.zeros( + largest_param_numel, + device=self.device, + dtype=self.dtype).pin_memory() + self.temp_grad_buffer_for_gpu_offload = torch.zeros( + largest_param_numel, + device=torch.cuda.current_device(), + dtype=self.dtype) + + for i, params_group in enumerate(self.fp16_groups): + self.get_grad_position(i, + self.params_in_partition[i], + self.first_offset[i], + self.partition_size[i]) + + # mapping from parameter to partition that it belongs to + self.param_to_partition_ids = {} + + # stores if a partition has been reduced in this step + self.is_partition_reduced = {} + + # number of grads in partition that still need to be computed + self.remaining_grads_in_partition = {} + + # total number of grads in partition + self.total_grads_in_partition = {} + + # stores if a grad in a partition has been computed or not + self.is_grad_computed = {} + + # stores the offset at which a parameter gradient needs to be inserted in a partition + self.grad_partition_insertion_offset = {} + + # the offset in the gradient at which it must be inserted at the beginning of the partition + self.grad_start_offset = {} + + # will store the averaged gradients required by this partition + self.averaged_gradients = {} + + # store index of first parameter in each partition + self.first_param_index_in_partition = {} + + # initializes all data structures for implementing gradient partitioning + self.initialize_gradient_partitioning_data_structures() + + # resets the data structure value for the next backward propagation + self.reset_partition_gradient_structures() + + # creates backward hooks for gradient partitioning + if self.partition_gradients or self.overlap_comm: + self.create_reduce_and_remove_grad_hooks() + + # we may have a way of fusing dynamic scale. 
Do not support for now + if self.dtype == torch.float or not dynamic_loss_scale: + loss_scale_value = 1.0 if self.dtype == torch.float else static_loss_scale + + self.dynamic_loss_scale = False + self.loss_scaler = LossScaler(scale=loss_scale_value) + cur_iter = 0 + else: + if dynamic_loss_args is None: + self.loss_scaler = DynamicLossScaler() + else: + self.loss_scaler = DynamicLossScaler(**dynamic_loss_args) + + self.dynamic_loss_scale = True + + if self.verbose: + report_memory_usage("Before initializing optimizer states") + self.initialize_optimizer_states() + if self.verbose: + report_memory_usage("After initializing optimizer states") + + if dist.get_rank() == 0: + print(f"optimizer state initialized") + + if dist.get_rank(group=self.dp_process_group) == 0: + report_memory_usage(f"After initializing ZeRO optimizer") + + def _configure_moe_settings(self): + assert self.contiguous_gradients, "Contiguous Gradients in ZeRO Stage 2 must be set to True for MoE. Other code paths are not tested with MoE" + assert self.reduce_scatter, "Reduce Scatter in ZeRO Stage 2 must be set to True for MoE. Other code paths are not tested with MoE" + + def is_moe_group(group): + return 'moe' in group and group['moe'] + + assert any([is_moe_group(group) for group in + self.optimizer.param_groups]), "The model has moe layers, but None of the param groups are marked as MoE. Create a param group with 'moe' key set to True before creating optimizer" + self.is_moe_param_group = [] + for i, group in enumerate(self.optimizer.param_groups): + if is_moe_group(group): + assert all( + [is_moe_param(param) for param in group['params']]), "All params in MoE group must be MoE params" + self.real_dp_process_group[i] = self.expert_dp_process_group + self.partition_count[i] = dist.get_world_size( + group=self.expert_dp_process_group) + self.is_moe_param_group.append(True) + else: + self.is_moe_param_group.append(False) + + assert self.expert_dp_process_group is not None, "Expert data parallel group should be configured with MoE" + assert self.ep_process_group is not None, "Expert parallel group should be configured with MoE" + + def _update_model_fp16_weights(self, group_index): + updated_params = self.unflatten(self.fp16_groups_flat[group_index], + self.round_robin_fp16_groups[group_index]) + for p, q in zip(self.round_robin_fp16_groups[group_index], updated_params): + p.data = q.data + + # set model fp16 weight to slices of reordered flattened buffer + for param_index, param in enumerate(self.fp16_groups[group_index]): + new_index = self.round_robin_fp6_indices[group_index][param_index] + param.data = self.round_robin_fp16_groups[group_index][new_index].data + + def _round_robin_reorder(self, tensor_list, num_partitions): + + # disable round robin if need to debug something + # return tensor_list, list(range(len(tensor_list))) + + partition_tensors = {} + + for i, tensor in enumerate(tensor_list): + j = i % num_partitions + if not j in partition_tensors: + partition_tensors[j] = [] + partition_tensors[j].append((i, tensor)) + + reordered_tensors = [] + reordered_indices = {} + + for partition_index in partition_tensors.keys(): + for i, (original_index, tensor) in enumerate(partition_tensors[partition_index]): + reordered_indices[original_index] = len(reordered_tensors) + reordered_tensors.append(tensor) + + return reordered_tensors, reordered_indices + + def _release_ipg_buffers(self): + if self.contiguous_gradients: + self.ipg_buffer = None + self.grads_in_partition = None + self.grads_in_partition_offset = 0 + + def 
initialize_optimizer_states(self): + + for i, group in enumerate(self.fp16_groups): + single_grad_partition = torch.zeros( + int(self.partition_size[i]), + dtype=self.single_partition_of_fp32_groups[i].dtype, + device=self.device) + self.single_partition_of_fp32_groups[ + i].grad = single_grad_partition.pin_memory( + ) if self.cpu_offload else single_grad_partition + + self.optimizer.step() + + if not self.cpu_offload: + for group in self.single_partition_of_fp32_groups: + group.grad = None # class init + + return + + ######################################################################### + #################### ZeRO Stage 1 - reduce gradients #################### + ######################################################################### + + def reduce_gradients(self, pipeline_parallel=False): + world_size = dist.get_world_size(self.dp_process_group) + my_rank = dist.get_rank(self.dp_process_group) + + # with PP we must create ipg buffer, since backward is handled outside zero + if pipeline_parallel and self.contiguous_gradients: + self.ipg_buffer = [] + buf_0 = torch.empty(int(self.reduce_bucket_size), + dtype=self.dtype, + device=torch.cuda.current_device()) + self.ipg_buffer.append(buf_0) + self.ipg_index = 0 + + if not self.overlap_comm: + for i, group in enumerate(self.fp16_groups): + for param in group: + if param.grad is not None: + self.reduce_ready_partitions_and_remove_grads(param, i) + + # reduce any pending grads in either hook/non-hook case + self.overlapping_partition_gradients_reduce_epilogue() + + ######################################################################### + #########################ZeRO Partition Gradients######################## + ######################################################################### + + def get_first_param_index(self, group_id, param_group, partition_id): + for index, param in enumerate(param_group): + param_id = self.get_param_id(param) + if partition_id in self.param_to_partition_ids[group_id][param_id]: + return index + return None + + def initialize_gradient_partitioning_data_structures(self): + + for i, param_group in enumerate(self.round_robin_fp16_groups): + + total_partitions = dist.get_world_size( + group=self.real_dp_process_group[i]) + + self.param_to_partition_ids[i] = {} + self.is_partition_reduced[i] = {} + self.total_grads_in_partition[i] = {} + self.remaining_grads_in_partition[i] = {} + self.is_grad_computed[i] = {} + self.grad_partition_insertion_offset[i] = {} + self.grad_start_offset[i] = {} + self.first_param_index_in_partition[i] = {} + + for partition_id in range(total_partitions): + self.is_grad_computed[i][partition_id] = {} + self.grad_partition_insertion_offset[i][partition_id] = {} + self.grad_start_offset[i][partition_id] = {} + self.total_grads_in_partition[i][partition_id] = 0 + self.initialize_gradient_partition( + i, param_group, partition_id) + self.is_partition_reduced[i][partition_id] = False + self.first_param_index_in_partition[i][ + partition_id] = self.get_first_param_index( + i, + param_group, + partition_id) + + def independent_gradient_partition_epilogue(self): + if self.verbose: + self.report_ipg_memory_usage( + f"In ipg_epilogue before reduce_ipg_grads", 0) + self.reduce_ipg_grads() + if self.verbose: + self.report_ipg_memory_usage( + f"In ipg_epilogue after reduce_ipg_grads", 0) + + # if dist.get_rank() == 0: + # print()("Params already reduced %s", self.params_already_reduced) + for i in range(len(self.params_already_reduced)): + self.params_already_reduced[i] = False + + if 
self.overlap_comm: + torch.cuda.synchronize() + # It is safe to clear previously reduced grads of other partitions + self._clear_previous_reduced_grads() + + if self.cpu_offload is False: + for i, _ in enumerate(self.fp16_groups): + + if not i in self.averaged_gradients or self.averaged_gradients[i] is None: + self.averaged_gradients[i] = self.get_flat_partition( + self.params_in_partition[i], + self.first_offset[i], + self.partition_size[i], + dtype=self.dtype, + device=torch.cuda.current_device(), + return_tensor_list=True) + else: + avg_new = self.get_flat_partition(self.params_in_partition[i], + self.first_offset[i], + self.partition_size[i], + dtype=self.dtype, + device=torch.cuda.current_device(), + return_tensor_list=True) + + for accumulated_grad, new_avg_grad in zip(self.averaged_gradients[i], avg_new): + accumulated_grad.add_(new_avg_grad) + + self._release_ipg_buffers() + + # No need to keep the gradients anymore. + # All gradients required by the step + # are in self.averaged_gradients + self.zero_grad() + + if self.verbose: + report_memory_usage(f"End ipg_epilogue") + + # resets all partition to no reduced + # sets remaining grads to the total number of grads in each partition + # set is grad computed to false for all grads in partition + def reset_partition_gradient_structures(self): + for i, _ in enumerate(self.fp16_groups): + total_partitions = dist.get_world_size( + group=self.real_dp_process_group[i]) + for partition_id in range(total_partitions): + self.is_partition_reduced[i][partition_id] = False + self.remaining_grads_in_partition[i][ + partition_id] = self.total_grads_in_partition[i][partition_id] + + for param_id in self.is_grad_computed[i][partition_id]: + self.is_grad_computed[i][partition_id][param_id] = False + + def initialize_gradient_partition(self, i, param_group, partition_id): + def set_key_value_list(dictionary, key, value): + if key in dictionary: + dictionary[key].append(value) + else: + dictionary[key] = [value] + + def increment_value(dictionary, key): + if key in dictionary: + dictionary[key] += 1 + else: + dictionary[key] = 1 + + partition_size = self.partition_size[i] + + start_index = partition_size * partition_id + end_index = partition_size * (partition_id + 1) + + current_index = 0 + first_offset = 0 + + for param in param_group: + + param_size = param.numel() + param_id = self.get_param_id(param) + + if (current_index >= start_index and current_index < end_index): + set_key_value_list(self.param_to_partition_ids[i], + param_id, + partition_id) + increment_value(self.total_grads_in_partition[i], partition_id) + + self.is_grad_computed[i][partition_id][param_id] = False + + self.grad_partition_insertion_offset[i][partition_id][ + param_id] = current_index - start_index + self.grad_start_offset[i][partition_id][param_id] = 0 + + elif start_index > current_index and start_index < (current_index + + param_size): + assert ( + first_offset == 0), "This can happen either zero or only once as this must be the first tensor in the partition" + first_offset = start_index - current_index + + set_key_value_list(self.param_to_partition_ids[i], + param_id, + partition_id) + increment_value(self.total_grads_in_partition[i], partition_id) + + self.is_grad_computed[i][partition_id][param_id] = False + + self.grad_partition_insertion_offset[i][partition_id][param_id] = 0 + self.grad_start_offset[i][partition_id][param_id] = first_offset + + current_index = current_index + param_size + + def overlapping_partition_gradients_reduce_epilogue(self): + 
self.independent_gradient_partition_epilogue() + + def create_reduce_and_remove_grad_hooks(self): + self.grad_accs = [] + for i, param_group in enumerate(self.fp16_groups): + for param in param_group: + if param.requires_grad: + def wrapper(param, i): + param_tmp = param.expand_as(param) + grad_acc = param_tmp.grad_fn.next_functions[0][0] + + def reduce_partition_and_remove_grads(*notneeded): + self.reduce_ready_partitions_and_remove_grads( + param, i) + + grad_acc.register_hook( + reduce_partition_and_remove_grads) + self.grad_accs.append(grad_acc) + + wrapper(param, i) + + def get_param_id(self, param): + unique_id = id(param) + return self.param_id[unique_id] + + def report_ipg_memory_usage(self, tag, param_elems): + elem_count = self.elements_in_ipg_bucket + param_elems + percent_of_bucket_size = ( + 100.0 * elem_count) // self.reduce_bucket_size + if self.verbose: + report_memory_usage( + f"{tag}: elems in_bucket {self.elements_in_ipg_bucket} param {param_elems} max_percent {percent_of_bucket_size}" + ) + + # create a flat tensor aligned at the alignment boundary + def flatten_dense_tensors_aligned(self, tensor_list, alignment): + num_elements = 0 + for tensor in tensor_list: + num_elements = num_elements + tensor.numel() + + remaining = num_elements % alignment + + if remaining: + elements_to_add = alignment - remaining + pad_tensor = torch.zeros(elements_to_add, + device=tensor_list[0].device, + dtype=tensor_list[0].dtype) + padded_tensor_list = tensor_list + [pad_tensor] + + num_elements = num_elements + elements_to_add + else: + padded_tensor_list = tensor_list + + return self.flatten(padded_tensor_list) + + ############### Independent Partition Gradient ######################## + def reduce_independent_p_g_buckets_and_remove_grads(self, param, i): + if self.elements_in_ipg_bucket + param.numel() > self.reduce_bucket_size: + self.report_ipg_memory_usage("In ipg_remove_grads before reduce_ipg_grads", + param.numel()) + self.reduce_ipg_grads() + if self.contiguous_gradients and self.overlap_comm: + # Swap ipg_index between 0 and 1 + self.ipg_index = 1 - self.ipg_index + + self.report_ipg_memory_usage("In ipg_remove_grads after reduce_ipg_grads", + param.numel()) + + param_id = self.get_param_id(param) + assert self.params_already_reduced[param_id] == False, \ + f"The parameter {param_id} has already been reduced. \ + Gradient computed twice for this partition. 
\ + Multiple gradient reduction is currently not supported" + + if param.numel() > self.reduce_bucket_size: + self.extra_large_param_to_reduce = param + + elif self.contiguous_gradients: + # keeping the gradients contiguous to prevent memory fragmentation, and avoid flattening + new_grad_tensor = self.ipg_buffer[self.ipg_index].narrow( + 0, + self.elements_in_ipg_bucket, + param.numel()) + new_grad_tensor.copy_(param.grad.view(-1)) + param.grad.data = new_grad_tensor.data.view_as(param.grad) + + self.elements_in_ipg_bucket += param.numel() + + assert param.grad is not None, f"rank {dist.get_rank()} - Invalid to reduce Param {param_id} with None gradient" + + self.grads_in_ipg_bucket.append(param.grad) + self.params_in_ipg_bucket.append((i, param, param_id)) + + # make sure the average tensor function knows how to average the gradients + if is_moe_param(param): + self.ipg_bucket_has_moe_params = True + + self.report_ipg_memory_usage("End ipg_remove_grads", 0) + + def print_rank_0(self, message): + if dist.get_rank() == 0 and self.verbose: + print(message) + + def gradient_reduction_w_predivide(self, tensor): + + dp_world_size = dist.get_world_size(group=self.dp_process_group) + + tensor_to_allreduce = tensor + + if self.allreduce_always_fp32: + tensor_to_allreduce = tensor.float() + + if self.postscale_gradients: + if self.gradient_predivide_factor != 1.0: + tensor_to_allreduce.mul_(1. / self.gradient_predivide_factor) + + dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group) + + if self.gradient_predivide_factor != dp_world_size: + tensor_to_allreduce.mul_( + self.gradient_predivide_factor / dp_world_size) + else: + tensor_to_allreduce.div_(dp_world_size) + dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group) + + if self.allreduce_always_fp32 and tensor is not tensor_to_allreduce: + tensor.copy_(tensor_to_allreduce) + + return tensor + + def average_tensor(self, tensor): + if self.overlap_comm: + torch.cuda.synchronize() + stream = self.reduction_stream + else: + stream = torch.cuda.current_stream() + + with torch.cuda.stream(stream): + if not self.reduce_scatter: + self.gradient_reduction_w_predivide(tensor) + return + + # Accumulate destination ranks and bucket offsets for each gradient slice. + # Note: potential future optimization, record access pattern of parameters + # in backward pass and partition gradients w.r.t. access pattern so that our + # bucket is guaranteed to be contiguous w.r.t. 
ranks + rank_and_offsets = [] + real_dp_process_group = [] + curr_size = 0 + prev_id = -1 + + process_group = self.dp_process_group + # count = 0 + for i, param, param_id in self.params_in_ipg_bucket: + + process_group = self.dp_process_group + # Averages gradients at parameter level if ipg has a moe param + # Otherwise averaging is done at the entire buffer level at the end of the loop + if self.ipg_bucket_has_moe_params: + process_group = self.expert_dp_process_group if is_moe_param( + param) else self.dp_process_group + param.grad.data.div_( + dist.get_world_size(group=process_group)) + + partition_ids = self.param_to_partition_ids[i][param_id] + partition_size = self.partition_size[i] + # Get all partition ids + their offsets + partition_ids_w_offsets = [] + for partition_id in partition_ids: + offset = self.grad_start_offset[i][partition_id][param_id] + partition_ids_w_offsets.append((partition_id, offset)) + partition_ids_w_offsets.sort(key=lambda t: t[1]) + + # Calculate rank and offsets for grad slices + for idx in range(len(partition_ids_w_offsets)): + partition_id, offset = partition_ids_w_offsets[idx] + + # if dist.get_rank() == 0 and count < 100: + # print(f"Rank {dist.get_rank()} rank offet id {idx} calculated dp size {dist.get_world_size(group=process_group)} real dp size {dist.get_world_size(self.real_dp_process_group[i])} and dst: {partition_id}") + # count += 1 + + # Calculate numel for grad slice depending on partition location + if idx == len(partition_ids_w_offsets) - 1: + # Last partition_id uses its own offset + numel = param.numel() - offset + else: + # Set numel to next partition's offset + numel = partition_ids_w_offsets[idx + 1][1] - offset + + # Merge bucket ranges if they belong to the same rank + if partition_id == prev_id: + prev_pid, prev_size, prev_numel = rank_and_offsets[-1] + rank_and_offsets[-1] = (prev_pid, + prev_size, prev_numel + numel) + else: + rank_and_offsets.append( + (partition_id, curr_size, numel)) + real_dp_process_group.append(process_group) + curr_size += numel + prev_id = partition_id + + if not self.ipg_bucket_has_moe_params: + tensor.div_(dist.get_world_size(group=self.dp_process_group)) + + async_handles = [] + for i, (dst, bucket_offset, numel) in enumerate(rank_and_offsets): + grad_slice = tensor.narrow(0, int(bucket_offset), int(numel)) + # if dist.get_rank() == 0: + # print(f"Rank {dist.get_rank()} rank offet id {i} real dp size {dist.get_world_size(group=real_dp_process_group[i])} and dst: {dst}") + # dist.barrier() + # dist.barrier() + dst_rank = _get_global_rank(real_dp_process_group[i], dst) + async_handle = dist.reduce(grad_slice, + dst=dst_rank, + group=real_dp_process_group[i], + async_op=True) + async_handles.append(async_handle) + + for handle in async_handles: + handle.wait() + + ############################################################################## + ############################# CPU Offload Methods############################# + ############################################################################## + def get_grad_position(self, group_id, tensor_list, first_offset, partition_size): + current_offset = 0 + + for i, tensor in enumerate(tensor_list): + param_id = self.get_param_id(tensor) + param_start_offset = 0 + + num_elements = tensor.numel() + tensor_offset = 0 + + # we need to offset to get to the right element + if i == 0 and first_offset > 0: + tensor_offset = first_offset + num_elements = num_elements - tensor_offset + param_start_offset = first_offset + + # we dont need all elements of the tensor + 
if num_elements > (partition_size - current_offset): + num_elements = partition_size - current_offset + + self.grad_position[param_id] = [ + int(group_id), + int(param_start_offset), + int(current_offset), + int(num_elements) + ] + current_offset += num_elements + + def update_overflow_tracker_for_param_grad(self, param): + if param.grad is not None and self._has_inf_or_nan(param.grad.data): + self.local_overflow = True + + def async_accumulate_grad_in_cpu_via_gpu(self, param): + param_id = self.get_param_id(param) + + [i, source_offset, dest_offset, num_elements] = self.grad_position[param_id] + + # copy to a preexisiting buffer to avoid memory allocation penalty + dest_buffer = self.temp_grad_buffer_for_gpu_offload.view(-1).narrow( + 0, + 0, + param.numel()) + + # buffer for storing gradients for this parameter in CPU + def buffer_to_accumulate_to_in_cpu(): + if not self.fp16_master_weights_and_gradients: + return torch.zeros(param.numel(), + dtype=param.dtype, + device=self.device).pin_memory() + else: + return self.single_partition_of_fp32_groups[i].grad.view(-1).narrow( + 0, + dest_offset, + num_elements) + + # accumulate gradients into param.grad or parts of it that belongs to this parittion + def accumulate_gradients(): + if not self.fp16_master_weights_and_gradients: + dest_buffer.copy_(self.accumulated_grads_in_cpu[param_id].view(-1), + non_blocking=True) + param.grad.data.view(-1).add_(dest_buffer) + else: + dest_buffer.narrow(0, + source_offset, + num_elements).copy_( + self.accumulated_grads_in_cpu[param_id].view(-1), + non_blocking=True) + param.grad.data.view(-1).narrow( + 0, + source_offset, + num_elements).add_(dest_buffer.narrow(0, + source_offset, + num_elements)) + + # move accumulated gradients back to CPU + def copy_gradients_to_cpu(): + if not self.fp16_master_weights_and_gradients: + self.accumulated_grads_in_cpu[param_id].data.copy_( + param.grad.data.view(-1), + non_blocking=True) + else: + self.accumulated_grads_in_cpu[param_id].data.copy_( + param.grad.data.view(-1).narrow(0, + source_offset, + num_elements), + non_blocking=True) + + if param_id not in self.accumulated_grads_in_cpu: + self.accumulated_grads_in_cpu[param_id] = buffer_to_accumulate_to_in_cpu( + ) + + if self.micro_step_id > 0: + accumulate_gradients() + + # at the boundary we will send 32bit directly + if not self.is_gradient_accumulation_boundary: + copy_gradients_to_cpu() + + def set_norm_for_param_grad(self, param): + param_id = self.get_param_id(param) + accumulated_grad = self.accumulated_grads_in_cpu[ + param_id] if self.gradient_accumulation_steps > 1 else param.grad + + [i, source_offset, dest_offset, num_elements] = self.grad_position[param_id] + + start = source_offset + accumulated_grad = accumulated_grad.view( + -1).narrow(0, start, num_elements) + + self.norm_for_param_grads[param_id] = accumulated_grad.data.double().norm( + 2) + + def set_norm_for_param_grad_in_gpu(self, param): + param_id = self.get_param_id(param) + accumulated_grad = param.grad + + [i, source_offset, dest_offset, num_elements] = self.grad_position[param_id] + + start = source_offset + accumulated_grad = accumulated_grad.view( + -1).narrow(0, start, num_elements) + + self.norm_for_param_grads[param_id] = accumulated_grad.data.double().norm( + 2) + + def async_inplace_copy_grad_to_fp32_buffer_from_gpu(self, param): + param_id = self.get_param_id(param) + + [i, source_offset, dest_offset, num_elements] = self.grad_position[param_id] + + dest_tensor = self.single_partition_of_fp32_groups[i].grad.view(-1).narrow( + 0, + 
dest_offset, + num_elements) + + src_tensor = param.grad.view(-1).narrow(0, source_offset, num_elements) + if not self.fp16_master_weights_and_gradients: + src_tensor = src_tensor.float() + + dest_tensor.copy_(src_tensor, non_blocking=True) + param.grad = None # offload only + + def complete_grad_norm_calculation_for_cpu_offload(self, params): + total_norm = 0.0 + norm_type = 2.0 + for p in params: + if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0): + param_id = self.get_param_id(p) + # as some model have trainable parameters but skipped in training, + # their backward hooks in self.create_reduce_and_remove_grad_hooks() will not run, + # so they have no norm_for_param_grads + if param_id in self.norm_for_param_grads: + param_norm = self.norm_for_param_grads[param_id] + total_norm += param_norm.item() ** 2 + else: + # As unused parameters in modules may not be expected sometimes, + # add an explicit error msg when it occurred and an option to + # avoid the error + assert self.ignore_unused_parameters, """ + This assert indicates that your module has parameters that + were not used in producing loss. + You can avoid this assert by + (1) enable ignore_unused_parameters option in zero_optimization config; + (2) making sure all trainable parameters and `forward` function + outputs participate in calculating loss. + """ + + # Sum across all model parallel GPUs. + total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)]) + + torch.distributed.all_reduce(total_norm_cuda, + op=torch.distributed.ReduceOp.SUM, + group=self.dp_process_group) + + self._model_parallel_all_reduce(tensor=total_norm_cuda, + op=torch.distributed.ReduceOp.SUM) + + total_norm = total_norm_cuda[0].item() ** (1. / norm_type) + + if total_norm == float( + 'inf') or total_norm == -float('inf') or total_norm != total_norm: + total_norm = -1 + + return total_norm + + ############################################################################################ + + def copy_grads_in_partition(self, param): + if self.cpu_offload: + + if self.gradient_accumulation_steps > 1: + self.async_accumulate_grad_in_cpu_via_gpu(param) + + if self.is_gradient_accumulation_boundary: + self.set_norm_for_param_grad_in_gpu(param) + + self.update_overflow_tracker_for_param_grad(param) + + self.async_inplace_copy_grad_to_fp32_buffer_from_gpu(param) + + return + # print(f"ID {self.get_param_id(param)} grad norm {param.grad.norm()}") + if self.grads_in_partition is None: + self.grads_in_partition_offset = 0 + total_size = 0 + for group in self.params_in_partition: + for param_in_partition in group: + total_size += param_in_partition.numel() + + if self.verbose: + report_memory_usage( + f"before copying {total_size} gradients into partition") + self.grads_in_partition = torch.empty(int(total_size), + dtype=self.dtype, + device=torch.cuda.current_device()) + + if self.verbose: + report_memory_usage( + f"after copying {total_size} gradients into partition") + + # The allreduce buffer will be rewritted. 
Copy the gradients in partition to a new buffer + new_grad_tensor = self.grads_in_partition.view(-1).narrow( + 0, + self.grads_in_partition_offset, + param.numel()) + new_grad_tensor.copy_(param.grad.view(-1)) + param.grad.data = new_grad_tensor.data.view_as(param.grad) + # print(f"Grad norm after copy to contiguous_buffer {param.grad.data.norm()}") + self.grads_in_partition_offset += param.numel() + + def reduce_ipg_grads(self): + if self.contiguous_gradients: + if self.extra_large_param_to_reduce is not None: + assert len( + self.params_in_ipg_bucket) == 1, "more than 1 param in ipg bucket, this shouldn't happen" + _, _, param_id = self.params_in_ipg_bucket[0] + assert self.get_param_id( + self.extra_large_param_to_reduce) == param_id, "param in ipg bucket does not match extra-large param" + self.average_tensor( + self.extra_large_param_to_reduce.grad.view(-1)) + self.extra_large_param_to_reduce = None + else: + self.average_tensor(self.ipg_buffer[self.ipg_index]) + else: + self.buffered_reduce_fallback( + None, + self.grads_in_ipg_bucket, + elements_per_buffer=self.elements_in_ipg_bucket) + + if self.overlap_comm: + stream = self.reduction_stream + elif self.cpu_offload: + # TODO: copy_grad_stream is disabled because of race with reduce. This hurts perf and should be fixed. + # torch.cuda.synchronize() + # stream = self.copy_grad_stream + stream = torch.cuda.current_stream() + else: + stream = torch.cuda.current_stream() + + with torch.cuda.stream(stream): + for _, param, param_id in self.params_in_ipg_bucket: + + assert self.params_already_reduced[param_id] == False, \ + f"The parameter {param_id} has already been reduced. \ + Gradient computed twice for this partition. \ + Multiple gradient reduction is currently not supported" + + self.params_already_reduced[param_id] = True + + if self.partition_gradients: + if not self.is_param_in_current_partition[param_id]: + if self.overlap_comm and self.contiguous_gradients is False: + # Clear grads of other partitions during the next reduction + # to avoid clearing them before the reduction is complete. 
+ if self.previous_reduced_grads is None: + self.previous_reduced_grads = [] + self.previous_reduced_grads.append(param) + else: + param.grad = None # only if self.partition_gradients + elif self.contiguous_gradients: + self.copy_grads_in_partition(param) + + self.grads_in_ipg_bucket = [] + self.params_in_ipg_bucket = [] + self.ipg_bucket_has_moe_params = False + self.elements_in_ipg_bucket = 0 + ##################################################################### + + def reduce_ready_partitions_and_remove_grads(self, param, i): + if self.partition_gradients or self.is_gradient_accumulation_boundary: + self.reduce_independent_p_g_buckets_and_remove_grads(param, i) + + def zero_reduced_gradients(self, partition_id, i): + def are_all_related_partitions_reduced(params_id): + for partition_id in self.param_to_partition_ids[i][params_id]: + if not self.is_partition_reduced[i][partition_id]: + return False + return True + + for params_id in self.is_grad_computed[i][partition_id]: + if are_all_related_partitions_reduced(params_id): + self.param_dict[params_id].grad = None # dead code + + def flatten_and_print(self, message, tensors, start=0, n=5): + flatten_tensor = self.flatten(tensors) + + def print_func(): + print(flatten_tensor.contiguous().view(-1).narrow(0, start, n)) + + self.sequential_execution(print_func, message) + + def get_grads_to_reduce(self, i, partition_id): + def get_reducable_portion(key): + grad = self.param_dict[key].grad + total_elements = grad.numel() + start = self.grad_start_offset[i][partition_id][key] + num_elements = min( + total_elements - start, + self.partition_size[i] - + self.grad_partition_insertion_offset[i][partition_id][key]) + if not pg_correctness_test: + if num_elements == total_elements: + return grad + else: + return grad.contiguous().view(-1).narrow(0, + int(start), + int(num_elements)) + else: + if num_elements == total_elements: + return grad.clone() + else: + return grad.clone().contiguous().view(-1).narrow( + 0, + int(start), + int(num_elements)) + + grads_to_reduce = [] + for key in self.is_grad_computed[i][partition_id]: + grad = get_reducable_portion(key) + grads_to_reduce.append(grad) + return grads_to_reduce + + def sequential_execution(self, function, message, group=None): + if group is None: + group = self.dp_process_group + if dist.get_rank(group=group) == 0: + print(message) + for id in range(dist.get_world_size(group=group)): + if id == dist.get_rank(group=group): + function() + dist.barrier(group=group) + + def set_none_gradients_to_zero(self, i, partition_id): + for param_id in self.is_grad_computed[i][partition_id]: + param = self.param_dict[param_id] + if param.grad is None: + param.grad = torch.zeros_like(param) + + ######################Reduction Related Methods############################## + + def allreduce_bucket(self, bucket, allreduce_always_fp32=False, rank=None, log=None): + rank = None + tensor = self.flatten(bucket) + + tensor_to_allreduce = tensor + + if pg_correctness_test: + allreduce_always_fp32 = True + + if allreduce_always_fp32: + tensor_to_allreduce = tensor.float() + + tensor_to_allreduce.div_( + dist.get_world_size(group=self.dp_process_group)) + + if rank is None: + # "All Reducing" + dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group) + else: + global_rank = _get_global_rank(self.dp_process_group, rank) + dist.reduce(tensor_to_allreduce, global_rank, + group=self.dp_process_group) + + if allreduce_always_fp32 and tensor is not tensor_to_allreduce: + if rank is None or rank ==
dist.get_rank(group=self.dp_process_group): + tensor.copy_(tensor_to_allreduce) + + return tensor + + def _clear_previous_reduced_grads(self): + if self.previous_reduced_grads is not None: + for param in self.previous_reduced_grads: + param.grad = None # overlap enabled + self.previous_reduced_grads = None + + # if rank is specified do a reduction instead of an allreduce + def allreduce_and_copy(self, small_bucket, rank=None, log=None): + if self.overlap_comm: + torch.cuda.synchronize() + # It is safe to clear the previously reduced grads of other partitions + self._clear_previous_reduced_grads() + stream = self.reduction_stream + else: + stream = torch.cuda.current_stream() + + with torch.cuda.stream(stream): + allreduced = self.allreduce_bucket( + small_bucket, rank=rank, log=log) + if rank is None or rank == dist.get_rank(group=self.dp_process_group): + for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)): + buf.copy_(synced) + + def allreduce_no_retain(self, + bucket, + numel_per_bucket=500000000, + rank=None, + log=None): + small_bucket = [] + numel = 0 + for tensor in bucket: + small_bucket.append(tensor) + numel = numel + tensor.numel() + if numel > numel_per_bucket: + self.allreduce_and_copy(small_bucket, rank=rank, log=None) + small_bucket = [] + + if len(small_bucket) > 0: + self.allreduce_and_copy(small_bucket, rank=rank, log=log) + + # allows using reduction of gradients instead of using all_reduce + + def buffered_reduce_fallback(self, + rank, + grads, + elements_per_buffer=500000000, + log=None): + split_buckets = split_half_float_double(grads) + + for i, bucket in enumerate(split_buckets): + self.allreduce_no_retain(bucket, + numel_per_bucket=elements_per_buffer, + rank=rank, + log=log) + + ############################################################################# + ############################################################################# + ############################################################################# + + # views the tensor as multiple partitions and returns + # those partitions + def get_data_parallel_partitions(self, tensor, group_id): + partitions = [] + + dp = dist.get_world_size(group=self.real_dp_process_group[group_id]) + dp_id = dist.get_rank(group=self.real_dp_process_group[group_id]) + + total_num_elements = tensor.numel() + + base_size = total_num_elements // dp + remaining = total_num_elements % dp + + start = 0 + for id in range(dp): + partition_size = base_size + if id < remaining: + partition_size = partition_size + 1 + partitions.append(tensor.narrow(0, start, partition_size)) + start = start + partition_size + return partitions + + def get_partition_info(self, tensor_list, partition_size, partition_id): + params_in_partition = [] + params_not_in_partition = [] + + start_index = partition_size * partition_id + end_index = partition_size * (partition_id + 1) + + current_index = 0 + first_offset = 0 + + for tensor in tensor_list: + + tensor_size = tensor.numel() + + if (current_index >= start_index and current_index < end_index): + params_in_partition.append(tensor) + + elif start_index > current_index and start_index < (current_index + + tensor_size): + params_in_partition.append(tensor) + + assert ( + first_offset == 0), "This can happen either zero or only once as this must be the first tensor in the partition" + first_offset = start_index - current_index + + else: + params_not_in_partition.append(tensor) + + current_index = current_index + tensor_size + + return params_in_partition, params_not_in_partition, 
first_offset + + def zero_grad(self, set_grads_to_None=True): + """ + Zero FP16 parameter grads. + """ + # FP32 grad should never exist. + # For speed, set model fp16 grad to None by default + for group in self.fp16_groups: + for p in group: + if set_grads_to_None: + p.grad = None # epilogue and in step + else: + if p.grad is not None: + p.grad.detach_() + p.grad.zero_() + + def _model_parallel_all_reduce(self, tensor, op): + """ Perform all reduce within model parallel group, if any. + """ + if self.model_parallel_group is None: + pass + else: + torch.distributed.all_reduce(tensor=tensor, + op=op, + group=self.model_parallel_group) + + def get_grad_norm_direct(self, gradients, params, norm_type=2): + """Clips gradient norm of an iterable of parameters. + + This is adapted from ``torch.nn.utils.clip_grad.clip_grad_norm_`` and + added functionality to handle model parallel parameters. Note that + the gradients are modified in place. + + Arguments: + parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a + single Tensor that will have gradients normalized + max_norm (float or int): max norm of the gradients + norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for + infinity norm. + + Returns: + Total norm of the parameters (viewed as a single vector). + """ + norm_type = float(norm_type) + if norm_type == inf: + total_norm = max(g.data.abs().max() for g in gradients) + total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)]) + torch.distributed.all_reduce(total_norm_cuda, + op=torch.distributed.ReduceOp.MAX, + group=self.dp_process_group) + + # Take max across all GPUs. + self._model_parallel_all_reduce(tensor=total_norm_cuda, + op=torch.distributed.ReduceOp.MAX) + total_norm = total_norm_cuda[0].item() + else: + total_norm = 0.0 + # if dist.get_rank() == 0: + # print()(f"Total Norm begining {total_norm}") + for g, p in zip(gradients, params): + if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0): + param_norm = g.data.double().norm(2) + total_norm += param_norm.item() ** 2 + # Sum across all model parallel GPUs. + total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)]) + + torch.distributed.all_reduce(total_norm_cuda, + op=torch.distributed.ReduceOp.SUM, + group=self.dp_process_group) + + self._model_parallel_all_reduce(tensor=total_norm_cuda, + op=torch.distributed.ReduceOp.SUM) + + total_norm = total_norm_cuda[0].item() ** (1. / norm_type) + + if total_norm == float( + 'inf') or total_norm == -float('inf') or total_norm != total_norm: + total_norm = -1 + + return total_norm + + # creates a flat fused tensor from the tensor list starting at the first_offset + # in the first tensor of the list. 
If there are not enough elements in the tensor + # list then the flat tensor will be padded with zeros + def get_flat_partition(self, + tensor_list, + first_offset, + partition_size, + dtype, + device, + return_tensor_list=False): + flat_tensor_list = [] + current_size = 0 + for i, tensor in enumerate(tensor_list): + if tensor.grad is None: + tensor.grad = torch.zeros_like(tensor) + + tensor = tensor.grad + num_elements = tensor.numel() + tensor_offset = 0 + + # we need to offset to get to the right element + if i == 0 and first_offset > 0: + tensor_offset = first_offset + num_elements = num_elements - tensor_offset + + # we dont need all elements of the tensor + if num_elements > (partition_size - current_size): + num_elements = partition_size - current_size + + # we need a narrow view of the tensor based on the tensor offset and number of elements that + # we need from this tensor + if tensor_offset > 0 or num_elements < tensor.numel(): + flat_tensor_list.append(tensor.contiguous().view(-1).narrow( + 0, + int(tensor_offset), + int(num_elements))) + else: + flat_tensor_list.append(tensor) + + current_size = current_size + num_elements + + # this means its the last partition and does not align with the dp boundary. We need to pad before flattening + if current_size < partition_size: + flat_tensor_list.append( + torch.zeros(int(partition_size - current_size), + dtype=dtype, + device=device)) + + if return_tensor_list: + return flat_tensor_list + + return self.flatten(flat_tensor_list) + + def free_grad_in_param_list(self, param_list): + for p in param_list: + p.grad = None # in step + + def reset_cpu_buffers(self): + self.norm_for_param_grads = {} + self.local_overflow = False + + def log_timers(self, timer_names): + if self.timers is None: + return + + self.timers.log(names=list(timer_names)) + + def start_timers(self, timer_names): + if self.timers is None: + return + + for name in timer_names: + self.timers(name).start() + + def stop_timers(self, timer_names): + if self.timers is None: + return + + for name in timer_names: + self.timers(name).stop() + + def step(self, closure=None): + """ + Not supporting closure. + """ + self.micro_step_id = -1 + + if self.verbose: + report_memory_usage(f"In step before checking overflow") + + # First compute norm for all group so we know if there is overflow + self.check_overflow(self.partition_gradients) + + OPTIMIZER_ALLGATHER = 'optimizer_allgather' + OPTIMIZER_GRADIENTS = 'optimizer_gradients' + OPTIMIZER_STEP = 'optimizer_step' + timer_names = [OPTIMIZER_ALLGATHER, + OPTIMIZER_GRADIENTS, OPTIMIZER_STEP] + + prev_scale = self.loss_scale + self._update_scale(self.overflow) + if self.overflow: + if self.verbose: + report_memory_usage('After overflow before clearing gradients') + self.zero_grad() + if self.cpu_offload: + self.reset_cpu_buffers() + else: + self.averaged_gradients = {} + + if self.verbose: + report_memory_usage('After overflow after clearing gradients') + + print( + "[deepspeed] fp16 dynamic loss scale overflow! Rank {} Skipping step. 
Attempted loss scale: {}, " + "reducing to {}".format(dist.get_rank(), + prev_scale, + self.loss_scale)) + self.start_timers(timer_names) + self.stop_timers(timer_names) + return + + self.start_timers([OPTIMIZER_GRADIENTS]) + norm_groups = [] + single_partition_grad_groups = [] + skip = False + for i, group in enumerate(self.fp16_groups): + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + if self.cpu_offload: + norm_groups.append( + self.complete_grad_norm_calculation_for_cpu_offload( + self.params_in_partition[i])) + single_grad_partition = self.single_partition_of_fp32_groups[i].grad + else: + norm_groups.append( + self.get_grad_norm_direct(self.averaged_gradients[i], + self.params_in_partition[i])) + + # free gradients for all the prameters that are not updated by this process + self.free_grad_in_param_list(self.params_not_in_partition[i]) + + # create a flat gradients for parameters updated by this process + # If we are last partition, ensure we have same size grads and partition size, if not pad with zero tensors + if partition_id == dist.get_world_size( + group=self.real_dp_process_group[i]) - 1: + single_grad_partition = self.flatten_dense_tensors_aligned( + self.averaged_gradients[i], + int(self.partition_size[i])).to( + self.single_partition_of_fp32_groups[i].dtype) + else: + single_grad_partition = self.flatten(self.averaged_gradients[i]).to( + self.single_partition_of_fp32_groups[i].dtype) + assert single_grad_partition.numel() == self.partition_size[i], \ + "averaged gradients have different number of elements that partition size {} {} {} {}".format( + single_grad_partition.numel(), self.partition_size[i], i, partition_id) + + self.single_partition_of_fp32_groups[i].grad = single_grad_partition + # release all the gradient since we have already created a necessary copy in dp_grad_partition + self.free_grad_in_param_list(self.params_in_partition[i]) + + self.averaged_gradients[i] = None + + single_partition_grad_groups.append(single_grad_partition) + + if self.has_moe_layers: + self._average_expert_grad_norms(norm_groups) + + self.unscale_and_clip_grads(single_partition_grad_groups, norm_groups) + self.stop_timers([OPTIMIZER_GRADIENTS]) + + self.start_timers([OPTIMIZER_STEP]) + if self.deepspeed_adam_offload: + from deepspeed.ops.adam import DeepSpeedCPUAdam + if type(self.optimizer) == DeepSpeedCPUAdam and self.dtype == torch.half: + fp16_param_groups = [ + fp16_partitions[partition_id] + for fp16_partitions in self.parallel_partitioned_fp16_groups + ] + self.optimizer.step(fp16_param_groups=fp16_param_groups) + else: + self.optimizer.step() + for fp16_partitions, fp32_partition in zip(self.parallel_partitioned_fp16_groups, + self.single_partition_of_fp32_groups): + fp16_partitions[partition_id].data.copy_( + fp32_partition.data) + else: + self.optimizer.step() + + # get rid of the fp32 gradients. 
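The unscale_and_clip_grads call above folds loss-scale removal and gradient clipping into a single divisor; a worked sketch of that arithmetic with illustrative numbers (the method itself is defined further down):

import math

def combined_scale(norm_groups, loss_scale, clip_grad):
    total_norm = math.sqrt(sum(n ** 2 for n in norm_groups))   # norms still carry loss_scale
    scale = loss_scale
    if clip_grad > 0.0:
        clip = ((total_norm / loss_scale) + 1e-6) / clip_grad
        if clip > 1:
            scale = clip * loss_scale
    return scale

# unscaled norm is 8 with clip_grad=1.0, so every grad is divided by roughly 8 * loss_scale
print(combined_scale(norm_groups=[8.0 * 1024], loss_scale=1024.0, clip_grad=1.0))  # ~8192.0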
Not needed anymore + if not self.cpu_offload: + for group in self.single_partition_of_fp32_groups: + group.grad = None # in step + + for fp16_partitions, fp32_partition in zip(self.parallel_partitioned_fp16_groups, + self.single_partition_of_fp32_groups): + fp16_partitions[partition_id].data.copy_(fp32_partition.data) + + self.stop_timers([OPTIMIZER_STEP]) + + if self.cpu_offload: + self.reset_cpu_buffers() + + self.start_timers([OPTIMIZER_ALLGATHER]) + # gather the updated weights from everyone + for group_id, partitioned_params in enumerate(self.parallel_partitioned_fp16_groups): + + # Sequential AllGather Best of both worlds + dp_world_size = dist.get_world_size( + group=self.real_dp_process_group[group_id]) + num_shards = max( + 1, + partitioned_params[partition_id].numel() * dp_world_size // + self.allgather_bucket_size) + + shard_size = partitioned_params[partition_id].numel() // num_shards + num_elements = shard_size + + assert shard_size * \ + num_shards <= partitioned_params[partition_id].numel() + + for shard_id in range(num_shards): + + if shard_id == (num_shards - 1): + num_elements = partitioned_params[partition_id].numel( + ) - shard_id * shard_size + + shard_list = [] + for dp_id in range(dp_world_size): + curr_shard = partitioned_params[dp_id].narrow( + 0, + shard_id * shard_size, + num_elements).detach() + shard_list.append(curr_shard) + + dist.all_gather(shard_list, + shard_list[partition_id], + group=self.real_dp_process_group[group_id]) + self.stop_timers([OPTIMIZER_ALLGATHER]) + + # TODO: we probably don't need this? just to be safe + for i in range(len(norm_groups)): + self._update_model_fp16_weights(i) + + self.log_timers(timer_names) + if self.verbose: + report_memory_usage('After zero_optimizer step') + + return + + def _average_expert_grad_norms(self, norm_groups): + for i, norm in enumerate(norm_groups): + if self.is_moe_param_group[i]: + scaled_norm = norm * 1.0 / float( + dist.get_world_size(group=self.ep_process_group)) + scaled_norm_tensor = torch.tensor(scaled_norm, + device='cuda', + dtype=torch.float) + dist.all_reduce(scaled_norm_tensor, + group=self.ep_process_group) + norm_groups[i] = scaled_norm_tensor.item() + + def unscale_and_clip_grads(self, grad_groups_flat, norm_groups): + total_norm = 0.0 + for norm in norm_groups: + total_norm += norm ** 2.0 + total_norm = math.sqrt(total_norm) + + # compute combined scale factor for this group + combined_scale = self.loss_scale + if self.clip_grad > 0.: + # norm is in fact norm*scale + clip = ((total_norm / self.loss_scale) + 1e-6) / self.clip_grad + if clip > 1: + combined_scale = clip * self.loss_scale + + for grad in grad_groups_flat: + if isinstance(grad, list): + sub_partitions = grad + for g in sub_partitions: + g.data.mul_(1. / combined_scale) + else: + grad.data.mul_(1. 
/ combined_scale) + + def _check_overflow(self, partition_gradients=True): + self.overflow = self.has_overflow(partition_gradients) + + # `params` is a list / generator of torch.Variable + def has_overflow_serial(self, params, is_grad_list=False): + for p in params: + if p.grad is not None and self._has_inf_or_nan(p.grad.data): + return True + + return False + + def has_overflow_partitioned_grads_serial(self): + for i in range(len(self.fp16_groups)): + for j, grad in enumerate(self.averaged_gradients[i]): + if grad is not None and self._has_inf_or_nan(grad.data, j): + return True + return False + + def has_overflow(self, partition_gradients=True): + if partition_gradients: + overflow = self.local_overflow if self.cpu_offload else self.has_overflow_partitioned_grads_serial( + ) + overflow_gpu = torch.cuda.ByteTensor([overflow]) + '''This will capture overflow across all data parallel and expert parallel process + Since expert parallel process are a subset of data parallel process''' + torch.distributed.all_reduce(overflow_gpu, + op=torch.distributed.ReduceOp.MAX, + group=self.dp_process_group) + + else: + params = [] + for group in self.fp16_groups: + for param in group: + params.append(param) + + overflow = self.has_overflow_serial( + params, is_grad_list=partition_gradients) + overflow_gpu = torch.cuda.ByteTensor([overflow]) + + # Since each model parallel GPU carries only part of the model, + # make sure overflow flag is synced across all the model parallel GPUs + self._model_parallel_all_reduce(tensor=overflow_gpu, + op=torch.distributed.ReduceOp.MAX) + + overflow = overflow_gpu[0].item() + return bool(overflow) + + # `x` is a torch.Tensor + @staticmethod + def _has_inf_or_nan(x, j=None): + try: + # if x is half, the .float() incurs an additional deep copy, but it's necessary if + # Pytorch's .sum() creates a one-element tensor of the same type as x + # (which is true for some recent version of pytorch). + cpu_sum = float(x.float().sum()) + # More efficient version that can be used if .sum() returns a Python scalar + # cpu_sum = float(x.sum()) + except RuntimeError as instance: + # We want to check if inst is actually an overflow exception. + # RuntimeError could come from a different error. + # If so, we still want the exception to propagate. + if "value cannot be converted" not in instance.args[0]: + raise + return True + else: + if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum: + return True + return False + + def backward(self, loss, retain_graph=False): + """ + :attr:`backward` performs the following steps: + + 1. fp32_loss = loss.float() + 2. scaled_loss = fp32_loss*loss_scale + 3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves + """ + self.micro_step_id += 1 + + if self.contiguous_gradients: + self.ipg_buffer = [] + buf_0 = torch.empty(int(self.reduce_bucket_size), + dtype=self.dtype, + device=torch.cuda.current_device()) + self.ipg_buffer.append(buf_0) + + # Use double buffers to avoid data access conflict when overlap_comm is enabled. 
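The overflow test in _has_inf_or_nan relies on the fact that summing a tensor propagates any inf or NaN into a single scalar, so one comparison is enough; a tiny self-contained version:

import torch

def has_inf_or_nan(x):
    s = float(x.float().sum())
    return s in (float('inf'), -float('inf')) or s != s   # NaN compares unequal to itself

print(has_inf_or_nan(torch.tensor([1.0, 2.0])))            # False
print(has_inf_or_nan(torch.tensor([1.0, float('inf')])))   # True
print(has_inf_or_nan(torch.tensor([1.0, float('nan')])))   # True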
+ if self.overlap_comm: + buf_1 = torch.empty(int(self.reduce_bucket_size), + dtype=self.dtype, + device=torch.cuda.current_device()) + self.ipg_buffer.append(buf_1) + self.ipg_index = 0 + + self.loss_scaler.backward(loss.float(), retain_graph=retain_graph) + + def check_overflow(self, partition_gradients=True): + self._check_overflow(partition_gradients) + + def _update_scale(self, has_overflow=False): + self.loss_scaler.update_scale(has_overflow) + + # Promote state so it can be retrieved or set via "fp16_optimizer_instance.state" + def _get_state(self): + return self.optimizer.state + + def _set_state(self, value): + self.optimizer.state = value + + state = property(_get_state, _set_state) + + # Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups" + # (for example, to adjust the learning rate) + def _get_param_groups(self): + return self.optimizer.param_groups + + def _set_param_groups(self, value): + self.optimizer.param_groups = value + + param_groups = property(_get_param_groups, _set_param_groups) + + # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale" + def _get_loss_scale(self): + return self.loss_scaler.loss_scale + + def _set_loss_scale(self, value): + self.loss_scaler.cur_scale = value + + loss_scale = property(_get_loss_scale, _set_loss_scale) + cur_scale = property(_get_loss_scale, _set_loss_scale) + + # Return group tensor after removing paddings that are added for alignment to DP world size. + # This method works on the assumption that each group contains a single flattened tensor. + def _get_groups_without_padding(self, groups_with_padding): + groups_without_padding = [] + for i, group in enumerate(groups_with_padding): + lean_length = group.numel() - self.groups_padding[i] + groups_without_padding.append(group[:lean_length]) + + return groups_without_padding + + # Return optimizer state after removing paddings that are added for alignment. + def _get_state_without_padding(self, state_with_padding, padding): + lean_state = {} + for key, value in state_with_padding.items(): + if torch.is_tensor(value): + lean_length = value.numel() - padding + lean_state[key] = value[:lean_length] + else: + lean_state[key] = value + + return lean_state + + # Return base optimizer states. + # This method assumes that each param group contains a single flattened tensor. + def _get_base_optimizer_state(self): + optimizer_groups_state = [] + for i, group in enumerate(self.optimizer.param_groups): + p = group['params'][0] + lean_optimizer_state = self._get_state_without_padding( + self.optimizer.state[p], + self.groups_padding[i]) + optimizer_groups_state.append(lean_optimizer_state) + + return optimizer_groups_state + + def state_dict(self): + """ + Returns a dict containing the current state of this :class:`FP16_Optimizer` instance. + This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict + of the contained Pytorch optimizer. 
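A small sketch of the padding removal performed by _get_groups_without_padding and _get_state_without_padding: each flat partition carries trailing elements added only for DP alignment, and checkpointing drops them (illustrative numbers):

import torch

def strip_padding(flat_tensor, padding):
    lean_length = flat_tensor.numel() - padding
    return flat_tensor[:lean_length]

flat = torch.arange(10.0)                 # 8 real elements + 2 alignment elements
print(strip_padding(flat, padding=2))     # tensor([0., 1., 2., 3., 4., 5., 6., 7.])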
+ + Example:: + + checkpoint = {} + checkpoint['model'] = model.state_dict() + checkpoint['optimizer'] = optimizer.state_dict() + torch.save(checkpoint, "saved.pth") + """ + state_dict = {} + state_dict['loss_scaler'] = self.loss_scaler + state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale + state_dict['overflow'] = self.overflow + state_dict['base_optimizer_state'] = self._get_base_optimizer_state() + + state_dict['zero_stage'] = ZERO_OPTIMIZATION_GRADIENTS + state_dict['partition_count'] = self.partition_count + + state_dict['ds_version'] = version + + # Remove paddings for DP alignment to enable loading for other alignment values + fp32_groups_without_padding = self._get_groups_without_padding( + self.single_partition_of_fp32_groups) + state_dict['single_partition_of_fp32_groups'] = fp32_groups_without_padding + + # if self.cpu_offload: + # state_dict_tmp = async_copy_to(state_dict, + # 'cpu', + # torch.cuda.current_stream()) + # state_dict = state_dict_tmp + + return state_dict + + # Restore base optimizer fp32 weights from checkpoint by: + # 1) Merging fp32 weights from checkpoints of all partitions + # 2) Extracting fp32 weights for current partition from merged weights + # 3) Using extracted weights to update base optimizer weights directly. + def _restore_from_fp32_weights(self, all_state_dict): + merged_single_partition_of_fp32_groups = [] + for i in range(len(self.single_partition_of_fp32_groups)): + partition_id = dist.get_rank(group=self.real_dp_process_group[i]) + merged_partitions = [ + sd['single_partition_of_fp32_groups'][i] for sd in all_state_dict + ] + flat_merged_partitions = self.flatten_dense_tensors_aligned( + merged_partitions, + self.nccl_start_alignment_factor * + dist.get_world_size(group=self.real_dp_process_group[i])) + dp_partitions = self.get_data_parallel_partitions( + flat_merged_partitions, i) + merged_single_partition_of_fp32_groups.append( + dp_partitions[partition_id]) + + for current, saved in zip(self.single_partition_of_fp32_groups, merged_single_partition_of_fp32_groups): + current.data.copy_(saved.data) + + # Restore base optimizer fp32 weights from ZeRO fp16 weights + def _restore_from_fp16_weights(self): + for group_id, fp16_partitions, fp32_partition in enumerate( + zip(self.parallel_partitioned_fp16_groups, self.single_partition_of_fp32_groups)): + partition_id = dist.get_rank( + group=self.real_dp_process_group[group_id]) + fp32_partition.data.copy_(fp16_partitions[partition_id].data) + + # Refresh the fp32 master params from the fp16 copies. 
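The restore-from-fp32 path can be pictured as: concatenate the per-rank fp32 partitions saved in the checkpoint back into one flat tensor, then re-slice it for the current data-parallel world size, which is what lets a checkpoint survive a change in GPU count. A simplified sketch that assumes the merged length is already aligned (the real code pads via flatten_dense_tensors_aligned):

import torch

def repartition(saved_partitions, new_world_size):
    merged = torch.cat([p.view(-1) for p in saved_partitions])
    size = merged.numel() // new_world_size
    return [merged.narrow(0, r * size, size) for r in range(new_world_size)]

saved = [torch.arange(0.0, 6.0), torch.arange(6.0, 12.0)]    # written by 2 ranks
print(repartition(saved, new_world_size=4))                  # re-sliced for 4 ranks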
+ def refresh_fp32_params(self): + self._restore_from_fp16_weights() + + # Extract optimizer state for current partition from merged states of all partitions + def _partition_base_optimizer_state(self, state_key, all_partition_states, group_id): + partition_id = dist.get_rank( + group=self.real_dp_process_group[group_id]) + alignment = dist.get_world_size( + group=self.real_dp_process_group[group_id]) + if torch.is_tensor(all_partition_states[0]): + flat_merged_partitions = self.flatten_dense_tensors_aligned( + all_partition_states, + alignment) + dp_partitions = self.get_data_parallel_partitions(flat_merged_partitions, + group_id) + return dp_partitions[partition_id] + else: + # Assume non-tensor states are not partitioned and equal across ranks, so return first one + return all_partition_states[0] + + # Restore base optimizer state from checkpoint by + # 1) Merging optimizer state from checkpoints of all partitions + # 2) Extracting optimizer state for current partition from the merged state + # 3) Using the extracted value to directly update the base optimizer. + def _restore_base_optimizer_state(self, all_state_dict): + base_optimizer_group_states = [] + for i in range(len(self.optimizer.param_groups)): + partition_states = {} + all_partition_group_states = [ + sd['base_optimizer_state'][i] for sd in all_state_dict + ] + for key in all_partition_group_states[0].keys(): + all_partition_states = [ + all_states[key] for all_states in all_partition_group_states + ] + partition_states[key] = self._partition_base_optimizer_state( + key, + all_partition_states, + i) + base_optimizer_group_states.append(partition_states) + + for i, group in enumerate(self.optimizer.param_groups): + p = group['params'][0] + for key, saved in base_optimizer_group_states[i].items(): + if torch.is_tensor(self.optimizer.state[p][key]): + self.optimizer.state[p][key].data.copy_(saved.data) + else: + self.optimizer.state[p][key] = saved + + def load_state_dict(self, + state_dict_list, + load_optimizer_states=True, + load_from_fp32_weights=False): + r"""Loading ZeRO checkpoint + + Arguments: + state_dict_list: List of all saved ZeRO checkpoints, one for each saved partition. + Note that the number of saved partitions may differ from number of loading partitions to support + changing GPU count, specifically DP world size, between saving and loading checkpoints. + load_optimizer_states: Boolean indicating whether or not to load base optimizer states + load_from_fp32_weights: Boolean indicating whether to initialize fp32 master weights from fp32 + copies in checkpoints (no precision loss) or from model's fp16 copies (with precision loss). + """ + """ + Loads a state_dict created by an earlier call to state_dict(). + If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``, + whose parameters in turn came from ``model``, it is expected that the user + will call ``model.load_state_dict()`` before + ``fp16_optimizer_instance.load_state_dict()`` is called. + + Example:: + + model = torch.nn.Linear(D_in, D_out).cuda().half() + optimizer = torch.optim.SGD(model.parameters(), lr=1e-3) + optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0) + ... + checkpoint = torch.load("saved.pth") + model.load_state_dict(checkpoint['model']) + optimizer.load_state_dict(checkpoint['optimizer']) + """ + # I think it should actually be ok to reload the optimizer before the model. 
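_partition_base_optimizer_state treats the two kinds of optimizer state differently: tensor entries (momentum buffers and the like) are merged and re-sliced just like the fp32 weights, while non-tensor entries are taken from the first partition on the assumption that they are identical on every rank. A simplified single-process sketch without alignment padding:

import torch

def partition_state_value(all_partition_values, rank, world_size):
    if torch.is_tensor(all_partition_values[0]):
        merged = torch.cat([v.view(-1) for v in all_partition_values])
        size = merged.numel() // world_size
        return merged.narrow(0, rank * size, size)
    return all_partition_values[0]          # e.g. a shared step counter

saved_exp_avg = [torch.zeros(4), torch.ones(4)]                      # from 2 ranks
print(partition_state_value(saved_exp_avg, rank=1, world_size=2))    # tensor([1., 1., 1., 1.])
print(partition_state_value([3, 3], rank=1, world_size=2))           # 3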
+ self.loss_scaler = state_dict_list[0]['loss_scaler'] + self.dynamic_loss_scale = state_dict_list[0]['dynamic_loss_scale'] + self.overflow = state_dict_list[0]['overflow'] + + # zero stage 1 mode + if not self.partition_gradients: + required_version = pkg_version.parse("0.3.17") + ckpt_version = state_dict_list[0].get("ds_version", False) + error_str = f"ZeRO stage 1 changed in {required_version} and is not backwards compatible " \ + "with older stage 1 checkpoints. If you'd like to load an old ZeRO-1 checkpoint " \ + "please set 'legacy_stage1': true in your zero config json. This old version of " \ + "stage 1 will be removed in v0.4.0." + + assert ckpt_version, f"Empty ds_version! {error_str}" + assert required_version <= pkg_version.parse( + ckpt_version), f"Old version: {ckpt_version} {error_str}" + + if load_optimizer_states: + self._restore_base_optimizer_state(state_dict_list) + + # At this point, the optimizer's references to the model's fp32 parameters are up to date. + # The optimizer's hyperparameters and internal buffers are also up to date. + # However, the fp32 master copies of the model's fp16 params stored by the optimizer are still + # out of date. There are two options. + # 1: Refresh the master params from the model's fp16 params. + # This requires less storage but incurs precision loss. + # 2: Save and restore the fp32 master copies separately. + # We choose option 1 if changing DP degree and option 2 otherwise. + # + # Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device + # of their associated parameters, because it's possible those buffers might not exist yet in + # the current optimizer instance. In our case, as long as the current FP16_Optimizer has been + # constructed in the same way as the one whose state_dict we are loading, the same master params + # are guaranteed to exist, so we can just copy_() from the saved master params. + + if load_from_fp32_weights: + self._restore_from_fp32_weights(state_dict_list) + else: + self._restore_from_fp16_weights() + + def allreduce_gradients(self): + self.overlapping_partition_gradients_reduce_epilogue() + + +def _handle_overflow(cpu_sum, x, i): + import math + rank = torch.distributed.get_rank() + if rank == 0: + t_i = -1 + for v_i, v in enumerate(x.data.contiguous().view(-1)): + if not math.isfinite(float(v)): + t_i = v_i + break + print( + f"rank {rank} detected overflow {cpu_sum} in tensor {i}:{t_i} shape {x.shape}" + ) + + +def estimate_zero2_model_states_mem_needs(total_params, + num_gpus_per_node=1, + num_nodes=1, + cpu_offload=True, + additional_buffer_factor=1.5): + total_gpus = num_nodes * num_gpus_per_node + + if cpu_offload: + gpu_mem = 2 * total_params + cpu_mem = total_params * \ + max(4 * total_gpus, 16) * additional_buffer_factor + else: + gpu_mem = 4 * total_params + int(16 * total_params / total_gpus) + cpu_mem = total_params * 4 * num_gpus_per_node * additional_buffer_factor + + return int(cpu_mem), int(gpu_mem) + + +def model_to_params(model): + # shared params calculated only once + total_params = sum( + dict((p.data_ptr(), + p.numel()) for p in model.parameters()).values()) + return total_params + + +def estimate_zero2_model_states_mem_needs_all_live(model, + num_gpus_per_node=1, + num_nodes=1, + additional_buffer_factor=1.5): + """ + Print out estimates on memory usage requirements for ZeRO 2 params, optim states and gradients + for a given ``model`` and hardware setup. 
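Plugging illustrative numbers into estimate_zero2_model_states_mem_needs above makes the formulas concrete: a 1B-parameter model on 2 nodes with 8 GPUs each.

total_params = 1_000_000_000
num_nodes, num_gpus_per_node, factor = 2, 8, 1.5
total_gpus = num_nodes * num_gpus_per_node                         # 16

# cpu_offload=True: the GPU keeps only the fp16 model, optimizer states sit in CPU memory
gpu_mem = 2 * total_params                                         # ~1.86 GB per GPU
cpu_mem = total_params * max(4 * total_gpus, 16) * factor          # ~89.4 GB CPU-side estimate
print(gpu_mem / 2 ** 30, cpu_mem / 2 ** 30)

# cpu_offload=False: fp16 model plus the sharded fp32 master/optimizer states stay on GPU
gpu_mem = 4 * total_params + int(16 * total_params / total_gpus)   # ~4.66 GB per GPU
cpu_mem = total_params * 4 * num_gpus_per_node * factor            # ~44.7 GB CPU-side estimate
print(gpu_mem / 2 ** 30, cpu_mem / 2 ** 30)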
+ + If you have an actual model object, use this function and everything will be derived + automatically. + + If it's a hypothetical model, use ``estimate_zero2_model_states_mem_needs_all_cold`` where you have to pass + the ``total_params`` explicitly. + + Args: + - ``model``: ``nn.Module`` object + - ``num_gpus_per_node``: how many gpus per node (defaults to 1) + - ``num_nodes``: how many nodes (defaults to 1), + - ``additional_buffer_factor``: estimation factor (defaults to 1.5): + + """ + + total_params = model_to_params(model) + + estimate_zero2_model_states_mem_needs_all_cold( + total_params=total_params, + num_gpus_per_node=num_gpus_per_node, + num_nodes=num_nodes, + additional_buffer_factor=additional_buffer_factor) + + +def estimate_zero2_model_states_mem_needs_all_cold(total_params, + num_gpus_per_node=1, + num_nodes=1, + additional_buffer_factor=1.5): + """ + Print out estimates on memory usage requirements for ZeRO 2 params, optim states and gradients + for a given ``model`` and hardware setup. + + If it's a hypothetical model, use this function where you have to pass + the ``total_params`` and ``largest_layer_params`` explicitly. + + If you have an actual model object, use ``estimate_zero2_model_states_mem_needs_all_live`` and everything + will be derived automatically. + + Args: + - ``total_params``: total model params + - ``num_gpus_per_node``: how many gpus per node (defaults to 1) + - ``num_nodes``: how many nodes (defaults to 1), + - ``additional_buffer_factor``: estimation factor (defaults to 1.5): + + """ + + def format_options(cpu_offload): + enabled = [] + enabled.append(f"cpu_offload={1 if cpu_offload else 0}") + return ", ".join(enabled) + + nodes_str = "nodes" if num_nodes > 1 else "node" + gpus_str = "GPUs" if num_gpus_per_node > 1 else "GPU" + print( + "Estimated memory needed for params, optim states and gradients for a:\n" + f"HW: Setup with {num_nodes} {nodes_str}, {num_gpus_per_node} {gpus_str} per node.\n" + f"SW: Model with {int(total_params / 1e6)}M total params.") + print(" per CPU | per GPU | Options") + for cpu_offload in [True, False]: + cpu_mem, gpu_mem = estimate_zero2_model_states_mem_needs( + total_params=total_params, + num_gpus_per_node=num_gpus_per_node, + num_nodes=num_nodes, + cpu_offload=cpu_offload, + additional_buffer_factor=additional_buffer_factor + ) + + options_str = format_options(cpu_offload=cpu_offload) + print( + f" {cpu_mem / 2 ** 30:7.2f}GB | {gpu_mem / 2 ** 30:6.2f}GB | {options_str}") diff --git a/colossalai/nn/optimizer/zero_redundancy_optimizer_level_3.py b/colossalai/nn/optimizer/zero_redundancy_optimizer_level_3.py new file mode 100644 index 000000000..6f5d7969c --- /dev/null +++ b/colossalai/nn/optimizer/zero_redundancy_optimizer_level_3.py @@ -0,0 +1,3598 @@ +""" +"Copyright 2020 The Microsoft DeepSpeed Team. +Licensed under the MIT license. 
+""" + +import math +from collections import OrderedDict + +import torch +import torch.distributed as dist + +try: + from deepspeed.utils.debug import debug_module2name_id, debug_param2name_id, debug_param2name_id_numel, \ + debug_param2name_id_shape_device, debug_module2name_class + from deepspeed.ops.adam import DeepSpeedCPUAdam + from deepspeed.ops.op_builder import UtilsBuilder + from deepspeed.runtime.swap_tensor.partitioned_optimizer_swapper import PartitionedOptimizerSwapper + from deepspeed.runtime.swap_tensor.pipelined_optimizer_swapper import PipelinedOptimizerSwapper + from deepspeed.runtime.utils import is_model_parallel_parameter + from deepspeed.runtime.zero.constants import ZERO_OPTIMIZATION_WEIGHTS + from deepspeed.runtime.zero.partition_parameters import * + from deepspeed.runtime.zero.partition_parameters import _init_external_params +except ImportError: + print('DeepSpeed is required if you want to use ZeRO.') + +from torch._six import inf +from torch.distributed.distributed_c10d import _get_global_rank +from torch.optim import Optimizer + +from colossalai.core import global_context as gpc +from colossalai.registry import OPTIMIZER_WRAPPERS +from colossalai.utils import report_memory_usage +from .loss_scaler import LossScaler, DynamicLossScaler +from ...context.parallel_mode import ParallelMode + +# Toggle this to true to enable correctness test +# with gradient partitioning and without +pg_correctness_test = False + +FWD_MODULE_STACK = list() + + +def print_rank_0(message, debug=False, force=False): + rank = torch.distributed.get_rank() + if rank == 0 and (debug or force): + print(message) + # other variations + # - print for all ranks w/o interleaving + # printflock(f"[{rank}] {message}") + # - print to log file per rank + # log_rank_file(rank, message) + + +def input(msg): + return + + +def split_half_float_double(tensors): + dtypes = [ + "torch.cuda.HalfTensor", + "torch.cuda.FloatTensor", + "torch.cuda.DoubleTensor" + ] + buckets = [] + for i, dtype in enumerate(dtypes): + bucket = [t for t in tensors if t.type() == dtype] + if bucket: + buckets.append(bucket) + return buckets + + +def isclose(a, b, rtol=1e-09, atol=0.0): + return abs(a - b) <= max(rtol * max(abs(a), abs(b)), atol) + + +def lcm(x, y): + from fractions import gcd # or can import gcd from `math` in Python 3 + return x * y // gcd(x, y) + + +def move_to_cpu(tensor_list): + for tensor in tensor_list: + tensor.data = tensor.data.cpu() + + +def get_all_parameters(sub_module, recurse=False): + return itertools.chain(sub_module.named_parameters(recurse=recurse), + sub_module.ds_external_parameters()) + + +# apply torch.autograd.Function that calls a backward_function to tensors in output +def _apply_to_tensors_only(module, functional, backward_function, outputs): + if type(outputs) is tuple: + touched_outputs = [] + for output in outputs: + touched_output = _apply_to_tensors_only(module, + functional, + backward_function, + output) + touched_outputs.append(touched_output) + return tuple(touched_outputs) + elif type(outputs) is torch.Tensor: + return functional.apply(module, backward_function, outputs) + else: + return outputs + + +# for each tensor in outputs run the forward_funciton and register backward_function as hook +def _apply_forward_and_backward_to_tensors_only(module, + forward_function, + backward_function, + outputs): + if type(outputs) is tuple: + touched_outputs = [] + for output in outputs: + touched_output = _apply_forward_and_backward_to_tensors_only( + module, + forward_function, + 
backward_function, + output) + touched_outputs.append(touched_output) + return tuple(touched_outputs) + elif type(outputs) is torch.Tensor: + forward_function(outputs) + if outputs.requires_grad: + outputs.register_hook(backward_function) + return outputs + else: + return outputs + + +class ZeROOrderedDict(OrderedDict): + def __init__(self, parent_module, *args, **kwargs): + """A replacement for ``collections.OrderedDict`` to detect external ZeRO params. + + Args: + parent_module (``collections.OrderedDict``): the collection to replace + """ + + super().__init__(*args, **kwargs) + self._parent_module = parent_module + self._in_forward = False + + def __getitem__(self, key): + param = super().__getitem__(key) + + # Params can be registered as None (e.g., bias) + if param is None: + return param + + if param.ds_status == ZeroParamStatus.NOT_AVAILABLE: + if self._parent_module._parameters._in_forward: + print_rank_0(f'Registering external parameter from getter {key}', + force=False) + register_external_parameter(FWD_MODULE_STACK[-1], param) + param.all_gather() + + return param + + +def _inject_parameters(module, cls): + for module in module.modules(): + if cls == ZeROOrderedDict: + new_param = cls(parent_module=module) + else: + new_param = cls() + + for key, param in module._parameters.items(): + new_param[key] = param + module._parameters = new_param + + +# TODO Needs to be implemented +class PrefetchCoordinator(object): + def __init__(self): + # step_id keeps track of the number of sub-modules invoked so far + # the step_id is tracking forward and backward sequence of sub-modules + self.step_id = 0 + + # stores the sequence of sub modules in forward+backward pass + self.sub_module_trace = [] + + # maps sub_module id to submodule objects + self.id_to_sub_module_map = {} + + # stores the total number of parmeters in each sub_module + self.id_to_sub_module_size_map = {} + + self.trace_completed = False + + self.most_recent_sub_module_step = {} + + # reuse distances + self.reuse_numel_for_step_id = {} + + def record_trace(self, sub_module): + if not self.trace_completed: + self.sub_module_trace.append(sub_module.id) + self.id_to_sub_module_map[sub_module.id] = sub_module + + def print_trace(self): + print_rank_0( + f"The module trace is : {[self.id_to_sub_module_map[module_id].id for module_id in self.sub_module_trace]}" + ) + + def increment_step(self, sub_module): + self.most_recent_sub_module_step[sub_module.id] = self.step_id + self.step_id += 1 + + def reset_step(self): + self.step_id = 0 + + # returns the next numel parameters that will be used next but are not available or inflight + def get_params_to_prefetch(self, sub_module, numel=2000000): + + # numel_in_sub_module = 0 + # for name, param in sub_module.named_parameters(recurse=False): + # numel_in_sub_module += param.ds_numel + + # #if numel_in_sub_module < (numel // 2): + # return [] + + # tracing failed. The sub_module passed at the step_id must match with the sub_module during tracing + if sub_module.id != self.sub_module_trace[self.step_id]: + print_rank_0( + f"Tracing failed. 
Prefetching is disabled at sub-module: {debug_module2name_id(sub_module)}" + ) + return [] + + params_to_prefetch = [] + total_numel_to_prefetch = 0 + + for i in range(self.step_id, len(self.sub_module_trace)): + module_id = self.sub_module_trace[i] + for _, param in get_all_parameters(self.id_to_sub_module_map[module_id]): + if param.ds_status is ZeroParamStatus.NOT_AVAILABLE and ( + param.ds_id not in [p.ds_id for p in params_to_prefetch]): + params_to_prefetch.append(param) + total_numel_to_prefetch += param.ds_numel + # print_rank_0(f"Total numel to prefetch: {total_numel_to_prefetch}. Param: {param.ds_shape} and numel {param.ds_numel}, numel limit {numel}") + # and total_numel_to_prefetch > (numel_in_sub_module // 2): + if total_numel_to_prefetch >= numel: + return params_to_prefetch + + return params_to_prefetch + + # checks if this sub_module will be used again and if so then returns the number of elements + # in the parameters used between this sub_module and the reuse of this sub_module + def get_reuse_distance_in_numel(self, sub_module, sub_module_step_id=None): + # assert is_forward is not None, "is_forward must be set to True for Forward Propagation and False for backward Propagation" + is_there_reuse = False + reuse_distance_in_numel = 1000000000000 + + # set the appropriate trace + trace = self.sub_module_trace + total_steps = len(trace) + if sub_module_step_id is None: + sub_module_step_id = self.most_recent_sub_module_step[sub_module.id] + + # tracing failed. The sub_module passed at the step_id must match with the sub_module during tracing + if sub_module.id != trace[sub_module_step_id]: + print_rank_0( + f"Tracing failed. Cannot tell if the sub_module: {sub_module.id} is reused" + ) + return reuse_distance_in_numel + + # return cached value + if sub_module_step_id in self.reuse_numel_for_step_id: + return self.reuse_numel_for_step_id[sub_module_step_id] + + start_step = self.step_id + print_rank_0(f"Step id is {self.step_id} ") + for step_id in range(start_step, total_steps): + print_rank_0( + f"Trace id {trace[step_id]} and sub_module id {sub_module.id}") + if sub_module.id == trace[step_id]: + end_step = step_id + + is_there_reuse = True + reuse_distance_in_numel = self._distance_in_numel( + start_step, + end_step, + trace) + break + + self.reuse_numel_for_step_id[sub_module_step_id] = reuse_distance_in_numel + + return reuse_distance_in_numel + + def _distance_in_numel(self, start_step, end_step, trace): + distance_in_numel = 0 + for step_id in range(start_step, end_step): + module_id = trace[step_id] + for _, param in self.id_to_sub_module_map[module_id].named_parameters(recurse=False): + distance_in_numel += param.ds_numel + for _, param in self.id_to_sub_module_map[module_id].ds_external_parameters(): + distance_in_numel += param.ds_numel + return distance_in_numel + + +class PartitionedParameterCoordinator(object): + def __init__(self, + comm_stream=None, + max_reuse_distance_in_numel=500000000, + max_available_parameters_in_numel=700000000): + + self.in_flight_handles = [] + self.params_in_flight = [] + self.comm_stream = comm_stream if comm_stream is not None else torch.cuda.current_stream( + ) + self.prefetch_coordinator = PrefetchCoordinator() + self.hierarchy = 0 + + self.total_available_parameter_numel = 0 + self.max_available_parameters_in_numel = max_available_parameters_in_numel + + # max distance between two use of the module beyond which module is released + self.max_reuse_distance_in_numel = max_reuse_distance_in_numel + + def 
_increment_available_parameter_numel(self, increment): + self.total_available_parameter_numel += increment + + def _decrement_available_parameter_numel(self, decrement): + self.total_available_parameter_numel -= decrement + + '''-----------------------Tracing and Prefetching ---------------''' + + def record_trace(self, sub_module): + self.prefetch_coordinator.record_trace(sub_module) + + def finish_tracing(self, print_trace=False): + self.prefetch_coordinator.trace_completed = True + + if print_trace: + self.prefetch_coordinator.print_trace() + + # swap in parameter partitions from nvme for those parameters that will be used + # after the ones that are already being prefetched into full parameters + def _prefetch_nvme_param_partitions(self, sub_module, params_in_flight): + numel_in_flight = sum( + [param.ds_tensor.ds_numel for param in params_in_flight]) + upcoming_param_list = self.prefetch_coordinator.get_params_to_prefetch( + sub_module, + numel=2 * numel_in_flight) + swap_in_params = [] + for param in upcoming_param_list: + if len(swap_in_params) >= param.nvme_swapper.available_swap_in_buffers(): + break + if param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE: + swap_in_params.append(param) + + if len(swap_in_params) > 0: + swap_in_params[0].nvme_swapper.swap_in( + swap_in_params, async_op=True) + + # Pre fetches the parameters for sub_modules that comes after + # the current sub_module. This call is asynchronous + def prefetch_next_sub_modules(self, sub_module, numel=5000000, nvme=False): + + params_to_prefetch = [] + if not self.prefetch_coordinator.trace_completed: + return params_to_prefetch + + # prefetch if there is no current prefetching in flight + if not self.in_flight_handles and self.total_available_parameter_numel < self.max_available_parameters_in_numel: + params_to_prefetch = self.prefetch_coordinator.get_params_to_prefetch( + sub_module, + numel=numel) + + self._all_gather(params_to_prefetch, async_op=True) + for param in params_to_prefetch: + param.ds_status = ZeroParamStatus.INFLIGHT + + # keeping track of number of elements consumed by available parmaeters + self._increment_available_parameter_numel(param.ds_numel) + + if nvme: + self._prefetch_nvme_param_partitions( + sub_module, params_to_prefetch) + + self._print_prefetch_elements_info(sub_module, params_to_prefetch) + print_rank_0( + f"{'--' * self.hierarchy}--PreFetching parameters {[param.ds_id for param in params_to_prefetch]} and available {self.total_available_parameter_numel}, max limit {self.max_available_parameters_in_numel}", + force=False) + + def _print_prefetch_elements_info(self, sub_module, params_to_prefetch): + sub_module_numel = 0.0 + for name, param in sub_module.named_parameters(recurse=False): + sub_module_numel += param.ds_numel + numel_being_prefetched = 0 + for param in params_to_prefetch: + numel_being_prefetched = param.ds_numel + print_rank_0( + f"{'--' * self.hierarchy}--PreFetching {numel_being_prefetched} numels and number of numel in the next sub module is {sub_module_numel}", + force=False) + + def increment_step(self, sub_module): + self.prefetch_coordinator.increment_step(sub_module) + + def reset_step(self): + self.prefetch_coordinator.reset_step() + + '''----------------------------------------------------------------------''' + + # Fetches the parameters in the sub_module + # This call is blocking + def fetch_sub_module(self, sub_module): + partitioned_params = [] + params_in_flight = False + print_rank_0( + f"{'--' * self.hierarchy}Fetching params in module 
{debug_module2name_class(sub_module)}" + ) + params_to_fetch = [ + param for _, + param in sub_module.named_parameters(recurse=False) + ] + # print([n for n,p in sub_module.named_parameters(recurse=False)]) + + if hasattr(sub_module, 'ds_external_parameters'): + print_rank_0( + f"{'--' * self.hierarchy}--Fetching external parameters {sub_module.ds_external_parameters()}" + ) + params_to_fetch += [ + param for _, + param in sub_module.ds_external_parameters() + ] + # for _, param in sub_module.named_parameters(recurse=False): + for param in params_to_fetch: + param.ds_active_sub_modules += 1 + print_rank_0( + f"{'--' * self.hierarchy}--Fetching parameters {debug_param2name_id_shape(param)} with active sub modules {param.ds_active_sub_modules}" + ) + + if param.ds_status == ZeroParamStatus.AVAILABLE: + print_rank_0( + f"{'--' * self.hierarchy}--Parameter {debug_param2name_id(param)} is already available" + ) + + if param.ds_status == ZeroParamStatus.NOT_AVAILABLE: + print_rank_0( + f"{'--' * self.hierarchy}--Parameter {debug_param2name_id(param)} is being fetched" + ) + partitioned_params.append(param) + + # keeping track of number of elements consumed by available parmaeters + self._increment_available_parameter_numel(param.ds_numel) + print_rank_0(f"Incrementing with parameter id {param.ds_id}") + + if param.ds_status == ZeroParamStatus.INFLIGHT: + params_in_flight = True + print_rank_0( + f"{'--' * self.hierarchy}--Parameters {debug_param2name_id(param)} is already in flight (prefetched)" + ) + self.hierarchy += 1 + + # parameters are partitioned and need to be allgathered + self._all_gather(partitioned_params, async_op=True) + + # parameters are inflight and communication needs to be completed + if partitioned_params or params_in_flight: + self._synchronize_communication() + + for _, param in sub_module.named_parameters(recurse=False): + param.ds_status = ZeroParamStatus.AVAILABLE + print_rank_0( + f"Param {debug_param2name_id_shape_device(param)} norm={param.norm()}", + force=False) + # print_rank_0(f"After fetching (id, shape, device): {[(param.ds_id, param.shape, param.device) for param in sub_module.named_parameters(recurse=False)]}") + + def release_sub_module(self, sub_module): + self.hierarchy -= 1 + print_rank_0( + f"{'--' * self.hierarchy}Releasing params in module {debug_module2name_class(sub_module)}" + ) + params_to_release = [ + param for _, + param in sub_module.named_parameters(recurse=False) + ] + + if hasattr(sub_module, 'ds_external_parameters'): + # print_rank_0(f"Releasing external parameters {sub_module.ds_external_parameters()}") + params_to_release += [ + param for _, + param in sub_module.ds_external_parameters() + ] + + # for _, param in sub_module.named_parameters(recurse=False): + for param in params_to_release: + param.ds_active_sub_modules -= 1 + if not param.ds_active_sub_modules and not self._keep_for_later( + sub_module) and not param.ds_persist: + + print_rank_0( + f"{'--' * self.hierarchy}--Releasing parameter {debug_param2name_id_numel(param)} active sub modules {param.ds_active_sub_modules} and keep for later {self._keep_for_later(sub_module)}", + force=False) + + # Keeping track of number of elements that are consumed by available parameters + self._decrement_available_parameter_numel(param.ds_numel) + + # report_memory_usage( + # f"Before releasing param {debug_param2name_id_numel(param)}", + # ) + param.partition(hierarchy=self.hierarchy) + + # report_memory_usage( + # f"After releasing param {debug_param2name_id_numel(param)}", + # ) + + 
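The release decision in release_sub_module boils down to three conditions; a compact restatement with illustrative names (in the real code these are param.ds_active_sub_modules, param.ds_persist and _keep_for_later, which compares the module's reuse distance against max_reuse_distance_in_numel):

def should_release(active_sub_modules, persist, reuse_distance_numel, max_reuse_distance_numel):
    keep_for_later = reuse_distance_numel < max_reuse_distance_numel
    return active_sub_modules == 0 and not keep_for_later and not persist

print(should_release(0, False, 10_000_000, 1_000_000))   # True: next reuse is far away
print(should_release(0, False, 1_000, 1_000_000))        # False: reused soon, keep it resident
print(should_release(1, False, 10_000_000, 1_000_000))   # False: another sub-module still needs it
print(should_release(0, True, 10_000_000, 1_000_000))    # False: persistent (small) parameter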
param.ds_status = ZeroParamStatus.NOT_AVAILABLE + else: + print_rank_0( + f"{'--' * self.hierarchy}--Did not release param {debug_param2name_id_numel(param)} with active sub modules {param.ds_active_sub_modules}, keep for later={self._keep_for_later(sub_module)} and persistence={param.ds_persist}", + force=False) + + def release_and_reset_parameter(self, param): + param.ds_active_sub_modules = 0 + if param.ds_status == ZeroParamStatus.AVAILABLE: + print_rank_0( + f"Releasing unpartitioned param {debug_param2name_id_numel(param)} active sub-modules {param.ds_active_sub_modules} and persisitence {param.ds_persist}" + ) + self._decrement_available_parameter_numel(param.ds_numel) + param.partition() + + def _keep_for_later(self, sub_module): + if not self.prefetch_coordinator.trace_completed: + return False + if self.max_reuse_distance_in_numel == 0: + return False + reuse_distance_in_numel = self.prefetch_coordinator.get_reuse_distance_in_numel( + sub_module) + # print_rank_0(f"Reuse distance and numel for sub_module id {sub_module.id} is {reuse_distance_in_numel}") + return reuse_distance_in_numel < self.max_reuse_distance_in_numel + + def _all_gather(self, partitioned_params, async_op=False): + with torch.cuda.stream(self.comm_stream): + handles = partitioned_params[0].all_gather( + param_list=partitioned_params, + async_op=async_op, + hierarchy=self.hierarchy) if partitioned_params else None + + if handles is not None: + self.in_flight_handles.extend(handles) + self.params_in_flight.extend(partitioned_params) + + def _synchronize_communication(self, synchronize_streams=True): + assert len(self.params_in_flight) == len(self.in_flight_handles) + for handle, param in zip(self.in_flight_handles, self.params_in_flight): + if handle is not None: + with torch.cuda.stream(self.comm_stream): + handle.wait() + param.ds_status = ZeroParamStatus.AVAILABLE + self.comm_stream.synchronize() + torch.cuda.synchronize() if synchronize_streams else None + self.in_flight_handles = [] + self.params_in_flight = [] + + +class PreBackwardFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, module, pre_backward_function, outputs): + ctx.module = module + ctx.pre_backward_function = pre_backward_function + if not hasattr(module, "applied_pre_backward_ref_cnt"): + module.applied_pre_backward_ref_cnt = 0 + module.applied_pre_backward_ref_cnt += 1 + # print(f"After Forward: {ctx.module.__class__.__name__}") + outputs = outputs.detach() + return outputs + + @staticmethod + def backward(ctx, *args): + # print(f"Before Backward: {ctx.module.__class__.__name__}") + ctx.pre_backward_function(ctx.module) + return (None, None) + args + + +class PostBackwardFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, module, pre_backward_function, output): + ctx.module = module + if output.requires_grad: + # TODO SOME TIMES post backward does not report_memory_usage()ered debug in detail + # Should only cause increase in memory not correctness issue + # if output.grad_fn.__class__.__name__ == 'ViewBackward': + # ctx.view=True + # print(f"Warning view tensor for input to module : {module.__class__.__name__}. Backward hooks may not trigger properly") + # assert len(module.parameters(recurse=False)), "The input tensor to the module is a view, and autograd Function or register_hook is not triggered with view tensors." 
+ # if module.ds_grads_remaining == 0: + # print(f"Before Forward: {ctx.module.__class__.__name__}") + module.ds_grads_remaining += 1 + ctx.pre_backward_function = pre_backward_function + output = output.detach() + return output + + @staticmethod + def backward(ctx, *args): + ctx.module.ds_grads_remaining = ctx.module.ds_grads_remaining - 1 + if ctx.module.ds_grads_remaining == 0: + ctx.pre_backward_function(ctx.module) + # print(f"After Backward: {ctx.module.__class__.__name__}") + return (None, None) + args + + +INITIAL_MICRO_STEP_ID = -1 + + +@OPTIMIZER_WRAPPERS.register_module +class ZeroRedundancyOptimizer_Level_3(Optimizer): + """ + ZeroRedundancyOptimizer_Level_3 designed to reduce the memory footprint + required for training large deep learning models. + + For more details please report_memory_usage() Optimization Towards Training A Trillion Parameter Models + https://arxiv.org/abs/1910.02054 + + """ + + def __init__(self, + module, + init_optimizer, + dp_paralllel_mode=ParallelMode.DATA, + static_loss_scale=1.0, + dynamic_loss_scale=False, + dynamic_loss_args=None, + verbose=False, + contiguous_gradients=True, + reduce_bucket_size=500000000, + prefetch_bucket_size=50000000, + max_reuse_distance=1000000000, + max_live_parameters=1000000000, + param_persistence_threshold=100000, + reduce_scatter=True, + overlap_comm=False, + offload_optimizer_config=None, + offload_param_config=None, + sub_group_size=1000000000000, + clip_grad=0.0, + allreduce_always_fp32=False, + postscale_gradients=True, + gradient_predivide_factor=1.0, + gradient_accumulation_steps=1, + aio_config=None): + # mpu = None + # mpu is removed from the parameter list + # tensor parallel will be automatically detected later + + # LSG: default parameter for compatibility + elastic_checkpoint = False + timers = None + dp_process_group = gpc.get_group(dp_paralllel_mode) + self.verbose = verbose + + # LSG: in deepspeed deepspeed/runtime/zero/partition_parameters.py, + # self.local_device = torch.device('cuda:{}'.format(os.environ["LOCAL_RANK"])) + # the local device is obtained by env var LOCAL_RANK, thus, need to change this + # env var on the spot as LOCAL_RANK may not be present + if not 'LOCAL_RANK' in os.environ: + device_id = gpc.get_global_rank() % torch.cuda.device_count() + os.environ['LOCAL_RANK'] = str(device_id) + + # self.local_device = torch.device('cuda:{}'.format(os.environ["LOCAL_RANK"])) + + if self.verbose: + report_memory_usage("Stage 3 initialize beginning") + + if dist.get_rank() == 0: + print(f"Reduce bucket size {reduce_bucket_size}") + print(f"Allgather bucket size {prefetch_bucket_size}") + # The fused optimizer does all the work. We need this layer for two reason: + # 1. maintain same user API from apex.fp16_utils + # 2. keep common stuff here in case we need to add ne552w fused optimizer later + + # differences from apex.fp16_utils: + # - assume all model params in fp16 + # - assume all params requires grad + # - flat by groups, not keeping state. TODO: remove state explicitly? + # - master gard and unflat master weight never exist. TODO: a way to save out unflat master? 
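PreBackwardFunction and PostBackwardFunction above rely on a standard trick: an identity autograd.Function whose backward runs a callback before gradients continue flowing, which is how ZeRO-3 re-gathers partitioned parameters just in time for a module's backward pass. A minimal self-contained version with hypothetical names:

import torch

class PreBackwardHook(torch.autograd.Function):
    # pass-through in forward; fires a callback at the start of backward
    @staticmethod
    def forward(ctx, module_name, callback, outputs):
        ctx.module_name = module_name
        ctx.callback = callback
        return outputs.detach()             # same detach trick as PreBackwardFunction

    @staticmethod
    def backward(ctx, *grad_outputs):
        ctx.callback(ctx.module_name)       # e.g. all-gather this module's partitioned params
        return (None, None) + grad_outputs  # no gradients for the non-tensor arguments

x = torch.ones(3, requires_grad=True)
y = PreBackwardHook.apply("linear1", lambda name: print(f"pre-backward of {name}"), x * 2)
y.sum().backward()
print(x.grad)   # tensor([2., 2., 2.]) -- gradients still reach x through the hook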
+ if not torch.cuda.is_available: + raise SystemError("Cannot use fp16 without CUDA.") + self.optimizer = init_optimizer + self.defaults = init_optimizer.defaults + + # Load pre-built or JIT compile (un)flatten ops + util_ops = UtilsBuilder().load() + self.flatten = util_ops.flatten + self.unflatten = util_ops.unflatten + self.dtype = self.optimizer.param_groups[0]['params'][0].dtype + + if not all(is_zero_param(p) for p in module.parameters()): + group = None + if gpc.is_initialized(ParallelMode.DATA): + group = gpc.get_group(ParallelMode.DATA) + Init(module=module, data_parallel_group=group, dtype=self.dtype) + + for m in module.modules(): + _init_external_params(m) + + self.module = module + self.elastic_checkpoint = elastic_checkpoint + self.overlap_comm = overlap_comm + + # Replace ._parameters with a new class to enable auto-registration of + # external parameters + _inject_parameters(module, ZeROOrderedDict) + + if self.overlap_comm: + self.gpu_sum = torch.zeros(1, dtype=torch.float).cuda() + + ###################### offload optimizer setup ################################## + self.optimizer_swapper = None + self.swap_optimizer = False + + self.offload_optimizer = False + self.offload_optimizer_pin_memory = False + self.offload_optimizer_fast_init = False + if offload_optimizer_config is not None: + self.offload_optimizer = True + self.offload_optimizer_pin_memory = offload_optimizer_config[ + OFFLOAD_OPTIMIZER_PIN_MEMORY] + self.swap_optimizer = offload_optimizer_config[ + OFFLOAD_OPTIMIZER_DEVICE] == OFFLOAD_NVME_DEVICE + self.offload_optimizer_fast_init = offload_optimizer_config[ + OFFLOAD_OPTIMIZER_FAST_INIT] + + ###################### offload param setup ################################## + self.offload_param = False + self.offload_param_pin_memory = False + self.params_in_nvme_and_cpu = False + self.max_params_in_cpu = 0 + if offload_param_config is not None: + assert self.offload_optimizer, "parameter offload is only available with optimizer state offload" + self.offload_param = True + self.offload_param_pin_memory = offload_param_config[ + OFFLOAD_PARAM_PIN_MEMORY] + self.params_in_nvme_and_cpu = offload_param_config[ + OFFLOAD_PARAM_DEVICE] == OFFLOAD_NVME_DEVICE + self.max_params_in_cpu = offload_param_config[OFFLOAD_PARAM_MAX_IN_CPU] + if self.verbose: + print_rank_0( + f"FP16 params swapping is {self.params_in_nvme_and_cpu}, Max params in CPU is {self.max_params_in_cpu}", + force=False) + + self.deepspeed_adam_offload = (self.offload_optimizer + and type(init_optimizer) == DeepSpeedCPUAdam) + + self.device = torch.cuda.current_device( + ) if not self.offload_optimizer else OFFLOAD_CPU_DEVICE + ############################################################################ + + if self.verbose: + report_memory_usage("Before Partitioned Parameter Coordinator") + + fetch_stream = torch.cuda.Stream() if self.overlap_comm else None + self.param_coordinator = PartitionedParameterCoordinator( + comm_stream=fetch_stream, + max_reuse_distance_in_numel=int(max_reuse_distance), + max_available_parameters_in_numel=int(max_live_parameters)) + + if self.verbose: + report_memory_usage("After Partitioned Parameter Coordinator") + + # self.param_coordinator = PartitionedParameterCoordinator(comm_stream=torch.cuda.Stream()) + # -------------Stage 3 Setup-------------------# + # parameters smaller than the threshold will be collectively gathered at the + # end of the optimizer step and will be kept till the end of the backward pass + # TODO maybe worth just replicating these parameters and 
doing all reduce for them + self.persistence_threshold = int(param_persistence_threshold) + + self.persistent_parameters = self.persistent_parameters() + + self.setup_zero_stage3_hooks() + + # resetting ds_tensor just in case parameters have been changed after initialization + # example .half() or .to() + # self.reset_ds_tensor() + # ---------------------------------------------# + + self.timers = timers + + self.reduce_scatter = reduce_scatter + + self.dp_process_group = dp_process_group + + self.partition_count = dist.get_world_size(group=self.dp_process_group) + + if gpc.is_initialized(ParallelMode.TENSOR) is None: + self.model_parallel_group = None + self.model_parallel_rank = 0 + else: + self.model_parallel_group = gpc.get_group(ParallelMode.TENSOR) + self.model_parallel_rank = gpc.get_local_rank(ParallelMode.TENSOR) + + self.overflow = False + self.clip_grad = clip_grad + self.allreduce_always_fp32 = allreduce_always_fp32 + self.gradient_predivide_factor = gradient_predivide_factor + self.postscale_gradients = postscale_gradients + self.gradient_accumulation_steps = gradient_accumulation_steps + self.micro_step_id = INITIAL_MICRO_STEP_ID + + if self.reduce_scatter: + assert not self.allreduce_always_fp32, "allreduce_always_fp32 is not yet supported with ZeRO-2 with reduce scatter enabled" + assert self.gradient_predivide_factor == 1.0, "gradient_predivide_factor != 1.0 is not yet supported with ZeRO-2 with reduce scatter enabled" + assert self.postscale_gradients, "pre-scale gradients is not yet supported with ZeRO-2 with reduce scatter enabled" + + # Holds the mode parameter + # The param.data may not hold any meaningful data + # when param's status is NOT_AVAILABLE or IN_FLGHT + self.fp16_groups = [] + + # Hold partitioned parameters + self.fp16_partitioned_groups = [] + + # Holds a fused and flattened copy of the parameters + self.fp16_partitioned_groups_flat = [] + self.fp16_partitioned_groups_flat_numel = [] + + # defragmented pinned memory + self.param_groups_fp16_flat_cpu_memory = [] + + # a single 32-bit partition of the parallel partitioned parameters + # that this process will update + self.fp32_partitioned_groups_flat = [] + self.next_swappable_fp32_partitioned_groups = [] + + # number of elements per partition in each group + self.partition_size = [] + + self.all_reduce_print = False + + self.prefetch_elements = int(prefetch_bucket_size) + + # padding on each partition for alignment purposes + self.groups_padding = [] + + self.sub_group_size = sub_group_size + + self.sub_group_to_group_id = {} + + if self.verbose: + report_memory_usage("Before creating fp16 partitions") + self._create_fp16_partitions_with_defragmentation() + num_fp16_subgroups = len(self.fp16_partitioned_groups_flat) + if self.verbose: + report_memory_usage( + f"After creating fp16 partitions: {num_fp16_subgroups}") + + # Optimizer ensor swapping + if self.swap_optimizer: + self._configure_tensor_swapping( + offload_optimizer_config, aio_config) + + if self.verbose: + report_memory_usage("Before creating fp32 partitions") + self._create_fp32_partitions() + if self.verbose: + report_memory_usage("After creating fp32 partitions") + dist.barrier() + + # To support pipelined optimizer swapping + self._create_next_swappable_fp32_groups() + + if self.verbose: + report_memory_usage("Before initializing optimizer states") + self.initialize_optimizer_states() + if self.verbose: + report_memory_usage("After initializing optimizer states") + dist.barrier() + + if dist.get_rank() == 0 and self.verbose: + 
print(f"optimizer state initialized") + + self.reduce_bucket_size = int(reduce_bucket_size) + + self.reduction_event = torch.cuda.Event( + enable_timing=False, blocking=False) + + self.reduction_stream = torch.cuda.Stream( + ) if self.overlap_comm else torch.cuda.current_stream() + self.callback_queued = False + self.copy_grad_stream = torch.cuda.Stream() + + self.param_dict = {} + + # map between param_id and bool to specify if a param is in this partition + self.is_param_in_current_partition = {} + + self.contiguous_gradients = contiguous_gradients + self.extra_large_param_to_reduce = None + self.grads_in_ipg_bucket = [] + self.params_in_ipg_bucket = [] + self.elements_in_ipg_bucket = 0 + self.params_already_reduced = [] + self.is_gradient_accumulation_boundary = True + self._release_ipg_buffers() + self.previous_reduced_grads = None + + # simplified param id + self.param_id = {} + + count = 0 + for i, params_group in enumerate(self.fp16_groups): + for param in params_group: + unique_id = id(param) + self.param_id[unique_id] = count + self.param_dict[count] = param + self.params_already_reduced.append(False) + count = count + 1 + + # Largest partitioned param + largest_partitioned_param_numel = max([ + max([tensor.numel() for tensor in fp16_partitioned_group]) + for fp16_partitioned_group in self.fp16_partitioned_groups + ]) + if self.verbose: + print_rank_0( + f'Largest partitioned param numel = {largest_partitioned_param_numel}', + force=False) + + if self.verbose: + report_memory_usage(f"Before Set Grad positions") + + self.grad_position = {} + self.set_grad_positions() + if self.verbose: + report_memory_usage(f"Before CPU Offload initialization") + + self.grads_in_partition = None + + if self.offload_optimizer: + self.accumulated_grads_in_cpu = {} + self.norm_for_param_grads = {} + self.local_overflow = False + self.temp_grad_buffer_for_gpu_offload = torch.zeros( + largest_partitioned_param_numel, + device=torch.cuda.current_device(), + dtype=self.dtype) + self.temp_grad_gpu_buffer = torch.zeros(largest_partitioned_param_numel, + device=torch.cuda.current_device(), + dtype=self.dtype) + + if self.verbose: + report_memory_usage(f"After CPU Offload initialization") + + # stores if a partition has been reduced in this step + self.is_partition_reduced = {} + + # stores if a grad in a partition has been computed or not + self.is_grad_computed = {} + + # will store the averaged gradients required by this parititon + self.averaged_gradients = {} + + # creates backward hooks for gradient partitioning + self.create_reduce_and_remove_grad_hooks() + + # exit(0) + + # we may have a way of fusing dynamic scale. 
Not supported for now + if self.dtype == torch.float or not dynamic_loss_scale: + loss_scale_value = 1.0 if self.dtype == torch.float else static_loss_scale + + self.dynamic_loss_scale = False + self.loss_scaler = LossScaler(scale=loss_scale_value) + cur_iter = 0 + else: + if dynamic_loss_args is None: + self.loss_scaler = DynamicLossScaler() + else: + self.loss_scaler = DynamicLossScaler(**dynamic_loss_args) + + self.dynamic_loss_scale = True + + self.debug_fp16_grads = [{} for _ in self.fp16_groups] + + if dist.get_rank(group=self.dp_process_group) == 0 and self.verbose: + report_memory_usage(f"After initializing ZeRO optimizer") + + def _configure_tensor_swapping(self, offload_optimizer_config, aio_config): + nvme_swap_folder = os.path.join( + offload_optimizer_config[OFFLOAD_OPTIMIZER_NVME_PATH], + 'zero_stage_3') + os.makedirs(nvme_swap_folder, exist_ok=True) + if torch.distributed.get_rank() == 0 and self.verbose: + print(f'Tensor Swapping: Adding optimizer tensors') + + swapper_type = PipelinedOptimizerSwapper if offload_optimizer_config[ + OFFLOAD_OPTIMIZER_PIPELINE] else PartitionedOptimizerSwapper + + self.optimizer_swapper = swapper_type( + swap_config=offload_optimizer_config, + aio_config=aio_config, + base_folder=nvme_swap_folder, + optimizer=self.optimizer, + largest_numel=max(self.fp16_partitioned_groups_flat_numel), + device=self.device, + dtype=torch.float32, + timers=self.timers) + + def _create_fp16_partitions(self): + dist.barrier() + partition_id = dist.get_rank(group=self.dp_process_group) + + # loop to deal with groups + for j, param_group in enumerate(self.optimizer.param_groups): + + sub_groups = self._create_fp16_sub_groups(param_group['params']) + for sub_group in sub_groups: + i = len(self.fp16_groups) + + # push this group to list before modify + self.fp16_groups.append(sub_group) + self.sub_group_to_group_id[i] = j + + # These are the list of the partitioned parameters + self.fp16_partitioned_groups.append( + [param.ds_tensor for param in self.fp16_groups[i]]) + + if self.verbose: + print_rank_0( + f"fp16 group {i} partitioned_param norms : {[param.ds_tensor.norm().item() for param in self.fp16_groups[i]]}" + ) + + # Record padding required to align group to world size (only applies to last rank) + if partition_id == dist.get_world_size(group=self.dp_process_group) - 1: + padding = [p.padding_size() for p in self.fp16_groups[i]] + else: + padding = [0] * len(self.fp16_groups[i]) + self.groups_padding.append(padding) + + # not sure why apex was cloning the weights before flattening + # removing cloning here + if self.verbose: + report_memory_usage(f"Before Flattening param group {i}") + + if not self.offload_param: + if self.verbose: + report_memory_usage( + f"Before moving param group {i} to CPU") + # move all the parameters to cpu to free up GPU space for creating flat buffer + move_to_cpu(self.fp16_partitioned_groups[i]) + if self.verbose: + report_memory_usage( + f"After moving param group {i} to CPU") + + # create flat buffer in CPU and move to GPU + self.fp16_partitioned_groups_flat.append( + self.flatten_dense_tensors_aligned( + self.fp16_partitioned_groups[i], + dist.get_world_size(group=self.dp_process_group)).cuda( + torch.cuda.current_device())) + + if self.verbose: + report_memory_usage( + f"After flattening and moving param group {i} to GPU" + ) + else: + # Without the detach, flattening becomes part of the + # model graph, causing errors downstream + self.fp16_partitioned_groups_flat.append( +
self.flatten_dense_tensors_aligned( + self.fp16_partitioned_groups[i], + dist.get_world_size( + group=self.dp_process_group)).detach().pin_memory()) + + if self.verbose: + report_memory_usage(f"After Flattening param group {i}") + + # set model fp16 weight to slices of flattened buffer + updated_params = self.unflatten(self.fp16_partitioned_groups_flat[i], + self.fp16_partitioned_groups[i]) + + for partitioned_param, q in zip(self.fp16_partitioned_groups[i], updated_params): + partitioned_param.data = q.data + + def _move_to_flat_buffer(self, param_list, flat_buffer, avoid_copy=False): + '''If flat_buffer is None then the parameters in the param_list are + not copied to the flat buffer. This is because they exceed max_params_in_cpu. + Some of these parameters may already be in CPU in unflattened buffers, + or they may be in GPU, or they may be in NVMe. If they are in NVMe, then + they will be marked as NOT_AVAILABLE, and will be moved to CPU when they are + needed during training.''' + if flat_buffer is None: + # this dst buffer is on NVMe, so skip this + return + + start = 0 + for param in param_list: + src = param.ds_tensor + dest = flat_buffer.narrow(0, start, src.ds_numel) + start = start + src.ds_numel + '''if the parameter was initialized in nvme then bring it to the destination buffer directly''' + if src.status == PartitionedParamStatus.NOT_AVAILABLE: + if self.verbose: + print_rank_0( + f"Swapping in {param.ds_id} with partition size {param.ds_tensor.ds_numel} permanently to CPU" + ) + param.nvme_swapper.swap_into_buffer(param, dest) + src.data = dest.data + src.status = PartitionedParamStatus.AVAILABLE + else: + assert src.status == PartitionedParamStatus.AVAILABLE, "Partitioned param must be available here" + if not avoid_copy: + dest.data.copy_(src.data) + src.data = dest.data + + # Final location must be gpu/cpu in this case + param.ds_tensor.final_location = 'not-nvme' + + def _create_param_groups_fp16_flat_cpu_memory(self): + + aggregate_params_count = 0 + + for j, param_group in enumerate(self.optimizer.param_groups): + params_in_group = sum( + [p.ds_tensor.ds_numel for p in param_group['params']]) + + flat_buffer_size = params_in_group + + if self.params_in_nvme_and_cpu and \ + aggregate_params_count + params_in_group > self.max_params_in_cpu: + flat_buffer_size = max(0, + self.max_params_in_cpu - aggregate_params_count) + + aggregate_params_count += params_in_group + + if flat_buffer_size > 0: + if self.verbose: + print_rank_0(f"group {j} flat buffer size {flat_buffer_size}", + force=False) + self.param_groups_fp16_flat_cpu_memory.append( + torch.empty(int(flat_buffer_size), + dtype=self.dtype, + pin_memory=True)) + else: + if self.verbose: + print_rank_0( + f"No flat buffer size.
Param group size was {params_in_group}", + force=False) + + self.param_groups_fp16_flat_cpu_memory.append( + torch.empty(1, + dtype=self.dtype)) + + def _create_fp16_partitions_with_defragmentation(self): + dist.barrier() + partition_id = dist.get_rank(group=self.dp_process_group) + create_fp16_flat_reuse_buffer = False + largest_partition_numel = [] + max_partition_numel = 0 + + # create a flat CPU memory allocation for each param group + if self.offload_param: + self._create_param_groups_fp16_flat_cpu_memory() + + # loop to deal with groups + for j, param_group in enumerate(self.optimizer.param_groups): + + sub_groups = self._create_fp16_sub_groups(param_group['params']) + + if self.verbose: + print_rank_0( + f'fp16 group {j} has {len(sub_groups)} subgroups', force=False) + + flat_offset = 0 + for sub_group in sub_groups: + i = len(self.fp16_groups) + + # push this group to list before modify + self.fp16_groups.append(sub_group) + self.sub_group_to_group_id[i] = j + + # comment out for zero_to_fp32 debug + # if torch.distributed.get_rank() == 0: + # for param in self.fp16_groups[i]: + # print(f"{debug_param2name_id_shape(param)} {param.ds_shape}") + + # These are the list of the partitioned parameters + self.fp16_partitioned_groups.append( + [param.ds_tensor for param in self.fp16_groups[i]]) + + total_elements = sum( + [t.ds_numel for t in self.fp16_partitioned_groups[i]]) + self.fp16_partitioned_groups_flat_numel.append(total_elements) + + if total_elements > max_partition_numel: + largest_partition_numel = [ + t.ds_numel for t in self.fp16_partitioned_groups[i] + ] + max_partition_numel = total_elements + + if self.verbose: + print_rank_0( + f"fp16 group {i} partitioned_param norms : {[param.ds_tensor.norm().item() for param in self.fp16_groups[i]]}" + ) + + # Record padding required to align group to world size (only applies to last rank) + if partition_id == dist.get_world_size(group=self.dp_process_group) - 1: + padding = [p.padding_size() for p in self.fp16_groups[i]] + else: + padding = [0] * len(self.fp16_groups[i]) + self.groups_padding.append(padding) + + # not sure why apex was cloning the weights before flattening + # removing cloning here + if self.verbose: + report_memory_usage( + f"Before Flattening param subgroup {i}") + + # all partitioned parameters remain in GPU during training + if not self.offload_param: + if self.verbose: + report_memory_usage( + f"Before moving param subgroup group {i} to CPU") + # move all the parameters to cpu to free up GPU space for creating flat buffer + move_to_cpu(self.fp16_partitioned_groups[i]) + if self.verbose: + report_memory_usage( + f"After moving param subgroup {i} to CPU") + + # create flat buffer in CPU and move to GPU + self.fp16_partitioned_groups_flat.append( + self.flatten_dense_tensors_aligned( + self.fp16_partitioned_groups[i], + 1).cuda(torch.cuda.current_device())) + if self.verbose: + report_memory_usage( + f"After flattening and moving param subgroup {i} to GPU") + + # all partitioned parameters are in CPU during training + else: + if self.verbose: + print_rank_0( + f"Params in nvme and cpu {self.params_in_nvme_and_cpu}") + # Flat buffer may not be available for parameters that reside in NVME + if not self.params_in_nvme_and_cpu or flat_offset + total_elements <= \ + self.param_groups_fp16_flat_cpu_memory[ + j].numel(): + fp16_partitioned_group_flat = self.param_groups_fp16_flat_cpu_memory[ + j].narrow(0, + flat_offset, + total_elements) + if self.verbose: + print_rank_0( + f"Creating a flat buffer for subgroup {i} 
requiring {total_elements} elements, and cumulative CPU elemets {flat_offset + total_elements}", + force=False) + # these parameters reside in NVME and + elif self.params_in_nvme_and_cpu: + fp16_partitioned_group_flat = None + if self.verbose: + print_rank_0( + f"No flat buffer for sub group {i} of {total_elements} elements", + force=False) + else: + assert False, "Either params are in nvme, or they are in CPU memory. This code path should not be triggered. Please report_memory_usage()ms_in_cpu and params_in_nvme configs" + + self.fp16_partitioned_groups_flat.append( + fp16_partitioned_group_flat) + flat_offset += total_elements + + # move param to flat buffer for both param offload on/off + self._move_to_flat_buffer(self.fp16_groups[i], + self.fp16_partitioned_groups_flat[i], + avoid_copy=not self.offload_param) + if self.verbose: + report_memory_usage(f"After Flattening param group {i}") + + # create a pinned memory to be used for swapping out params to NVME after optimizer step + if self.fp16_partitioned_groups_flat[-1] is None: + create_fp16_flat_reuse_buffer = True + + if self.verbose: + report_memory_usage(f"After Flattening param subgroup {i}") + + if create_fp16_flat_reuse_buffer: + assert len( + largest_partition_numel) > 0, f'Unexpected that largest partition is empty' + self.fp16_groups[0][0].nvme_swapper.reserve_partitioned_swap_space( + largest_partition_numel) + + def _swap_in_sub_group_to_flat_buffer(self, flat_buffer, sub_group_id): + offset = 0 + elements_in_sub_group = sum( + [t.ds_numel for t in self.fp16_partitioned_groups[sub_group_id]]) + assert (flat_buffer.numel() == elements_in_sub_group) + for param, partitioned_param in zip(self.fp16_groups[sub_group_id], self.fp16_partitioned_groups[sub_group_id]): + dest = flat_buffer.narrow(0, offset, partitioned_param.ds_numel) + if partitioned_param.status == PartitionedParamStatus.NOT_AVAILABLE: + if self.verbose: + print_rank_0( + f"Swapping in {param.ds_id} with elements {param.ds_numel} and partition {param.ds_tensor.ds_numel}" + ) + param.nvme_swapper.swap_in([param], async_op=False) + dest.data.copy_(partitioned_param.data) + param.nvme_swapper.remove_partition_and_release_buffers([ + param]) + if self.verbose: + print_rank_0(f"Swapping in {param.ds_id} done") + else: + dest.data.copy_(partitioned_param.data) + offset += partitioned_param.ds_numel + + def _create_next_swappable_fp32_groups(self): + reverse_order_indices = [ + i for i in range(len(self.fp32_partitioned_groups_flat)) + ] + reverse_order_indices.reverse() + + next_group = None + for i in reverse_order_indices: + self.next_swappable_fp32_partitioned_groups.append(next_group) + if self._swappable_optimizer_subgroup(i): + next_group = self.fp32_partitioned_groups_flat[i] + + self.next_swappable_fp32_partitioned_groups.reverse() + + def _get_sub_group_partitions(self, sub_group_id): + sub_group_partitions = [] + for param, partitioned_param in zip(self.fp16_groups[sub_group_id], self.fp16_partitioned_groups[sub_group_id]): + if partitioned_param.status == PartitionedParamStatus.NOT_AVAILABLE: + swap_path = param.nvme_swapper.get_path(param, True) + sub_group_partitions.append((partitioned_param, + param.ds_tensor.ds_numel, + swap_path)) + else: + sub_group_partitions.append((partitioned_param, + partitioned_param.ds_numel, + None)) + + return sub_group_partitions + + def _create_fp32_partitions(self): + cpu_memory_usage = 0 + cpu_memory_sub_groups = 0 + nvme_memory_usage = 0 + num_swappable_partitions = 0 + num_swap_from_nvme_partitions = 0 + 
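+ # bookkeeping below: count fp32 partitions initialized by swapping in from NVMe vs. copied from CPU, and the bytes involved in each path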
num_swap_from_cpu_partitions = 0 + swap_from_nvme_memory_usage = 0 + swap_from_cpu_memory_usage = 0 + GIGA_BYTES = (1024 ** 3) + + swappable_fp32_tensors = [] + swappable_fp16_src_tensors = [] + nvme_fp16_partitions_info = [] + nvme_fp16_num_elems = [] + nvme_fp32_dest_tensors = [] + fp32_element_size = torch.tensor( + [], dtype=torch.float32).element_size() + + for i, tensor in enumerate(self.fp16_partitioned_groups_flat): + num_elements = self.fp16_partitioned_groups_flat_numel[i] + + # a partition of the fp32 master weights that will be updated by this process + if self._swappable_optimizer_subgroup(i): + self.fp32_partitioned_groups_flat.append(torch.Tensor()) + nvme_memory_usage += (fp32_element_size * num_elements) + num_swappable_partitions += 1 + + if self.params_in_nvme_and_cpu and tensor is None: + num_swap_from_nvme_partitions += 1 + swap_from_nvme_memory_usage += ( + fp32_element_size * num_elements) + if self.offload_optimizer_fast_init: + sub_group_partitions = self._get_sub_group_partitions( + i) + nvme_fp16_partitions_info.append(sub_group_partitions) + nvme_fp16_num_elems.append(num_elements) + nvme_fp32_dest_tensors.append( + self.fp32_partitioned_groups_flat[i]) + else: + unpinned_fp32_buffer = torch.empty(num_elements, + device=self.device, + dtype=torch.float) + self._swap_in_sub_group_to_flat_buffer( + unpinned_fp32_buffer, i) + self.optimizer_swapper.initialize_parameters( + parameters=[self.fp32_partitioned_groups_flat[i]], + src_tensors=[unpinned_fp32_buffer]) + else: + num_swap_from_cpu_partitions += 1 + swap_from_cpu_memory_usage += ( + fp32_element_size * num_elements) + swappable_fp32_tensors.append( + self.fp32_partitioned_groups_flat[i]) + swappable_fp16_src_tensors.append( + self.fp16_partitioned_groups_flat[i]) + else: + cpu_memory_usage += (fp32_element_size * num_elements) + cpu_memory_sub_groups += 1 + + if self.params_in_nvme_and_cpu and tensor is None: + unpinned_fp32_buffer = torch.empty(num_elements, + device=self.device, + dtype=torch.float) + self._swap_in_sub_group_to_flat_buffer( + unpinned_fp32_buffer, i) + self.fp32_partitioned_groups_flat.append( + unpinned_fp32_buffer) + else: + self.fp32_partitioned_groups_flat.append( + self.fp16_partitioned_groups_flat[i].to( + self.device).clone().float().detach()) + + self.fp32_partitioned_groups_flat[ + i].requires_grad = True # keep this in case internal optimizer uses it + + if len(swappable_fp32_tensors) > 0: + self.optimizer_swapper.initialize_parameters( + parameters=swappable_fp32_tensors, + src_tensors=swappable_fp16_src_tensors) + + if len(nvme_fp32_dest_tensors) > 0: + fp16_pinned_buffers = self.fp16_groups[0][ + 0].nvme_swapper.reserve_available_buffers() + assert len(fp16_pinned_buffers) > 0 + self.optimizer_swapper.initialize_from_swapped_fp16_params( + fp16_partitions_info=nvme_fp16_partitions_info, + fp16_num_elems=nvme_fp16_num_elems, + fp16_pinned_buffers=fp16_pinned_buffers, + fp32_parameters=nvme_fp32_dest_tensors) + self.fp16_groups[0][0].nvme_swapper.release_reserved_buffers() + + nvme_gigabytes = nvme_memory_usage / GIGA_BYTES + if self.verbose: + print_rank_0( + f'Swappable FP32 Partitions: count={num_swappable_partitions} size={nvme_gigabytes:5.2f} GB', + force=False) + if self.params_in_nvme_and_cpu: + if self.verbose: + print_rank_0( + f'Swap from NVMe Partitions: count = {num_swap_from_nvme_partitions}, size = {swap_from_nvme_memory_usage / GIGA_BYTES:5.2f}GB', + force=False) + print_rank_0( + f'Swap from CPU Partitions: count = {num_swap_from_cpu_partitions}, size = 
{swap_from_cpu_memory_usage / GIGA_BYTES:5.2f}GB', + force=False) + + cpu_memory_gigabytes = cpu_memory_usage / GIGA_BYTES + if self.verbose: + print_rank_0( + f'In-Memory FP32 Partitions: count={cpu_memory_sub_groups} size={cpu_memory_gigabytes:5.2f} GB', + force=False) + + # Clear for on-the-fly population before the optimizer step + for param_group in self.optimizer.param_groups: + param_group['params'] = [] + + def _create_fp16_sub_groups(self, params_group): + + params_group_numel = sum([param.partitioned_size() + for param in params_group]) + sub_group_size = self.sub_group_size + + if sub_group_size is None or sub_group_size >= params_group_numel: + return [params_group] + + sub_groups = [] + sub_group = [] + local_sub_group_size = 0 + for param in params_group: + + sub_group.append(param) + local_sub_group_size += param.partitioned_size() + + if local_sub_group_size >= sub_group_size or id(param) == id( + params_group[-1]): + sub_groups.append(sub_group) + + sub_group = [] + local_sub_group_size = 0 + + return sub_groups + + # def reset_ds_tensor(self): + # for name, param in self.module.named_parameters(recurse=True): + # assert hasattr(param,'ds_id'), "Parameters have not been converted to be Zero 3 compatible" + # assert (param.ds_status == ZeroParamStatus.NOT_AVAILABLE), "All the parameters must have been partitioned by now" + # param.ds_tensor.data = param.data + + def setup_zero_stage3_hooks(self): + self.hierarchy = 0 + self._register_hooks_recursively(self.module) + + # reset step at the beginning of forward + def _pre_forward_hook(module, *args): + self.param_coordinator.reset_step() + + # reset step if in inference mode + def _end_of_forward_hook(module, *args): + if not torch._C.is_grad_enabled(): + self.param_coordinator.reset_step() + + # likely one of them should be enough but just to be safe + self.module.register_forward_hook(_end_of_forward_hook) + self.module.register_forward_pre_hook(_pre_forward_hook) + + # Add top todule to stack trace + global FWD_MODULE_STACK + FWD_MODULE_STACK.append(self.module) + + def persistent_parameters(self): + persistent_params = [] + total_persistent_parameters = 0 + params_count = 0 + for _, param in self.module.named_parameters(recurse=True): + if param.ds_numel < self.persistence_threshold: + params_count += 1 + param.ds_persist = True + persistent_params.append(param) + total_persistent_parameters += param.ds_numel + + if self.verbose: + print_rank_0( + f"ZeRO 3: Total persistent parameters: {total_persistent_parameters} in {params_count} params", + force=False) + return persistent_params + + def _register_hooks_recursively(self, module, count=[0]): + my_count = count[0] + module.id = my_count + + # print(f"{module.__class__} : {module.id}") + + for child in module.children(): + count[0] = count[0] + 1 + self._register_hooks_recursively(child, count=count) + + def _pre_forward_module_hook(module, *args): + self.pre_sub_module_forward_function(module) + + def _post_forward_module_hook(module, input, output): + global FWD_MODULE_STACK + FWD_MODULE_STACK.pop() + if output is None: + output = [] + elif not isinstance(output, (list, tuple)): + if torch.is_tensor(output): + output = [output] + else: + # print(f'got UNKNOWN type {type(output)}') + outputs = [] + output = output if isinstance( + output, dict) else vars(output) + for name, val in output.items(): + if not name.startswith('__') and torch.is_tensor(val): + outputs.append(val) + output = outputs + # print(f'convert output to {output}') + + for item in filter(lambda item: 
is_zero_param(item), output): + if not any(id(item) in m._external_params for m in FWD_MODULE_STACK): + item.ds_active_sub_modules += 1 + module_to_register = FWD_MODULE_STACK[-1] + + if self.verbose: + print_rank_0( + f'Registering dangling parameter for module {module_to_register.__class__.__name__}.', + force=False) + register_external_parameter(module_to_register, item) + + # It's possible that the parameter was already external to the completed module. If so, remove it the + # registration as it will be covered by the outer module instead. + if id(item) in module._external_params: + if self.verbose: + print_rank_0( + f' Unregistering nested dangling parameter from module {module.__class__.__name__}', + force=False) + unregister_external_parameter(module, item) + + item.all_gather() + + self.post_sub_module_forward_function(module) + + def _pre_backward_module_hook(module, inputs, output): + def _run_before_backward_function(sub_module): + # some models (e.g. Albert) may run multiple forwards on the same layer in a loop + # before doing backwards, so each backward will need a pre-fetch - using reference + # counting to support this scenario + # print(f"COUNTER before: {sub_module.applied_pre_backward_ref_cnt}") + if sub_module.applied_pre_backward_ref_cnt > 0: + self.pre_sub_module_backward_function(sub_module) + sub_module.applied_pre_backward_ref_cnt -= 1 + # print(f"COUNTER after: {sub_module.applied_pre_backward_ref_cnt}") + + return _apply_to_tensors_only(module, + PreBackwardFunction, + _run_before_backward_function, + output) + + # This is an alternate to doing _post_backward_module_hook + # it uses tensor.register_hook instead of using torch.autograd.Function + def _alternate_post_backward_module_hook(module, inputs): + module.ds_grads_remaining = 0 + + # print(f"Before Forward {module.__class__.__name__}") + + def _run_after_backward_hook(*unused): + module.ds_grads_remaining = module.ds_grads_remaining - 1 + if module.ds_grads_remaining == 0: + # print(f"After backward {module.__class__.__name__}") + self.post_sub_module_backward_function(module) + + def _run_before_forward_function(input): + if input.requires_grad: + module.ds_grads_remaining += 1 + + return _apply_forward_and_backward_to_tensors_only( + module, + _run_before_forward_function, + _run_after_backward_hook, + inputs) + + def _post_backward_module_hook(module, inputs): + module.ds_grads_remaining = 0 + + def _run_after_backward_function(sub_module): + if sub_module.ds_grads_remaining == 0: + self.post_sub_module_backward_function(sub_module) + + return _apply_to_tensors_only(module, + PostBackwardFunction, + _run_after_backward_function, + inputs) + + # Pre forward hook + module.register_forward_pre_hook(_pre_forward_module_hook) + # Post forward hook + module.register_forward_hook(_post_forward_module_hook) + + # Pre backward hook + module.register_forward_hook(_pre_backward_module_hook) + + # post backward hook + module.register_forward_pre_hook(_post_backward_module_hook) + + def pre_sub_module_forward_function(self, sub_module): + if self.verbose: + report_memory_usage( + f"Before sub module function {sub_module.__class__.__name__}") + + global FWD_MODULE_STACK + FWD_MODULE_STACK.append(sub_module) + + self.param_coordinator.record_trace(sub_module) + + self.param_coordinator.fetch_sub_module(sub_module) + if self.verbose: + report_memory_usage( + f"Before sub module function {sub_module.__class__.__name__} after fetch") + + self.param_coordinator.prefetch_next_sub_modules( + sub_module, + 
numel=self.prefetch_elements, + nvme=self.params_in_nvme_and_cpu) + if self.verbose: + report_memory_usage( + f"Before sub module function {sub_module.__class__.__name__} after prefetch") + + self.param_coordinator.increment_step(sub_module) + + def post_sub_module_forward_function(self, sub_module): + if self.verbose: + report_memory_usage( + f"After sub module function {sub_module.__class__.__name__} {sub_module.id} before release") + + self.param_coordinator.release_sub_module(sub_module) + if self.verbose: + report_memory_usage( + f"After sub module function {sub_module.__class__.__name__} {sub_module.id} after release") + + def pre_sub_module_backward_function(self, sub_module): + self.param_coordinator.record_trace(sub_module) + + self.param_coordinator.fetch_sub_module(sub_module) + + self.param_coordinator.prefetch_next_sub_modules(sub_module, + numel=self.prefetch_elements) + + self.param_coordinator.increment_step(sub_module) + + def post_sub_module_backward_function(self, sub_module): + if self.verbose: + report_memory_usage( + f"After sub module backward function {sub_module.__class__.__name__} {sub_module.id} before release") + self.param_coordinator.release_sub_module(sub_module) + + if self.verbose: + report_memory_usage( + f"After sub module backward function {sub_module.__class__.__name__} {sub_module.id} after release") + + def _release_ipg_buffers(self): + if self.contiguous_gradients: + self.ipg_buffer = None + if not self.offload_optimizer and self.is_gradient_accumulation_boundary: + self.grads_in_partition = None + + self.grads_in_partition_offset = 0 + + def _optimizer_step(self, sub_group_id): + param_group_id = self.sub_group_to_group_id[sub_group_id] + fp32_param = self.fp32_partitioned_groups_flat[sub_group_id] + fp16_param = self.fp16_partitioned_groups_flat[sub_group_id] + self.optimizer.param_groups[param_group_id]['params'] = [fp32_param] + + self.optimizer.step() + self.optimizer.param_groups[param_group_id]['params'] = [] + + def _swappable_optimizer_subgroup(self, sub_group_id): + if not self.swap_optimizer: + return False + + return self.optimizer_swapper.swappable_tensor( + None, + numel=self.fp16_partitioned_groups_flat_numel[sub_group_id]) + + def _partitioned_params_swap_out(self, i): + offset = 0 + fp32_param = self.fp32_partitioned_groups_flat[i] + assert fp32_param is not None, \ + f'fp32 parameters of sub_group {i} is None' + + swap_fp16_params = [] + swap_fp32_params = [] + for param, partitioned_param in zip(self.fp16_groups[i], self.fp16_partitioned_groups[i]): + src = fp32_param.narrow(0, offset, partitioned_param.ds_numel) + if partitioned_param.status == PartitionedParamStatus.AVAILABLE: + partitioned_param.data.copy_(src.data) + else: + swap_fp32_params.append(src) + swap_fp16_params.append(param) + offset += partitioned_param.ds_numel + + if len(swap_fp16_params): + swap_fp16_params[0].nvme_swapper.swap_out_partitioned_params( + dst_fp16_params=swap_fp16_params, + src_fp32_params=swap_fp32_params) + + def initialize_optimizer_states(self): + num_subgroups = len(self.fp16_groups) + + largest_numel = max( + [sum([p.ds_numel for p in psg]) for psg in self.fp16_partitioned_groups]) + gradient_dtype = self.fp32_partitioned_groups_flat[0].dtype + gradient_buffer = torch.zeros(int(largest_numel), + dtype=gradient_dtype, + device=self.device) + + timers = self.timers + timer_names = set() + + if self.swap_optimizer: + self.optimizer_swapper.init_timers() + + INIT_OPTIMIZER_TIMER = 'init_optimizer_state' + timer_names.add(INIT_OPTIMIZER_TIMER) + 
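+ # the loop below initializes optimizer state one sub-group at a time: swap the states in if they live on NVMe, run an optimizer step on zeroed gradients to allocate the states, then swap them back out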
self.start_timers([INIT_OPTIMIZER_TIMER]) + + for i, group in enumerate(self.fp16_groups): + swappable_optimizer_subgroup = self._swappable_optimizer_subgroup( + i) + swappable_param_subgroup = self.fp16_partitioned_groups_flat[i] is None + + num_elements = int(self.fp16_partitioned_groups_flat_numel[i]) + + if self.verbose: + report_memory_usage( + f'[Begin] Initialize optimizer states {i} / {num_subgroups} subgroups, num_elems: {num_elements}, swappable opt/param:{swappable_optimizer_subgroup}/{swappable_param_subgroup}') + + if swappable_optimizer_subgroup: + self._optimizer_states_and_gradient_swap_in(i, timer_names) + + if self.offload_optimizer and not swappable_optimizer_subgroup: + subgroup_gradient_buffer = torch.zeros(num_elements, + dtype=gradient_dtype, + device=self.device) + if self.offload_optimizer_pin_memory: + subgroup_gradient_buffer = subgroup_gradient_buffer.pin_memory() + + self.fp32_partitioned_groups_flat[i].grad = subgroup_gradient_buffer + else: + self.fp32_partitioned_groups_flat[i].grad = gradient_buffer.narrow( + 0, + 0, + num_elements) + + self._optimizer_step(i) + + if swappable_param_subgroup: + self._partitioned_params_swap_out(i) + + if swappable_optimizer_subgroup: + self._optimizer_states_and_gradient_swap_out(i, timer_names) + + if self.verbose: + report_memory_usage( + f'[End] Initialize optimizer states {i} / {num_subgroups} subgroups, num_elems: {num_elements}, swappable opt/param:{swappable_optimizer_subgroup}/{swappable_param_subgroup}') + + self.stop_timers([INIT_OPTIMIZER_TIMER]) + self.log_timers(timer_names) + + if self.swap_optimizer: + self.optimizer_swapper.log_timers() + + if not self.offload_optimizer: + for group in self.fp32_partitioned_groups_flat: + group.grad = None + + # Reset steps + return + + ######################################################################### + #########################ZeRO Partition Gradients######################## + ######################################################################### + + def get_first_param_index(self, group_id, param_group, partition_id): + for index, param in enumerate(param_group): + param_id = self.get_param_id(param) + if partition_id in self.param_to_partition_ids[group_id][param_id]: + return index + return None + + def initialize_gradient_partitioning_data_structures(self): + + total_partitions = dist.get_world_size(group=self.dp_process_group) + + for i, param_group in enumerate(self.fp16_groups): + + self.param_to_partition_ids[i] = {} + self.is_partition_reduced[i] = {} + self.total_grads_in_partition[i] = {} + self.remaining_grads_in_partition[i] = {} + self.is_grad_computed[i] = {} + self.grad_partition_insertion_offset[i] = {} + self.grad_start_offset[i] = {} + self.first_param_index_in_partition[i] = {} + + for partition_id in range(total_partitions): + self.is_grad_computed[i][partition_id] = {} + self.grad_partition_insertion_offset[i][partition_id] = {} + self.grad_start_offset[i][partition_id] = {} + self.initialize_gradient_partition( + i, param_group, partition_id) + self.is_partition_reduced[i][partition_id] = False + self.first_param_index_in_partition[i][ + partition_id] = self.get_first_param_index( + i, + param_group, + partition_id) + + def independent_gradient_partition_epilogue(self): + if self.verbose: + self.report_ipg_memory_usage( + f"In ipg_epilogue before reduce_ipg_grads", 0) + self.reduce_ipg_grads() + if self.verbose: + self.report_ipg_memory_usage( + f"In ipg_epilogue after reduce_ipg_grads", 0) + + if self.overlap_comm: + 
self.reduction_stream.synchronize() + + with torch.cuda.stream(self.reduction_stream): + self.partition_previous_reduced_grads() + + # if dist.get_rank() == 0: + # print()("Params already reduced %s", self.params_already_reduced) + for i in range(len(self.params_already_reduced)): + self.params_already_reduced[i] = False + + # in case of cpu offload, averaged gradients are already in fp32_partitioned_groups_flat.grad + # TODO: use a similar code path for both cpu_offload and non-cpu offload + if not self.offload_optimizer: + for i, sub_group in enumerate(self.fp16_groups): + self.averaged_gradients[i] = [ + torch.zeros_like(param.ds_tensor) if param.grad is None else + param.grad.data.narrow(0, + 0, + param.ds_tensor.numel()) + for param in sub_group + ] + # self.averaged_gradients[i] = self.get_flat_partition( + # self.fp16_groups[i], + # 0, + # self.fp32_partitioned_groups_flat[i].numel(), + # return_tensor_list=True) + + self._release_ipg_buffers() + + if self.verbose: + report_memory_usage(f"End ipg_epilogue") + + # resets all partition to no reduced + # sets remianing grads to the total number of grads in each partition + # set is grad computed to false for all grads in partition + def reset_partition_gradient_structures(self): + total_partitions = dist.get_world_size(group=self.dp_process_group) + for i, _ in enumerate(self.fp16_groups): + for partition_id in range(total_partitions): + self.is_partition_reduced[i][partition_id] = False + self.remaining_grads_in_partition[i][ + partition_id] = self.total_grads_in_partition[i][partition_id] + + for param_id in self.is_grad_computed[i][partition_id]: + self.is_grad_computed[i][partition_id][param_id] = False + + def initialize_gradient_partition(self, i, param_group, partition_id): + def set_key_value_list(dictionary, key, value): + if key in dictionary: + dictionary[key].append(value) + else: + dictionary[key] = [value] + + def increment_value(dictionary, key): + if key in dictionary: + dictionary[key] += 1 + else: + dictionary[key] = 1 + + partition_size = self.partition_size[i] + + start_index = partition_size * partition_id + end_index = partition_size * (partition_id + 1) + + current_index = 0 + first_offset = 0 + + for param in param_group: + + param_size = param.numel() + param_id = self.get_param_id(param) + + if (current_index >= start_index and current_index < end_index): + set_key_value_list(self.param_to_partition_ids[i], + param_id, + partition_id) + increment_value(self.total_grads_in_partition[i], partition_id) + + self.is_grad_computed[i][partition_id][param_id] = False + + self.grad_partition_insertion_offset[i][partition_id][ + param_id] = current_index - start_index + self.grad_start_offset[i][partition_id][param_id] = 0 + + elif start_index > current_index and start_index < (current_index + + param_size): + assert ( + first_offset == 0), "This can happen either zero or only once as this must be the first tensor in the partition" + first_offset = start_index - current_index + + set_key_value_list(self.param_to_partition_ids[i], + param_id, + partition_id) + increment_value(self.total_grads_in_partition[i], partition_id) + + self.is_grad_computed[i][partition_id][param_id] = False + + self.grad_partition_insertion_offset[i][partition_id][param_id] = 0 + self.grad_start_offset[i][partition_id][param_id] = first_offset + + current_index = current_index + param_size + + def overlapping_partition_gradients_reduce_epilogue(self): + self.independent_gradient_partition_epilogue() + self.zero_grad() + + def 
create_reduce_and_remove_grad_hooks(self): + if self.verbose: + print_rank_0(f'[Begin] Create gradient reduction hooks') + self.grad_accs = [] + for i, param_group in enumerate(self.fp16_groups): + for param in param_group: + if param.requires_grad: + # print_rank_0(f" Before all gather {param.device}, {param.shape}") + + # The hook must be created in un-partitioned parameter + param.all_gather() + + # print(f"After all gather {param.device}, {param.shape}") + def wrapper(param, i): + param_tmp = param.expand_as(param) + grad_acc = param_tmp.grad_fn.next_functions[0][0] + + def reduce_partition_and_remove_grads(*notneeded): + self.reduce_ready_partitions_and_remove_grads( + param, i) + + grad_acc.register_hook( + reduce_partition_and_remove_grads) + self.grad_accs.append(grad_acc) + + # print(f"param grad fn {param.expand_as(param).grad_fn}") + wrapper(param, i) + + # Partition the parameter after creating the hook + param.partition() + if self.verbose: + print_rank_0(f'[End] Create gradient reduction hooks') + + def get_param_id(self, param): + unique_id = id(param) + return self.param_id[unique_id] + + def report_ipg_memory_usage(self, tag, param_elems): + elem_count = self.elements_in_ipg_bucket + param_elems + percent_of_bucket_size = ( + 100.0 * elem_count) // self.reduce_bucket_size + report_memory_usage( + f"{tag}: elems in_bucket {self.elements_in_ipg_bucket} param {param_elems} max_percent {percent_of_bucket_size}") + + ###############Idependent Partition Gradient ######################## + def reduce_independent_p_g_buckets_and_remove_grads(self, param, i): + # print_rank_0(f"Inside reduce ipg buckets. {debug_param2name_id_shape(param)}, ipg elements {self.elements_in_ipg_bucket}, reduce bucket size {self.reduce_bucket_size}", force=True) + + # Because the ipg bucket is initialized with a random place holder tensor, we must + # explicitly check that the bucket has any real data in it (self.elements_in_ipg_bucket > + # 0). Otherwise if the incoming param.ds_numel is large, this branch may get triggered on a + # garbage data and `self.average_tensor()` will crash because its params_to_reduce will be + # empty, while reduction_list will have that garbage data. + if self.elements_in_ipg_bucket > 0 and self.elements_in_ipg_bucket + param.ds_numel > self.reduce_bucket_size: + if self.verbose: + self.report_ipg_memory_usage("In ipg_remove_grads before reduce_ipg_grads", + param.ds_numel) + + self.reduce_ipg_grads() + + if self.contiguous_gradients and self.overlap_comm: + # Swap ipg_index between 0 and 1 + self.ipg_index = 1 - self.ipg_index + if self.verbose: + self.report_ipg_memory_usage("In ipg_remove_grads after reduce_ipg_grads", + param.ds_numel) + + param_id = self.get_param_id(param) + assert self.params_already_reduced[param_id] == False, \ + f"The parameter {param_id} has already been reduced. \ + Gradient computed twice for this partition. 
\ + Multiple gradient reduction is currently not supported" + + # keeping the gradients contiguous to prevent memory fragmentation, and avoid flattening + if param.ds_numel > self.reduce_bucket_size: + self.extra_large_param_to_reduce = param + + elif self.contiguous_gradients: + # print_rank_0("before new grad tensor move") + new_grad_tensor = self.ipg_buffer[self.ipg_index].narrow( + 0, + self.elements_in_ipg_bucket, + param.ds_numel) + # print_rank_0("after new grad tensor move") + new_grad_tensor.copy_(param.grad.view(-1)) + param.grad.data = new_grad_tensor.data.view_as(param.grad) + + self.elements_in_ipg_bucket += param.ds_numel + self.grads_in_ipg_bucket.append(param.grad) + self.params_in_ipg_bucket.append((i, param, param_id)) + if self.verbose: + self.report_ipg_memory_usage("End ipg_remove_grads", 0) + + def gradient_reduction_w_predivide(self, tensor): + dp_world_size = dist.get_world_size(group=self.dp_process_group) + + tensor_to_allreduce = tensor + + if self.allreduce_always_fp32: + tensor_to_allreduce = tensor.float() + + if self.postscale_gradients: + if self.gradient_predivide_factor != 1.0: + tensor_to_allreduce.mul_(1. / self.gradient_predivide_factor) + + dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group) + + if self.gradient_predivide_factor != dp_world_size: + tensor_to_allreduce.mul_( + self.gradient_predivide_factor / dp_world_size) + else: + tensor_to_allreduce.div_(dp_world_size) + dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group) + + if self.allreduce_always_fp32 and tensor is not tensor_to_allreduce: + tensor.copy_(tensor_to_allreduce) + + return tensor + + def average_tensor(self, tensors, params_to_reduce): + with torch.cuda.stream(self.reduction_stream): + if not self.reduce_scatter: + for tensor in tensors: + self.gradient_reduction_w_predivide(tensor) + return + + for tensor in tensors: + tensor.div_(dist.get_world_size(group=self.dp_process_group)) + + # reduction resulting with each rank only holding the gradient partition it owns + # This could either be a reduce scatter or a reduce op depending on how + # parameters are partitionied. 
The method is implemented by the + # DeepSpeed param extensions to the pytorch parameter, so its up to + # the extension to define what happens here + params_to_reduce[0].reduce_gradients_at_owner( + param_list=params_to_reduce, + hierarchy=self.param_coordinator.hierarchy) + + def set_grad_positions(self): + for i, group in enumerate(self.fp16_groups): + current_offset = 0 + for param in group: + param_id = self.get_param_id(param) + num_elements = param.ds_tensor.ds_numel + + self.grad_position[param_id] = [ + int(i), + int(current_offset), + int(num_elements) + ] + # print(f"param id {param_id} i:{i}, ds_tensor {num_elements} numel {param.numel()}") + current_offset += num_elements + + def async_accumulate_grad_in_cpu_via_gpu(self, param, acc_grad_cpu_partition): + + # copy to a preexisiting buffer to avoid memory allocation penalty + dest_buffer = self.temp_grad_buffer_for_gpu_offload.view(-1).narrow( + 0, + 0, + param.ds_tensor.ds_numel) + + if self.micro_step_id > 0: + dest_buffer.copy_( + acc_grad_cpu_partition.view(-1), non_blocking=True) + param.grad.data.view(-1).add_(dest_buffer) + + # at the boundary we will send 32bit directly + if not self.is_gradient_accumulation_boundary: + acc_grad_cpu_partition.data.copy_(param.grad.data.view(-1), + non_blocking=True) + + def _constant_buffered_norm2(self, input, buffer_size=250000000): + norm = None + for part in input.view(-1).split(buffer_size): + if norm is None: + norm = part.data.double().norm(2) ** 2.0 + else: + norm += part.data.double().norm(2) ** 2.0 + return norm ** 0.5 + + def set_norm_for_param_grad_in_gpu(self, param): + param_id = self.get_param_id(param) + # self.norm_for_param_grads[param_id] = param.grad.data.double().norm(2) + # Using a more memory efficient version + self.norm_for_param_grads[param_id] = self._constant_buffered_norm2( + param.grad) + + def update_overflow_tracker_for_param_grad(self, param): + # Credit to our user David Minn + if param.grad is not None: + if self.overlap_comm: + self.gpu_sum = self.gpu_sum + param.grad.data.float().sum() + elif self._has_inf_or_nan(param.grad.data): + self.local_overflow = True + + def async_inplace_copy_grad_to_fp32_buffer_from_gpu(self, param, fp32_grad_tensor): + with torch.cuda.stream(self.copy_grad_stream): + param_id = self.get_param_id(param) + src_tensor = param.grad.view(-1).float() + # print(f"src_tensor {src_tensor.size()} and fp32 grad {fp32_grad_tensor.size()}") + fp32_grad_tensor.copy_(src_tensor, non_blocking=True) + param.grad = None + + def complete_grad_norm_calculation_for_cpu_offload(self, params): + total_norm = 0.0 + norm_type = 2.0 + for p in params: + if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0): + param_id = self.get_param_id(p) + if param_id in self.norm_for_param_grads.keys(): + param_norm = self.norm_for_param_grads[param_id] + total_norm += param_norm.item() ** 2 + + # Sum across all model parallel GPUs. + total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)]) + + torch.distributed.all_reduce(total_norm_cuda, + op=torch.distributed.ReduceOp.SUM, + group=self.dp_process_group) + + self._model_parallel_all_reduce(tensor=total_norm_cuda, + op=torch.distributed.ReduceOp.SUM) + + total_norm = total_norm_cuda[0].item() ** (1. 
/ norm_type) + + if total_norm == float( + 'inf') or total_norm == -float('inf') or total_norm != total_norm: + total_norm = -1 + + return total_norm + + def partition_previous_reduced_grads(self): + if not self.previous_reduced_grads: + return + + if self.offload_optimizer: + allocate_grads_in_partition = self.grads_in_partition is None \ + and self.gradient_accumulation_steps > 1 + else: + allocate_grads_in_partition = self.grads_in_partition is None + + if allocate_grads_in_partition: + self.grads_in_partition = [] + + for i, group in enumerate(self.fp16_groups): + total_size = 0 + for param_in_partition in group: + total_size += param_in_partition.ds_tensor.ds_numel + + if self.verbose: + report_memory_usage( + f"group {i} before creating {total_size} reduced gradients into partition") + if self.offload_param_pin_memory: + self.grads_in_partition.append( + torch.zeros(int(total_size), + dtype=self.dtype, + device=self.device).pin_memory()) + else: + self.grads_in_partition.append( + torch.zeros(int(total_size), + dtype=self.dtype, + device=self.device)) + if self.verbose: + report_memory_usage( + f"group {i} after creating {total_size} reduced gradients into partition") + + if self.offload_optimizer: + offload_fp32_gradients = {} + offload_fp32_offsets = {} + + with torch.cuda.stream(self.copy_grad_stream): + self.reduction_stream.synchronize() + for param in self.previous_reduced_grads: + + [i, + dest_offset, + num_elements] = self.grad_position[self.get_param_id(param)] + + if self.offload_optimizer: + param.partition_gradients( + partition_buffers=self.temp_grad_gpu_buffer) + # with torch.cuda.stream(self.copy_grad_stream): + # self.reduction_stream.synchronize() + + if self.gradient_accumulation_steps > 1: + # The allreduce buffer will be rewritted. Copy the gradients in partition to a new buffer + fp16_grad_tensor = self.grads_in_partition[i].narrow( + 0, + dest_offset, + num_elements) + self.async_accumulate_grad_in_cpu_via_gpu( + param, + fp16_grad_tensor) + + if self.is_gradient_accumulation_boundary: + + self.set_norm_for_param_grad_in_gpu(param) + + self.update_overflow_tracker_for_param_grad(param) + + if self._swappable_optimizer_subgroup(i): + if not i in offload_fp32_gradients.keys(): + offload_fp32_gradients[i] = [] + offload_fp32_offsets[i] = [] + + offload_fp32_gradients[i].append( + param.grad.view(-1).float()) + param.grad = None + offload_fp32_offsets[i].append(dest_offset) + else: + fp32_grad_tensor = self.fp32_partitioned_groups_flat[ + i].grad.narrow(0, + dest_offset, + num_elements) + + self.async_inplace_copy_grad_to_fp32_buffer_from_gpu( + param, + fp32_grad_tensor) + else: + # The allreduce buffer will be rewritted. 
Copy the gradients in partition to a new buffer + fp16_grad_tensor = self.grads_in_partition[i].narrow( + 0, + dest_offset, + num_elements) + param.partition_gradients( + partition_buffers=fp16_grad_tensor, + accumulate=True if self.micro_step_id > 0 else False) + + if self.offload_optimizer and self.swap_optimizer: + for i in offload_fp32_gradients.keys(): + self.optimizer_swapper.swap_out_gradients( + parameter=self.fp32_partitioned_groups_flat[i], + gradient_offsets=offload_fp32_offsets[i], + gradient_tensors=offload_fp32_gradients[i]) + + self.previous_reduced_grads = [] + + def reduce_ipg_grads(self, extra_param=None): + if self.overlap_comm: + self.reduction_stream.synchronize() + + with torch.cuda.stream(self.reduction_stream): + self.partition_previous_reduced_grads() + + params_to_reduce = [param for i, param, + param_id in self.params_in_ipg_bucket] + # print(f"Params in ipg bucket {self.params_in_ipg_bucket}") + # print(f"Reducing {[(debug_param2name_id_shape(param), param.grad) for param in params_to_reduce]}") + # exit(0) + if self.contiguous_gradients: + reduction_list = [self.ipg_buffer[self.ipg_index]] + if self.extra_large_param_to_reduce is not None: + reduction_list.append(self.extra_large_param_to_reduce.grad) + self.extra_large_param_to_reduce = None + self.average_tensor(reduction_list, params_to_reduce) + else: + self.buffered_reduce_fallback( + None, + self.grads_in_ipg_bucket, + elements_per_buffer=self.elements_in_ipg_bucket) + + for _, param, param_id in self.params_in_ipg_bucket: + self.params_already_reduced[param_id] = True + + self.previous_reduced_grads = params_to_reduce + + self.grads_in_ipg_bucket = [] + self.params_in_ipg_bucket = [] + self.elements_in_ipg_bucket = 0 + ##################################################################### + + def reduce_ready_partitions_and_remove_grads(self, param, i): + # print_rank_0(f"Backward {debug_param2name_id_shape(param)}", force=True) + self.reduce_independent_p_g_buckets_and_remove_grads(param, i) + + def zero_reduced_gradients(self, partition_id, i): + def are_all_related_partitions_reduced(params_id): + for partition_id in self.param_to_partition_ids[i][params_id]: + if not self.is_partition_reduced[i][partition_id]: + return False + return True + + for params_id in self.is_grad_computed[i][partition_id]: + if are_all_related_partitions_reduced(params_id): + self.param_dict[params_id].grad = None + + def flatten_and_print(self, message, tensors, start=0, n=5): + flatten_tensor = self.flatten(tensors) + + def print_func(): + print(flatten_tensor.contiguous().view(-1).narrow(0, start, n)) + + self.sequential_execution(print_func, message) + + def get_grads_to_reduce(self, i, partition_id): + def get_reducable_portion(key): + grad = self.param_dict[key].grad + total_elements = grad.numel() + start = self.grad_start_offset[i][partition_id][key] + num_elements = min( + total_elements - start, + self.partition_size[i] - + self.grad_partition_insertion_offset[i][partition_id][key]) + if not pg_correctness_test: + if num_elements == total_elements: + return grad + else: + return grad.contiguous().view(-1).narrow(0, + int(start), + int(num_elements)) + else: + if num_elements == total_elements: + return grad.clone() + else: + return grad.clone().contiguous().view(-1).narrow( + 0, + int(start), + int(num_elements)) + + grads_to_reduce = [] + for key in self.is_grad_computed[i][partition_id]: + grad = get_reducable_portion(key) + grads_to_reduce.append(grad) + return grads_to_reduce + + def sequential_execution(self, 
function, message, group=None): + if group is None: + group = self.dp_process_group + if dist.get_rank(group=group) == 0: + print(message) + for id in range(dist.get_world_size(group=group)): + if id == dist.get_rank(group=group): + function() + dist.barrier(group=group) + + def set_none_gradients_to_zero(self, i, partition_id): + for param_id in self.is_grad_computed[i][partition_id]: + param = self.param_dict[param_id] + if param.grad is None: + param.grad = torch.zeros_like(param) + + ######################Reduction Related Methods############################## + + def allreduce_bucket(self, bucket, allreduce_always_fp32=False, rank=None, log=None): + rank = None + tensor = self.flatten(bucket) + + tensor_to_allreduce = tensor + + if pg_correctness_test: + allreduce_always_fp32 = True + + if allreduce_always_fp32: + tensor_to_allreduce = tensor.float() + + tensor_to_allreduce.div_( + dist.get_world_size(group=self.dp_process_group)) + + if rank is None: + # "All Reducing" + dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group) + else: + global_rank = _get_global_rank(self.dp_process_group, rank) + dist.reduce(tensor_to_allreduce, global_rank, + group=self.dp_process_group) + + if allreduce_always_fp32 and tensor is not tensor_to_allreduce: + if rank is None or rank == dist.get_rank(group=self.dp_process_group): + tensor.copy_(tensor_to_allreduce) + + return tensor + + # if rank is specified do a reduction instead of an allreduce + def allreduce_and_copy(self, small_bucket, rank=None, log=None): + with torch.cuda.stream(self.reduction_stream): + allreduced = self.allreduce_bucket( + small_bucket, rank=rank, log=log) + if rank is None or rank == dist.get_rank(group=self.dp_process_group): + for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)): + buf.copy_(synced) + + def allreduce_no_retain(self, + bucket, + numel_per_bucket=500000000, + rank=None, + log=None): + small_bucket = [] + numel = 0 + for tensor in bucket: + small_bucket.append(tensor) + numel = numel + tensor.numel() + if numel > numel_per_bucket: + self.allreduce_and_copy(small_bucket, rank=rank, log=None) + small_bucket = [] + if len(small_bucket) > 0: + self.allreduce_and_copy(small_bucket, rank=rank, log=log) + + # allows using reduction of gradients instead of using all_reduce + def buffered_reduce_fallback(self, + rank, + grads, + elements_per_buffer=500000000, + log=None): + split_buckets = split_half_float_double(grads) + + for i, bucket in enumerate(split_buckets): + self.allreduce_no_retain(bucket, + numel_per_bucket=elements_per_buffer, + rank=rank, + log=log) + + ############################################################################# + ############################################################################# + ############################################################################# + + # views the tensor as multiple partitions and returns + # those partitions + def get_data_parallel_partitions(self, tensor): + partitions = [] + + dp = dist.get_world_size(group=self.dp_process_group) + dp_id = dist.get_rank(group=self.dp_process_group) + + total_num_elements = tensor.numel() + + base_size = total_num_elements // dp + remaining = total_num_elements % dp + + start = 0 + for id in range(dp): + partition_size = base_size + if id < remaining: + partition_size = partition_size + 1 + partitions.append(tensor.narrow(0, start, partition_size)) + start = start + partition_size + return partitions + + def get_partition_info(self, tensor_list, partition_size, partition_id): +
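+ # splits tensor_list into tensors that overlap this rank's partition window and those that do not, and also returns the offset within the first overlapping tensor at which the partition begins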
params_in_partition = [] + params_not_in_partition = [] + + start_index = partition_size * partition_id + end_index = partition_size * (partition_id + 1) + + current_index = 0 + first_offset = 0 + + for tensor in tensor_list: + + tensor_size = tensor.numel() + + if (current_index >= start_index and current_index < end_index): + params_in_partition.append(tensor) + + elif start_index > current_index and start_index < (current_index + + tensor_size): + params_in_partition.append(tensor) + + assert ( + first_offset == 0), "This can happen either zero or only once as this must be the first tensor in the partition" + first_offset = start_index - current_index + + else: + params_not_in_partition.append(tensor) + + current_index = current_index + tensor_size + + return params_in_partition, params_not_in_partition, first_offset + + def zero_grad(self, set_grads_to_None=True): + """ + Zero FP16 parameter grads. + """ + # FP32 grad should never exist. + # For speed, set model fp16 grad to None by default + for group in self.fp16_groups: + for p in group: + if set_grads_to_None: + p.grad = None + else: + if p.grad is not None: + p.grad.detach_() + p.grad.zero_() + + def _model_parallel_all_reduce(self, tensor, op): + """ Perform all reduce within model parallel group, if any. + """ + if self.model_parallel_group is None: + pass + else: + torch.distributed.all_reduce(tensor=tensor, + op=op, + group=self.model_parallel_group) + + def get_grad_norm_direct(self, gradients, params, norm_type=2): + """Clips gradient norm of an iterable of parameters. + + This is adapted from ``torch.nn.utils.clip_grad.clip_grad_norm_`` and + added functionality to handle model parallel parameters. Note that + the gradients are modified in place. + + Arguments: + parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a + single Tensor that will have gradients normalized + max_norm (float or int): max norm of the gradients + norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for + infinity norm. + + Returns: + Total norm of the parameters (viewed as a single vector). + """ + norm_type = float(norm_type) + if norm_type == inf: + total_norm = max(g.data.abs().max() for g in gradients) + total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)]) + torch.distributed.all_reduce(total_norm_cuda, + op=torch.distributed.ReduceOp.MAX, + group=self.dp_process_group) + + # Take max across all GPUs. + self._model_parallel_all_reduce(tensor=total_norm_cuda, + op=torch.distributed.ReduceOp.MAX) + total_norm = total_norm_cuda[0].item() + else: + total_norm = 0.0 + # if dist.get_rank() == 0: + # print()(f"Total Norm begining {total_norm}") + for g, p in zip(gradients, params): + if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0): + param_norm = g.data.double().norm(2) + total_norm += param_norm.item() ** 2 + # Sum across all model parallel GPUs. + total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)]) + + torch.distributed.all_reduce(total_norm_cuda, + op=torch.distributed.ReduceOp.SUM, + group=self.dp_process_group) + + self._model_parallel_all_reduce(tensor=total_norm_cuda, + op=torch.distributed.ReduceOp.SUM) + + total_norm = total_norm_cuda[0].item() ** (1. / norm_type) + + if total_norm == float( + 'inf') or total_norm == -float('inf') or total_norm != total_norm: + total_norm = -1 + + return total_norm + + # creates a flat fused tensor from the tensor list starting at the first_offset + # in the first tensor of the list. 
If there are not enough elements in the tensor + # list then the flat tensor will be padded with zeros + def get_flat_partition(self, + tensor_list, + first_offset, + partition_size, + return_tensor_list=False): + flat_tensor_list = [] + current_size = 0 + for i, tensor in enumerate(tensor_list): + if tensor.grad is None: + tensor.grad = torch.zeros_like(tensor) + + tensor = tensor.grad + num_elements = tensor.numel() + tensor_offset = 0 + + # we need to offset to get to the right element + if i == 0 and first_offset > 0: + tensor_offset = first_offset + num_elements = num_elements - tensor_offset + + # we dont need all elements of the tensor + if num_elements > (partition_size - current_size): + num_elements = partition_size - current_size + + # we need a narrow view of the tensor based on the tensor offset and number of elements that + # we need from this tensor + if tensor_offset > 0 or num_elements < tensor.numel(): + flat_tensor_list.append(tensor.contiguous().view(-1).narrow( + 0, + int(tensor_offset), + int(num_elements))) + else: + flat_tensor_list.append(tensor) + + current_size = current_size + num_elements + + # this means its the last partition and does not align with the dp boundary. We need to pad before flattening + if current_size < partition_size: + flat_tensor_list.append( + torch.zeros(int(partition_size - current_size), + dtype=tensor_list[0].dtype, + device=tensor_list[0].device)) + + if return_tensor_list: + return flat_tensor_list + + return self.flatten(flat_tensor_list) + + def free_grad_in_param_list(self, param_list): + for p in param_list: + p.grad = None + + def reset_cpu_buffers(self): + self.norm_for_param_grads = {} + self.local_overflow = False + + def log_timers(self, timer_names): + if self.timers is None: + return + + self.timers.log(names=list(timer_names)) + + def start_timers(self, timer_names): + if self.timers is None: + return + + for name in timer_names: + self.timers(name).start() + + def stop_timers(self, timer_names): + if self.timers is None: + return + + for name in timer_names: + self.timers(name).stop() + + def _pre_step(self): + self.micro_step_id = INITIAL_MICRO_STEP_ID + + if self.verbose: + print_rank_0(f"Inside Step function") + report_memory_usage(f"In step before checking overflow") + print_rank_0("Finished Tracing at Beginning of Step") + self.param_coordinator.hierarchy = 0 + self.param_coordinator.finish_tracing(print_trace=True) + + self.param_coordinator.reset_step() + + if self.verbose: + print_rank_0("Finished Tracing at Beginning of Step") + + def _get_norm_groups(self): + norm_groups = [] + for i, group in enumerate(self.fp16_groups): + if self.offload_optimizer: + norm_groups.append( + self.complete_grad_norm_calculation_for_cpu_offload( + self.fp16_groups[i])) + else: + norm_groups.append( + self.get_grad_norm_direct(self.averaged_gradients[i], + self.fp16_groups[i])) + return norm_groups + + def _prepare_fp32_grad_for_sub_group(self, sub_group_id): + partition_id = dist.get_rank(group=self.dp_process_group) + + single_grad_partition = self.flatten(self.averaged_gradients[sub_group_id]).to( + self.fp32_partitioned_groups_flat[sub_group_id].dtype) + + assert single_grad_partition.numel() == self.fp32_partitioned_groups_flat[sub_group_id].numel(), \ + "averaged gradients have different number of elements that partition size {} {} {} {}".format( + single_grad_partition.numel( + ), self.fp32_partitioned_groups_flat[sub_group_id].numel(), sub_group_id, + partition_id) + + self.fp32_partitioned_groups_flat[sub_group_id].grad = 
single_grad_partition + + # release all the gradient since we have already created a necessary copy in dp_grad_partition + self.zero_grad() + + self.averaged_gradients[sub_group_id] = None + + def _prepare_sub_group(self, sub_group_id, timer_names=set()): + if self.verbose: + report_memory_usage( + f'Before prepare optimizer sub group {sub_group_id}') + if self._swappable_optimizer_subgroup(sub_group_id): + self._optimizer_states_and_gradient_swap_in( + sub_group_id, timer_names) + elif not self.offload_optimizer: + self._prepare_fp32_grad_for_sub_group(sub_group_id) + if self.verbose: + report_memory_usage( + f'After prepare optimizer sub group {sub_group_id}') + + def _optimizer_states_and_gradient_swap_in(self, sub_group_id, timer_names=set()): + param_length = self.fp16_partitioned_groups_flat_numel[sub_group_id] + fp32_param_id = id(self.fp32_partitioned_groups_flat[sub_group_id]) + assert self._swappable_optimizer_subgroup(sub_group_id), \ + f'Parameter {fp32_param_id} of numel={param_length} is not swappable' + + OPTIMIZER_SWAP_IN_STATE = 'optimizer_swap_in_state' + if self.verbose: + report_memory_usage( + f'pre-step Before swapping in optimizer tensors {sub_group_id}') + self.start_timers([OPTIMIZER_SWAP_IN_STATE]) + + self.optimizer_swapper.swap_in_optimizer_state( + parameter=self.fp32_partitioned_groups_flat[sub_group_id], + async_parameter=self.next_swappable_fp32_partitioned_groups[sub_group_id]) + + self.stop_timers([OPTIMIZER_SWAP_IN_STATE]) + timer_names.add(OPTIMIZER_SWAP_IN_STATE) + if self.verbose: + report_memory_usage( + f'pre-step After swapping in optimizer tensors {sub_group_id}') + + def _release_sub_group(self, sub_group_id, timer_names=set()): + if self.verbose: + report_memory_usage( + f'Before release optimizer sub group {sub_group_id}') + # get rid of the fp32 gradients. 
Not needed anymore + if not self.offload_optimizer: + self.fp32_partitioned_groups_flat[sub_group_id].grad = None + + if self._swappable_optimizer_subgroup(sub_group_id): + self._optimizer_states_and_gradient_swap_out( + sub_group_id, timer_names) + if self.verbose: + report_memory_usage( + f'After release optimizer sub group {sub_group_id}') + + # create a flat tensor aligned at the alignment boundary + def flatten_dense_tensors_aligned(self, tensor_list, alignment): + num_elements = 0 + for tens in tensor_list: + num_elements = num_elements + tens.numel() + + remaining = num_elements % alignment + + if remaining: + elements_to_add = alignment - remaining + pad_tensor = torch.zeros(elements_to_add, + device=tensor_list[0].device, + dtype=tensor_list[0].dtype) + padded_tensor_list = tensor_list + [pad_tensor] + + num_elements = num_elements + elements_to_add + else: + padded_tensor_list = tensor_list + + return self.flatten(padded_tensor_list) + + def _optimizer_states_and_gradient_swap_out(self, sub_group_id, timer_names=set()): + param_length = self.fp16_partitioned_groups_flat_numel[sub_group_id] + fp32_param_id = id(self.fp32_partitioned_groups_flat[sub_group_id]) + assert self._swappable_optimizer_subgroup(sub_group_id), \ + f'Parameter {fp32_param_id} of numel={param_length} is not swappable' + + OPTIMIZER_SWAP_OUT_STATE = 'optimizer_swap_out_state' + if self.verbose: + report_memory_usage( + f'post-step Before swapping out optimizer tensors {sub_group_id}') + self.start_timers([OPTIMIZER_SWAP_OUT_STATE]) + + self.optimizer_swapper.swap_out_optimizer_state( + parameter=self.fp32_partitioned_groups_flat[sub_group_id], + async_swap=self.next_swappable_fp32_partitioned_groups[sub_group_id] is + not None) + + self.stop_timers([OPTIMIZER_SWAP_OUT_STATE]) + if self.verbose: + report_memory_usage( + f'post-step After swapping out optimizer tensors {sub_group_id}') + timer_names.add(OPTIMIZER_SWAP_OUT_STATE) + + # get rid of the fp32 gradients. Not needed anymore + self.fp32_partitioned_groups_flat[sub_group_id].grad = None + + def _unflatten_partitioned_parameters(self, sub_group_id): + updated_params = self.unflatten(self.fp16_partitioned_groups_flat[sub_group_id], + self.fp16_partitioned_groups[sub_group_id]) + + for partitioned_param, q in zip(self.fp16_partitioned_groups[sub_group_id], updated_params): + partitioned_param.data = q.data + + def _overflow_clean_up(self, prev_scale): + if self.verbose: + report_memory_usage('After overflow before clearing gradients') + self.zero_grad() + + if self.offload_optimizer: + self.reset_cpu_buffers() + else: + self.averaged_gradients = {} + + if self.verbose: + report_memory_usage('After overflow after clearing gradients') + + if torch.distributed.get_rank() == 0: + print( + "[deepscale] OVERFLOW! Rank {} Skipping step. 
Attempted loss scale: {}, " + "reducing to {}".format(dist.get_rank(), + prev_scale, + self.loss_scale)) + + def _overflow_check_and_loss_scale_update(self): + + # First compute norm for all group so we know if there is overflow + self.check_overflow() + + # loss scaling related computation + prev_scale = self.loss_scale + self._update_scale(self.overflow) + + if self.overflow: + self._overflow_clean_up(prev_scale) + + return self.overflow + + def _post_step(self, timer_names=set()): + if self.offload_optimizer: + self.reset_cpu_buffers() + + # Gathering persisting parameters + if len(self.persistent_parameters) > 0: + self.persistent_parameters[0].all_gather( + self.persistent_parameters) + + if self.swap_optimizer: + self.optimizer_swapper.log_timers() + + self.log_timers(timer_names) + + if self.verbose: + report_memory_usage('After zero_optimizer step') + print_rank_0( + f"------------------Finishing Step-----------------------") + + def _reassign_or_swap_out_partitioned_parameters(self, sub_group_id): + if self.fp16_partitioned_groups_flat[sub_group_id] is not None: + self.fp16_partitioned_groups_flat[sub_group_id].data.copy_( + self.fp32_partitioned_groups_flat[sub_group_id].data) + + # unflatten fp16 parameter subgroup + self._unflatten_partitioned_parameters(sub_group_id) + else: + self._partitioned_params_swap_out(sub_group_id) + + def allreduce_gradients(self): + self.overlapping_partition_gradients_reduce_epilogue() + + def step(self, closure=None): + """ + Not supporting closure. + """ + self._pre_step() + + # checks for overflow, adjust the loss scale accordingly + if self._overflow_check_and_loss_scale_update(): + if self.swap_optimizer: + self.optimizer_swapper.log_timers() + return + + norm_groups = self._get_norm_groups() + + timer_names = set() + + timer_names.add('optimizer_step') + self.start_timers(['optimizer_step']) + + # update parameters one sub group at a time + for sub_group_id, group in enumerate(self.fp16_groups): + # prepare optimizer states, gradients and fp32 parameters for update + self._prepare_sub_group(sub_group_id, timer_names) + + # scale the fp32 gradients + self.unscale_and_clip_grads(sub_group_id, norm_groups) + + # apply the optimizer step on the sub group and copy fp32 parameters to fp16 + self._optimizer_step(sub_group_id) + + # put fp16 parameters in appropriate location + self._reassign_or_swap_out_partitioned_parameters(sub_group_id) + + # release memory or swap out optimizer states of fp32 parameters + self._release_sub_group(sub_group_id, timer_names) + + self.stop_timers(['optimizer_step']) + + self._post_step(timer_names) + return + + def dump_pre_step_gradients(self, debug_fp32_grads): + # Dump gradient norms for debbuging + for i, _ in enumerate(self.fp16_groups): + if self.verbose: + print( + f'Pre-Step Dump Norms for Group {i} FP16P, FP16G, FP32G, FP32GUC') + for fp16_param, fp32_grad in zip(self.fp16_groups[i], debug_fp32_grads[i]): + param_id = self.get_param_id(fp16_param) + fp16_grad_norm = self.debug_fp16_grads[i][param_id] + + fp32_grad_norm = [float(t.data.float().norm(2)) + for t in fp32_grad] + norm_list = [fp16_grad_norm, fp32_grad_norm] + if self.verbose: + print(f'Pre-Step Norms {i} {param_id} = {norm_list}') + + def dump_post_step_gradients(self): + # Dump gradient norms for debbuging + for i, group in enumerate(self.fp16_groups): + if self.verbose: + print( + f'Post-Step Dump Norms for Group {i} FP16P, FP16DS, FP16FLAT, FP32FLAT') + unflat_fp16 = self.unflatten( + self.fp16_groups_flat[i], self.fp16_groups[i]) + 
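+            # note: the fp32 flat partition is unflattened with the same fp16 group layout below,
+            # so the per-parameter norms printed afterwards compare matching views side by side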
unflat_fp32 = self.unflatten(self.fp32_partitioned_groups_flat[i], + self.fp16_groups[i]) + for j, p in enumerate(self.fp16_groups[i]): + param_id = self.get_param_id(p) + param_norm = float(p.data.float().norm(2)) + ds_norm = float(p.ds_tensor.data.float().norm(2)) + + unflat_norm = [ + float(t.data.float().norm(2)) + for t in [unflat_fp16[j], + unflat_fp32[j]] + ] + norm_list = [param_norm, ds_norm] + unflat_norm + if self.verbose: + print(f'Post-Step Norms {i} {param_id} = {norm_list}') + + def unscale_and_clip_grads(self, sub_group_id, norm_groups): + + grad_groups_flat = [ + self.fp32_partitioned_groups_flat[sub_group_id].grad] + + total_norm = 0.0 + for norm in norm_groups: + total_norm += norm ** 2.0 + total_norm = math.sqrt(total_norm) + + # compute combined scale factor for this group + combined_scale = self.loss_scale + if self.clip_grad > 0.: + # norm is in fact norm*scale + clip = ((total_norm / self.loss_scale) + 1e-6) / self.clip_grad + if clip > 1: + combined_scale = clip * self.loss_scale + + for grad in grad_groups_flat: + if isinstance(grad, list): + sub_partitions = grad + for g in sub_partitions: + g.data.mul_(1. / combined_scale) + else: + grad.data.mul_(1. / combined_scale) + + def _check_overflow(self, partition_gradients=True): + self.overflow = self.has_overflow(partition_gradients) + + # `params` is a list / generator of torch.Variable + def has_overflow_serial(self, params, is_grad_list=False): + for p in params: + if p.grad is not None and self._has_inf_or_nan(p.grad.data): + return True + + return False + + def has_overflow_partitioned_grads_serial(self): + for i in range(len(self.fp16_groups)): + for j, grad in enumerate(self.averaged_gradients[i]): + if grad is not None and self._has_inf_or_nan(grad.data, j): + return True + return False + + def has_overflow(self, partition_gradients=True): + if partition_gradients: + if self.overlap_comm: + self.local_overflow = self._has_inf_or_nan(self.gpu_sum) + self.gpu_sum = torch.zeros(1, dtype=torch.float).cuda() + + overflow = self.local_overflow if self.offload_optimizer else self.has_overflow_partitioned_grads_serial( + ) + # overflow = self.has_overflow_partitioned_grads_serial() + overflow_gpu = torch.cuda.ByteTensor([overflow]) + torch.distributed.all_reduce(overflow_gpu, + op=torch.distributed.ReduceOp.MAX, + group=self.dp_process_group) + + else: + params = [] + for group in self.fp16_groups: + for param in group: + params.append(param) + + overflow = self.has_overflow_serial( + params, is_grad_list=partition_gradients) + overflow_gpu = torch.cuda.ByteTensor([overflow]) + + # Since each model parallel GPU carries only part of the model, + # make sure overflow flag is synced across all the model parallel GPUs + self._model_parallel_all_reduce(tensor=overflow_gpu, + op=torch.distributed.ReduceOp.MAX) + + overflow = overflow_gpu[0].item() + return bool(overflow) + + # `x` is a torch.Tensor + @staticmethod + def _has_inf_or_nan(x, j=None): + try: + # if x is half, the .float() incurs an additional deep copy, but it's necessary if + # Pytorch's .sum() creates a one-element tensor of the same type as x + # (which is true for some recent version of pytorch). + cpu_sum = float(x.float().sum()) + # More efficient version that can be used if .sum() returns a Python scalar + # cpu_sum = float(x.sum()) + except RuntimeError as instance: + # We want to check if inst is actually an overflow exception. + # RuntimeError could come from a different error. + # If so, we still want the exception to propagate. 
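+            # A RuntimeError whose message contains "value cannot be converted" is taken as the
+            # overflow signal from the float conversion above; any other RuntimeError is re-raised.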
+ if "value cannot be converted" not in instance.args[0]: + raise + return True + else: + if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum: + return True + return False + + def backward(self, loss, retain_graph=False): + """ + :attr:`backward` performs the following steps: + + 1. fp32_loss = loss.float() + 2. scaled_loss = fp32_loss*loss_scale + 3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves + """ + self.micro_step_id += 1 + if self.verbose: + print_rank_0( + f"Total fully available parameters {self.param_coordinator.total_available_parameter_numel}" + ) + + if self.swap_optimizer: + self.optimizer_swapper.pre_backward() + + if self.verbose: + report_memory_usage(f"Before backward") + + if self.contiguous_gradients: + self.ipg_buffer = [] + buf_0 = torch.empty(self.reduce_bucket_size, + dtype=self.dtype, + device=torch.cuda.current_device()) + self.ipg_buffer.append(buf_0) + + # Use double buffers to avoid data access conflict when overlap_comm is enabled. + if self.overlap_comm: + buf_1 = torch.empty(self.reduce_bucket_size, + dtype=self.dtype, + device=torch.cuda.current_device()) + self.ipg_buffer.append(buf_1) + self.ipg_index = 0 + + self.loss_scaler.backward(loss.float(), retain_graph=retain_graph) + '''Partitioning Parameters that were not partitioned + Usually if parameters of modules whose input parameters do not require + grad computation do not trigger post call and will therefore will remain unpartitioned ''' + self._partition_all_parameters() + + if self.swap_optimizer: + self.optimizer_swapper.post_backward() + + def _partition_all_parameters(self): + for name, param in self.module.named_parameters(recurse=True): + self.param_coordinator.release_and_reset_parameter(param) + + def check_overflow(self, partition_gradients=True): + self._check_overflow(partition_gradients) + + def _update_scale(self, has_overflow=False): + self.loss_scaler.update_scale(has_overflow) + + # Promote state so it can be retrieved or set via "fp16_optimizer_instance.state" + def _get_state(self): + return self.optimizer.state + + def _set_state(self, value): + self.optimizer.state = value + + state = property(_get_state, _set_state) + + # Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups" + # (for example, to adjust the learning rate) + def _get_param_groups(self): + return self.optimizer.param_groups + + def _set_param_groups(self, value): + self.optimizer.param_groups = value + + param_groups = property(_get_param_groups, _set_param_groups) + + # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale" + def _get_loss_scale(self): + return self.loss_scaler.loss_scale + + def _set_loss_scale(self, value): + self.loss_scaler.cur_scale = value + + loss_scale = property(_get_loss_scale, _set_loss_scale) + cur_scale = property(_get_loss_scale, _set_loss_scale) + + def _get_lean_tensors(self, padded_flattened_tensor, group_tensors, paddings): + # Remove paddings from flattened tensor + individual_tensors = self.unflatten( + padded_flattened_tensor, group_tensors) + lean_lengths = [t.numel() - pad for t, + pad in zip(group_tensors, paddings)] + lean_tensors = [t[:len] + for t, len in zip(individual_tensors, lean_lengths)] + # print()(f'rank {dist.get_rank()}: lean_tensors = {[t.numel() for t in lean_tensors]}') + return lean_tensors + + # TODO REVISIT this for stage 3 + def get_lean_optimizer_state(self): + # Return optimizer states 
after removing paddings. + # This method assumes that each param group contains a single flattened tensor. + optimizer_groups_state = [] + + for i, group in enumerate(self.optimizer.param_groups): + p = group['params'][0] + lean_state = {} + for key, value in self.optimizer.state[p].items(): + if torch.is_tensor(value): + padded_lens = [t.numel() + for t in self.fp16_partitioned_groups[i]] + lean_state[key] = self._get_lean_tensors( + value, + self.fp16_partitioned_groups[i], + self.groups_padding[i]) + lean_flat_len = sum([t.numel() for t in lean_state[key]]) + else: + lean_state[key] = value + + optimizer_groups_state.append(lean_state) + + return optimizer_groups_state + + def get_groups_without_padding(self, groups_with_padding): + # Return group tensor after removing paddings added for alignment to DP world size. + groups_without_padding = [] + for i, group in enumerate(groups_with_padding): + lean_group = self._get_lean_tensors(group, + self.fp16_partitioned_groups[i], + self.groups_padding[i]) + groups_without_padding.append(lean_group) + + return groups_without_padding + + def _set_fp32_optimizer_param_groups(self): + for sub_group_id, _ in enumerate(self.fp16_groups): + param_group_id = self.sub_group_to_group_id[sub_group_id] + self.optimizer.param_groups[param_group_id]['params'].append( + self.fp32_partitioned_groups_flat[sub_group_id]) + + def _clear_fp32_optimizer_param_groups(self): + for param_group in self.optimizer.param_groups: + param_group['params'] = [] + + def _rigid_state_dict(self): + state_dict = {} + state_dict['zero_stage'] = ZERO_OPTIMIZATION_WEIGHTS + state_dict['loss_scaler'] = self.loss_scaler + state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale + state_dict['overflow'] = self.overflow + state_dict['partition_count'] = self.partition_count + + self._set_fp32_optimizer_param_groups() + state_dict['optimizer_state_dict'] = self.optimizer.state_dict() + state_dict['fp32_flat_groups'] = self.fp32_partitioned_groups_flat + self._clear_fp32_optimizer_param_groups() + + return state_dict + + def state_dict(self): + """ + Returns a dict containing the current state of this :class:`FP16_Optimizer` instance. + This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict + of the contained Pytorch optimizer. + + Example:: + + checkpoint = {} + checkpoint['model'] = model.state_dict() + checkpoint['optimizer'] = optimizer.state_dict() + torch.save(checkpoint, "saved.pth") + """ + if self.elastic_checkpoint: + raise NotImplementedError( + "ZeRO-3 does not yet support elastic checkpointing, please disable for now." + ) + + if self.swap_optimizer or self.params_in_nvme_and_cpu: + raise NotImplementedError( + "ZeRO-3 does not yet support checkpointing with NVMe offloading, please disable for now." + ) + + return self._rigid_state_dict() + + # Restore base optimizer fp32 weights from checkpoint by: + # 1) Merging fp32 weights from checkpoints of all partitions + # 2) Extracting fp32 weights for current partition from merged weights + # 3) Using extracted weights to update base optimizer weights directly. 
+ + def _restore_from_fp32_weights(self, all_state_dict): + + flat_local_partition = [] + for i in range(len(self.fp32_partitioned_groups_flat)): + merged_partitions = [sd['fp32_groups'][i] for sd in all_state_dict] + flat_local_partition.append( + self._get_flattened_partition(merged_partitions)) + + for current, saved in zip(self.fp32_partitioned_groups_flat, flat_local_partition): + current.data.copy_(saved.data) + + # Restore base optimizer fp32 weights from ZeRO fp16 weights + def _restore_from_fp16_weights(self): + for fp16_partitions, fp32_partition in zip(self.fp16_partitioned_groups_flat, + self.fp32_partitioned_groups_flat): + fp32_partition.data.copy_(fp16_partitions.data) + + # Refresh the fp32 master params from the fp16 copies. + def refresh_fp32_params(self): + self._restore_from_fp16_weights() + + # Extract flattened partion for current rank from all partitions + def _get_flattened_partition(self, all_partition_states): + partition_id = dist.get_rank(group=self.dp_process_group) + alignment = dist.get_world_size(group=self.dp_process_group) + + param_partitions = [[] for _ in range(len(all_partition_states[0]))] + for i, partition in enumerate(all_partition_states): + for j, param in enumerate(partition): + param_partitions[j].append(param) + + local_state_partitions = [] + for param_index, param_slices in enumerate(param_partitions): + flattened_merged_tensor = self.flatten_dense_tensors_aligned( + param_slices, + alignment) + new_partitions = self.get_data_parallel_partitions( + flattened_merged_tensor) + local_state_partitions.append(new_partitions[partition_id]) + + if torch.is_tensor(local_state_partitions[0]): + return self.flatten_dense_tensors_aligned(local_state_partitions, alignment) + + # Assume non-tensor states are not partitioned and equal across ranks, so return first one + return local_state_partitions[0] + + # Restore base optimizer state from checkpoint by + # 1) Merging optimizer state from checkpoints of all partitions + # 2) Extracting optimizer state for current partition from the merged state + # 3) Using the extracted value to directly update the base optimizer. + def _restore_base_optimizer_state(self, all_state_dict): + base_optimizer_group_states = [] + for i in range(len(self.optimizer.param_groups)): + partition_states = {} + all_partition_group_states = [ + sd['base_optimizer_state'][i] for sd in all_state_dict + ] + for key in all_partition_group_states[0].keys(): + all_partition_states = [ + all_states[key] for all_states in all_partition_group_states + ] + partition_states[key] = self._get_flattened_partition( + all_partition_states) + base_optimizer_group_states.append(partition_states) + + for i, group in enumerate(self.optimizer.param_groups): + p = group['params'][0] + for key, saved in base_optimizer_group_states[i].items(): + if torch.is_tensor(self.optimizer.state[p][key]): + self.optimizer.state[p][key].data.copy_(saved.data) + else: + self.optimizer.state[p][key] = saved + + def _rigid_load_state_dict(self, state_dict, load_optimizer_states=True): + # I think it should actually be ok to reload the optimizer before the model. 
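+        # Restore order: loss-scaler bookkeeping first, then (if requested) the wrapped optimizer
+        # state, then the flat fp32 partitions, the flat fp16 partitions, and finally the
+        # unflattened fp16 parameters that view into those flat buffers.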
+ self.loss_scaler = state_dict['loss_scaler'] + self.dynamic_loss_scale = state_dict['dynamic_loss_scale'] + self.overflow = state_dict['overflow'] + + if load_optimizer_states: + self._set_fp32_optimizer_param_groups() + self.optimizer.load_state_dict(state_dict['optimizer_state_dict']) + self._clear_fp32_optimizer_param_groups() + + # restore fp32 partitions + for curr_param, saved_param in zip(self.fp32_partitioned_groups_flat, state_dict['fp32_flat_groups']): + curr_param.data.copy_(saved_param.data) + + # restore fp16 partitions from fp32 + for sub_group_id in range(len(self.fp32_partitioned_groups_flat)): + fp32_param = self.fp32_partitioned_groups_flat[sub_group_id] + fp16_param = self.fp16_partitioned_groups_flat[sub_group_id] + fp16_param.data.copy_(fp32_param.data) + + # update fp16 unflattened params + for sub_group_id in range(len(self.fp16_partitioned_groups_flat)): + updated_params = self.unflatten( + self.fp16_partitioned_groups_flat[sub_group_id], + self.fp16_partitioned_groups[sub_group_id]) + + for partitioned_param, q in zip(self.fp16_partitioned_groups[sub_group_id], updated_params): + partitioned_param.data = q.data + + # TODO: Support different/changing load/save DP degree. + def load_state_dict(self, + state_dict_list, + load_optimizer_states=True, + load_from_fp32_weights=False): + r"""Loading a ZeRO checkpoint + + Loads a state_dict created by an earlier call to state_dict(). + If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``, + whose parameters in turn came from ``model``, it is expected that the user + will call ``model.load_state_dict()`` before + ``fp16_optimizer_instance.load_state_dict()`` is called. + + Arguments: + state_dict_list: List of all saved ZeRO checkpoints, one for each saved partition. + Note that the number of saved partitions may differ from number of loading partitions to support + changing GPU count, specifically DP world size, between saving and loading checkpoints. + load_optimizer_states: Boolean indicating whether or not to load base optimizer states + load_from_fp32_weights: Boolean indicating whether to initialize fp32 master weights from fp32 + copies in checkpoints (no precision loss) or from model's fp16 copies (with precision loss). + + Example:: + + model = torch.nn.Linear(D_in, D_out).cuda().half() + optimizer = torch.optim.SGD(model.parameters(), lr=1e-3) + optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0) + ... + checkpoint = torch.load("saved.pth") + model.load_state_dict(checkpoint['model']) + optimizer.load_state_dict(checkpoint['optimizer']) + """ + + if self.elastic_checkpoint: + raise NotImplementedError( + "ZeRO-3 does not yet support elastic checkpointing, please disable for now." + ) + + if self.swap_optimizer or self.params_in_nvme_and_cpu: + raise NotImplementedError( + "ZeRO-3 does not yet support checkpointing with NVMe offloading, please disable for now." 
+ ) + + self._rigid_load_state_dict( + state_dict_list[dist.get_rank(group=self.dp_process_group)], + load_optimizer_states=load_optimizer_states) + + if len(self.persistent_parameters) > 0: + self.persistent_parameters[0].partition(self.persistent_parameters) + self.persistent_parameters[0].all_gather( + self.persistent_parameters) + + def save_checkpoint_prologue(self): + self._partition_all_parameters() + + def save_checkpoint_epilogue(self): + if len(self.persistent_parameters) > 0: + self.persistent_parameters[0].all_gather( + self.persistent_parameters) + + +def _handle_overflow(cpu_sum, x, i): + import math + rank = torch.distributed.get_rank() + if rank == 0: + t_i = -1 + for v_i, v in enumerate(x.data.contiguous().view(-1)): + if not math.isfinite(float(v)): + t_i = v_i + break + print( + f"rank {rank} detected overflow {cpu_sum} in tensor {i}:{t_i} shape {x.shape}" + ) + + +def estimate_zero3_model_states_mem_needs(total_params, + largest_layer_params, + num_gpus_per_node=1, + num_nodes=1, + cpu_offload=True, + cpu_offload_params=True, + zero_init=True, + additional_buffer_factor=1.5): + total_gpus = num_nodes * num_gpus_per_node + gpus_factor = 1 / num_nodes + largest_layer_memory = (4 * largest_layer_params) + + if cpu_offload: + if cpu_offload_params: + gpu_mem = largest_layer_memory + + if zero_init: + cpu_mem = total_params * 18 * gpus_factor * additional_buffer_factor + else: + cpu_mem = total_params * max(4 * num_gpus_per_node, + 18 * gpus_factor) * additional_buffer_factor + + else: + gpu_mem = largest_layer_memory + int(2 * total_params / total_gpus) + + if zero_init: + cpu_mem = total_params * 16 * gpus_factor * additional_buffer_factor + else: + cpu_mem = total_params * max(4 * num_gpus_per_node, + 16 * gpus_factor) * additional_buffer_factor + else: + gpu_mem = largest_layer_memory + int(18 * total_params / total_gpus) + if zero_init: + cpu_mem = largest_layer_params * 4 * num_gpus_per_node * additional_buffer_factor + else: + cpu_mem = total_params * 4 * num_gpus_per_node * additional_buffer_factor + + return int(cpu_mem), int(gpu_mem), largest_layer_memory + + +def model_to_params(model): + # shared params calculated only once + total_params = sum( + dict((p.data_ptr(), + p.numel()) for p in model.parameters()).values()) + + largest_layer_params = 0 + for m in model.modules(): + # assuming no shared params within a single layer + layer_params = sum(p.numel() for p in m.parameters(recurse=False)) + largest_layer_params = max(largest_layer_params, layer_params) + + return total_params, largest_layer_params + + +def estimate_zero3_model_states_mem_needs_all_live(model, + num_gpus_per_node=1, + num_nodes=1, + additional_buffer_factor=1.5): + """ + Print out estimates on memory usage requirements for ZeRO 3 params, optim states and gradients + for a given ``model`` and hardware setup. + + If you have an actual model object, use this function and everything will be derived + automatically. + + If it's a hypothetical model, use ``estimate_zero3_model_states_mem_needs_all_cold`` where you have to pass + the ``total_params`` and ``largest_layer_params`` explicitly. 
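+
+    A minimal usage sketch (the model below is only a stand-in)::
+
+        import torch.nn as nn
+
+        model = nn.Sequential(nn.Linear(1024, 4096), nn.Linear(4096, 1024))
+        estimate_zero3_model_states_mem_needs_all_live(model,
+                                                       num_gpus_per_node=8,
+                                                       num_nodes=2)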
+ + Args: + - ``model``: ``nn.Module`` object + - ``num_gpus_per_node``: how many gpus per node (defaults to 1) + - ``num_nodes``: how many nodes (defaults to 1), + - ``additional_buffer_factor``: estimation factor (defaults to 1.5): + + """ + + total_params, largest_layer_params = model_to_params(model) + + estimate_zero3_model_states_mem_needs_all_cold( + total_params=total_params, + largest_layer_params=largest_layer_params, + num_gpus_per_node=num_gpus_per_node, + num_nodes=num_nodes, + additional_buffer_factor=additional_buffer_factor) + + +def estimate_zero3_model_states_mem_needs_all_cold(total_params, + largest_layer_params, + num_gpus_per_node=1, + num_nodes=1, + additional_buffer_factor=1.5): + """ + Print out estimates on memory usage requirements for ZeRO 3 params, optim states and gradients + for a given ``model`` and hardware setup. + + If it's a hypothetical model, use this function where you have to pass + the ``total_params`` and ``largest_layer_params`` explicitly. + + If you have an actual model object, use ``estimate_zero3_model_states_mem_needs_all_live`` and everything + will be derived automatically. + + Args: + - ``total_params``: total model params + - ``largest_layer_params``: largest layer's params + - ``num_gpus_per_node``: how many gpus per node (defaults to 1) + - ``num_nodes``: how many nodes (defaults to 1), + - ``additional_buffer_factor``: estimation factor (defaults to 1.5): + + """ + + def format_options(cpu_offload, cpu_offload_params, zero_init): + enabled = [] + enabled.append(f"cpu_offload={1 if cpu_offload else 0}") + enabled.append(f"cpu_offload_params={1 if cpu_offload_params else 0}") + enabled.append(f"zero_init={1 if zero_init else 0}") + return ", ".join(enabled) + + nodes_str = "nodes" if num_nodes > 1 else "node" + gpus_str = "GPUs" if num_gpus_per_node > 1 else "GPU" + print( + "Estimated memory needed for params, optim states and gradients for a:\n" + f"HW: Setup with {num_nodes} {nodes_str}, {num_gpus_per_node} {gpus_str} per node.\n" + f"SW: Model with {int(total_params / 1e6)}M total params, {int(largest_layer_params / 1e6)}M largest layer params." 
+ ) + print(" per CPU | per GPU | Options") + for cpu_offload in [True, False]: + for cpu_offload_params in [True, False]: + if not cpu_offload and cpu_offload_params: + continue + for zero_init in [True, False]: + cpu_mem, gpu_mem, largest_layer_memory = estimate_zero3_model_states_mem_needs( + total_params=total_params, + largest_layer_params=largest_layer_params, + num_gpus_per_node=num_gpus_per_node, + num_nodes=num_nodes, + cpu_offload=cpu_offload, + cpu_offload_params=cpu_offload_params, + zero_init=zero_init, + additional_buffer_factor=additional_buffer_factor + ) + + options_str = format_options(cpu_offload=cpu_offload, + cpu_offload_params=cpu_offload_params, + zero_init=zero_init) + print( + f" {cpu_mem / 2 ** 30:7.2f}GB | {gpu_mem / 2 ** 30:6.2f}GB | {options_str}") diff --git a/colossalai/registry/__init__.py b/colossalai/registry/__init__.py new file mode 100644 index 000000000..99aedc495 --- /dev/null +++ b/colossalai/registry/__init__.py @@ -0,0 +1,22 @@ +import torch.distributed.optim as dist_optim +import torch.nn as nn +import torch.optim as optim +import torchvision.models as tv_models +from torchvision.transforms import transforms + +from .registry import Registry + +LAYERS = Registry('layers', third_party_library=[nn]) +LOSSES = Registry('losses') +MODELS = Registry('models', third_party_library=[tv_models]) +OPTIMIZERS = Registry('optimizers', third_party_library=[optim, dist_optim]) +OPTIMIZER_WRAPPERS = Registry('optimizer_wrappers') +DATASETS = Registry('datasets') +DIST_GROUP_INITIALIZER = Registry('dist_group_initializer') +GRADIENT_HANDLER = Registry('gradient_handler') +LOSSES = Registry('losses', third_party_library=[nn]) +HOOKS = Registry('hooks') +TRANSFORMS = Registry('transforms', third_party_library=[transforms]) +PIPE_ALLOC_POLICY = Registry('pipeline_allocation_policy') +SAMPLERS = Registry('samplers') +LR_SCHEDULERS = Registry('lr_schedulers') diff --git a/colossalai/registry/registry.py b/colossalai/registry/registry.py new file mode 100644 index 000000000..3ea858b7e --- /dev/null +++ b/colossalai/registry/registry.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from types import ModuleType +from typing import List + + +class Registry: + """This is a registry class used to register classes and modules so that a universal + object builder can be enabled. + + :param name: The name of the registry + :type name: str + :param third_party_library: List of third party libraries which are used in the + initialization of the register module + :type third_party_library: list, optional + """ + + def __init__(self, name: str, third_party_library: List[ModuleType] = None): + self._name = name + self._registry = dict() + self._third_party_lib = third_party_library + + @property + def name(self): + return self._name + + def register_module(self, module_class): + """Registers a module represented in `module_class`. 
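+
+        Typical use is as a class decorator (a sketch; ``MyLoss`` is a user-defined class)::
+
+            import torch.nn as nn
+            from colossalai.registry import LOSSES
+
+            @LOSSES.register_module
+            class MyLoss(nn.Module):
+                ...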
+ + :param module_class: The module to be registered + :type module_class: class + :raises AssertionError: Raises an AssertionError if the module has already been + registered before + :return: The module to be registered, so as to use it normally if via importing + :rtype: class + """ + module_name = module_class.__name__ + assert module_name not in self._registry + self._registry[module_name] = module_class + + # return so as to use it normally if via importing + return module_class + + def get_module(self, module_name: str): + """Retrieves a module with name `module_name` and returns the module if it has + already been registered before. + + :param module_name: The name of the module to be retrieved + :type module_name: str + :raises NameError: Raises a NameError if the module to be retrieved has neither been + registered directly nor as third party modules before + :return: The retrieved module or None + :rtype: :class:`object` + """ + if module_name in self._registry: + return self._registry[module_name] + elif self._third_party_lib is not None: + for lib in self._third_party_lib: + if hasattr(lib, module_name): + return getattr(lib, module_name) + raise NameError(f'Module {module_name} not found in the registry {self.name}') + + def has(self, module_name: str): + """Searches for a module with name `module_name` and returns a boolean value indicating + whether the module has been registered directly or as third party modules before. + + :param module_name: The name of the module to be searched for + :type module_name: str + :return: A boolean value indicating whether the module has been registered directly or + as third party modules before + :rtype: bool + """ + found_flag = module_name in self._registry + + if self._third_party_lib: + for lib in self._third_party_lib: + if hasattr(lib, module_name): + found_flag = True + break + + return found_flag diff --git a/colossalai/trainer/__init__.py b/colossalai/trainer/__init__.py new file mode 100644 index 000000000..34e38d54a --- /dev/null +++ b/colossalai/trainer/__init__.py @@ -0,0 +1,5 @@ +from ._trainer import Trainer +from .hooks import * +from .metric import Loss, Accuracy2D, Accuracy3D, Accuracy2p5D + +__all__ = ['Trainer', 'Loss', 'Accuracy3D', 'Accuracy2D', 'Accuracy2p5D'] diff --git a/colossalai/trainer/_trainer.py b/colossalai/trainer/_trainer.py new file mode 100644 index 000000000..673349640 --- /dev/null +++ b/colossalai/trainer/_trainer.py @@ -0,0 +1,333 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from typing import Optional +from typing import Union, List + +import torch +from torch import Tensor +from torch.utils.data import DataLoader +from tqdm import tqdm + +from colossalai.builder import build_hooks +from colossalai.checkpointing import save_checkpoint, load_checkpoint, get_checkpoint_path +from colossalai.context import Config +from colossalai.engine import Engine +from colossalai.logging import get_global_dist_logger +from colossalai.utils import get_global_multitimer, is_dp_rank_0, is_tp_rank_0, is_no_pp_or_last_stage +from colossalai.nn.data import DataParallelSampler + + +class Trainer: + """This a class tending for easy deployments of users' training and evaluation instead of + writing their own scripts. It is similar with ``ignite.engine`` and ``keras.engine``, but is + called `Trainer`. 
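+
+    A minimal construction sketch (``engine`` and ``hooks_cfg`` are assumed to be built
+    beforehand)::
+
+        trainer = Trainer(engine=engine, hooks_cfg=hooks_cfg, verbose=True)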
+ + :param engine: Engine responsible for the process function + :param hooks_cfg: The configuration of hooks + :param verbose: If True, additional information will be printed + :type engine: Engine + :type hoooks_cfg: Config, optional + :type verbose: bool, optional + """ + def __init__(self, + engine: Engine, + hooks_cfg: Optional[Config] = None, + verbose: bool = False): + # training-ralated params + self._engine = engine + self._max_epochs = float('inf') + self._max_steps = float('inf') + self._cur_epoch = 0 + self._cur_step = 0 + + # data-related params + self._train_dataloader = None + self._test_dataloader = None + + # misc params + self._display_progress = False + self._logger = get_global_dist_logger() + self._verbose = verbose + + # hooks can store states in this dict, and could be consumed by other hooks + self.states = {} + + # build hooks + self.hooks = list() + if hooks_cfg is not None: + for cfg in hooks_cfg: + hook = build_hooks(cfg, self) + self.hooks.append(hook) + self.hooks.sort(key=lambda hook: hook.priority) + if self._verbose: + for hook in self.hooks: + self._logger.info( + f'build {hook.__class__.__name__} for train, priority = {hook.priority}', ranks=[0]) + + # timer + self._timer = get_global_multitimer() + + @property + def cur_epoch(self): + """Returns the index of the current epoch. + """ + return self._cur_epoch + + @property + def cur_step(self): + """Returns how many iteration steps have been processed. + """ + return self._cur_step + + def call_hooks(self, func, output=None): + """Calls specific hooks in the current time point. + + :param func: A string represents the time point + :param output: Output of the model after running a iteration or None in any other time points + :type func: str + :type output: optional + """ + # Only after iter hook will receive output + for hook in self.hooks: + if output is None: + getattr(hook, func)() + else: + getattr(hook, func)(*output) + + def exceed_max_step(self): + """Checks whether the trainer exceeds the maximum number of runnning iterations. + """ + return self._cur_step >= self._max_steps + + def set_epoch(self, epoch): + """Sets current epoch number. 
+ + :param epoch: Epoch number to be set + :type epoch: int + """ + self._cur_epoch = epoch + + def _recover_steps(self): + step = self.cur_step * self._engine.schedule.num_steps + self._cur_step = step + + def _set_display_progress(self, display_progress: bool): + self._display_progress = display_progress and is_dp_rank_0( + ) and is_tp_rank_0() and is_no_pp_or_last_stage() + + def _train_epoch(self, epoch: int = None): + # set sampler epoch + if epoch is not None and \ + hasattr(self._engine.train_dataloader, 'sampler') and \ + isinstance(self._engine.train_dataloader.sampler, DataParallelSampler): + self._engine.train_dataloader.sampler.set_epoch(epoch) + + self._engine.train() + + progress = range(self._engine.schedule.num_steps) + if self._display_progress: + if epoch is None: + progress = tqdm(progress, desc='[Train]') + else: + progress = tqdm(progress, desc=f'[Epoch {epoch} train]') + + # train 1 epoch + self.call_hooks('before_train_epoch') + self._timer.start('train-epoch') + for _ in progress: + self._cur_step += 1 + + self.call_hooks('before_train_iter') + self._timer.start('train-step') + logits, label, loss = self._engine.step() + self._timer.stop('train-step', keep_in_history=True) + self.call_hooks('after_train_iter', output=(logits, label, loss)) + + if self.exceed_max_step(): + # stop when max iter is reached + break + self._timer.stop('train-epoch', keep_in_history=True) + self.call_hooks('after_train_epoch') + self._timer.reset('train-step') + + def _eval(self, + epoch: int = None, + return_loss: bool = True): + # switch engine status + self._engine.eval() + + self.call_hooks('before_test') + with torch.no_grad(): + # prepare progress bar + progress = range(self._engine.schedule.num_steps) + if self._display_progress: + desc = 'Evaluation' + if epoch is not None: + desc = '[Epoch %d val]' % epoch + progress = tqdm(progress, desc=desc) + + self.call_hooks('before_test_epoch') + self._timer.start('test-epoch') + for _ in progress: + self.call_hooks('before_test_iter') + self._timer.start('test-step') + logits, label, loss = self._engine.step( + return_loss=return_loss) + self._timer.stop('test-step', keep_in_history=True) + self.call_hooks('after_test_iter', + output=(logits, label, loss)) + self._timer.stop('test-epoch', keep_in_history=True) + self.call_hooks('after_test_epoch') + self.call_hooks('after_test') + self._timer.reset('test-step') + self._timer.reset('test-epoch') + + def fit(self, + train_dataloader: DataLoader, + test_dataloader: DataLoader = None, + max_epochs: int = None, + max_steps: int = None, + test_interval: int = 1, + display_progress: bool = False): + """Trains the model to fit training data. 
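+
+        A sketch of a typical call (the dataloaders are assumed to be built already)::
+
+            trainer.fit(train_dataloader=train_loader,
+                        test_dataloader=test_loader,
+                        max_epochs=200,
+                        test_interval=1,
+                        display_progress=True)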
+ + :param train_dataloader: DataLoader in training + :param test_dataloader: DataLoader in testing + :param max_epochs: Maximum number of epoches + :param max_steps: Maximum number of running iterations + :param test_interval: Interval of testing + :param display_progress: If True, the training progress will be printed + :type train_dataloader: DataLoader + :type test_dataloader: DataLoader + :type max_epochs: int + :type max_steps: int + :type test_interval: int + :type display_progress: bool + """ + + # prepare dataloaders + self._train_dataloader = train_dataloader + self._engine.set_dataloader(self._train_dataloader, train=True) + self._engine.train() + + should_test = False + if test_dataloader is not None: + self._test_dataloader = test_dataloader + self._engine.set_dataloader(self._test_dataloader, train=False) + should_test = True + + # decide the + if max_epochs is not None: + self._max_epochs = max_epochs + if max_steps is not None: + self._max_steps = max_steps + self._set_display_progress(display_progress) + + # start train + self.call_hooks('before_train') + + # recover step value if resuming training + if self.cur_epoch != 0: + self._recover_steps() + + last_epoch = self._cur_epoch + + for epoch in range(last_epoch, self._max_epochs): + self._cur_epoch += 1 + + # train for one epoch + self._train_epoch(epoch) + + # start eval + if should_test and epoch % test_interval == 0: + self._eval(epoch, return_loss=True) + + # check for termination + if self.exceed_max_step(): + self._logger.info( + f"Max number of steps {self._max_steps} has been reached, training is stopped automatically") + break + self.call_hooks('after_train') + self._timer.reset('train-epoch') + + def evaluate(self, + test_dataloader: DataLoader, + display_progress: bool = False): + """Evaluates the model with testing data. + + :param test_dataloader: DataLoader in testing + :param display_progress: If True, the evaluation progress will be printed + :type test_dataloader: DataLoader + :type display_progress: bool, optional + """ + # set dataloader + self._test_dataloader = test_dataloader + self._engine.set_dataloader(self._test_dataloader, train=True) + + # set + self._set_display_progress(display_progress) + + # eval + self._eval(return_loss=True) + + def predict(self, data: Union[Tensor, List[Tensor]]): + """Uses trained model to make a prediction for a tensor or a tensor list. + + :param data: Data as the input + :type data: Union[Tensor, List[Tensor] + :return: The output of model as the prediction + :rtype: Tensor + """ + # predict without labels + if isinstance(data, (list, tuple)): + assert isinstance(data[0], Tensor) + else: + assert isinstance(data, Tensor) + self._engine.eval() + + # prepare a list of (data, label) to make it iterable + # for compatibility with schedule + simple_dataloader = [(data, None)] + self._engine.set_dataloader(simple_dataloader) + output, _, _ = self._engine.step(return_loss=False) + return output + + def save(self, path: str, suffix: str = ''): + """Saves the model to a file. + + :param path: Relative path of the file + :param suffix: Suffix of the file + :type path: str + :type suffix: str, optional + """ + save_path = get_checkpoint_path(path, + self._cur_epoch, + suffix=suffix) + save_checkpoint(save_path, self._cur_epoch, self._engine.get_model(), + self._engine.get_optimizer(), + self._engine.get_lr_scheduler()) + + def load(self, + path: str, + finetune: bool = False, + strict: bool = False): + """Loads parameters to the model from a file. 
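+
+        A usage sketch (the checkpoint path is hypothetical)::
+
+            trainer.load('./checkpoints/epoch_10.pth', finetune=False, strict=False)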
+ + :param path: Relative path of the file + :param finetune: Whether allows to load a part of the model + :param strict: Whether loads a model that has the same shape of parameters + :type path: str + :type finetune: bool, optional + :type strict: bool, optional + """ + last_epoch, _ = load_checkpoint(path, + self._engine.get_model(), + self._engine.get_optimizer(), + self._engine.get_lr_scheduler(), + finetune=finetune, + strict=strict) + if finetune: + self.set_epoch(0) + else: + self.set_epoch(last_epoch) diff --git a/colossalai/trainer/hooks/__init__.py b/colossalai/trainer/hooks/__init__.py new file mode 100644 index 000000000..2cc3c78b7 --- /dev/null +++ b/colossalai/trainer/hooks/__init__.py @@ -0,0 +1,11 @@ +from ._base_hook import BaseHook +from ._checkpoint_hook import SaveCheckpointHook, LoadCheckpointHook +from ._metric_hook import LossHook, Accuracy2DHook, AccuracyHook, MetricHook +from ._log_hook import LogMetricByEpochHook, TensorboardHook, LogTimingByEpochHook, LogMemoryByEpochHook + +__all__ = [ + 'BaseHook', 'MetricHook', + 'LoadCheckpointHook', 'SaveCheckpointHook', + 'LossHook', 'AccuracyHook', 'Accuracy2DHook', + 'LogMetricByEpochHook', 'TensorboardHook', 'LogTimingByEpochHook', 'LogMemoryByEpochHook', +] diff --git a/colossalai/trainer/hooks/_base_hook.py b/colossalai/trainer/hooks/_base_hook.py new file mode 100644 index 000000000..4d510ab0f --- /dev/null +++ b/colossalai/trainer/hooks/_base_hook.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from abc import ABC + +from torch import Tensor + +from colossalai.logging import get_global_dist_logger +from .._trainer import Trainer + + +class BaseHook(ABC): + """This class allows users to add desired actions in specific time points + during training or evaluation. + + :param trainer: Trainer attached with current hook + :param priority: Priority in the printing, hooks with small priority will be printed in front + :type trainer: Trainer + :type priority: int + """ + def __init__(self, trainer: Trainer, priority: int) -> None: + self.trainer = trainer + self.priority = priority + self.logger = get_global_dist_logger() + + def before_train(self): + """Actions before training. + """ + pass + + def after_train(self): + """Actions after training. + """ + pass + + def before_train_iter(self): + """Actions before running a training iteration. + """ + pass + + def after_train_iter(self, output: Tensor, label: Tensor, loss: Tensor): + """Actions after running a training iteration. + + :param output: Output of the model + :param label: Labels of the input data + :param loss: Loss between the output and input data + :type output: Tensor + :type label: Tensor + :type loss: Tensor + """ + pass + + def before_train_epoch(self): + """Actions before starting a training epoch. + """ + pass + + def after_train_epoch(self): + """Actions after finishing a training epoch. + """ + pass + + def before_test(self): + """Actions before evaluation. + """ + pass + + def after_test(self): + """Actions after evaluation. + """ + pass + + def before_test_epoch(self): + """Actions before starting a testing epoch. + """ + pass + + def after_test_epoch(self): + """Actions after finishing a testing epoch. + """ + pass + + def before_test_iter(self): + """Actions before running a testing iteration. + """ + pass + + def after_test_iter(self, output: Tensor, label: Tensor, loss: Tensor): + """Actions after running a testing iteration. 
+ + :param output: Output of the model + :param label: Labels of the input data + :param loss: Loss between the output and input data + :type output: Tensor + :type label: Tensor + :type loss: Tensor + """ + pass + + def init_runner_states(self, key, val): + """Initializes trainer's state. + + :param key: Key of reseting state + :param val: Value of reseting state + """ + if key not in self.trainer.states: + self.trainer.states[key] = val diff --git a/colossalai/trainer/hooks/_checkpoint_hook.py b/colossalai/trainer/hooks/_checkpoint_hook.py new file mode 100644 index 000000000..49fd28948 --- /dev/null +++ b/colossalai/trainer/hooks/_checkpoint_hook.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import os.path as osp + +import torch.distributed as dist + +from colossalai.checkpointing import get_latest_checkpoint_path, get_checkpoint_path +from colossalai.registry import HOOKS +from colossalai.trainer.hooks import BaseHook +from colossalai.trainer import Trainer +from colossalai.utils import is_dp_rank_0 + + +@HOOKS.register_module +class SaveCheckpointHook(BaseHook): + """Saves the model by interval in training process. + + :param trainer: Trainer attached with current hook + :param interval: Saving interval + :param checkpoint_dir: Directory of saving checkpoint + :param suffix: Saving suffix of the file + :param priority: Priority in the printing, hooks with small priority will be printed in front + :type trainer: Trainer + :type interval: int, optional + :type checkpoint_dir: int, optional + :type suffix: str, optional + :type priority: int, optional + """ + + def __init__(self, + trainer: Trainer, + interval: int = 1, + checkpoint_dir: str = None, + suffix: str = '', + priority: int = 0): + super().__init__(trainer=trainer, priority=priority) + assert isinstance(trainer, Trainer), \ + f'SaveCheckpointHook expects a Trainer, got {type(trainer)}' + self.interval = interval + self.checkpoint_dir = checkpoint_dir + self.suffix = suffix + + def after_train_epoch(self): + """Saves the model after a training epoch. + """ + # save by interval + if self.trainer.cur_epoch % self.interval == 0: + # only gpus with data parallel rank equals to 0 write to the disk + if is_dp_rank_0(): + self.trainer.save(path=self.checkpoint_dir, suffix=self.suffix) + self.logger.info( + f'checkpoint for epoch {self.trainer.cur_epoch} is saved to {self.checkpoint_dir}') + + # wait until everyone is done + if dist.is_initialized(): + dist.barrier() + + +@HOOKS.register_module +class LoadCheckpointHook(BaseHook): + """Loads the model before training process. 
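+
+    A construction sketch (the checkpoint directory is hypothetical)::
+
+        hook = LoadCheckpointHook(trainer=trainer,
+                                  checkpoint_dir='./checkpoints',
+                                  epoch=-1)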
+ + :param trainer: Trainer attached with current hook + :param checkpoint_dir: Directory of saving checkpoint + :param epoch: Epoch number to be set + :param finetune: Whether allows to load a part of the model + :param strict: Whether loads a model that has the same shape of parameters + :param priority: Priority in the printing, hooks with small priority will be printed in front + :type trainer: Trainer + :type checkpoint_dir: str, optional + :type epoch: str, optional + :type finetune: bool, optional + :type strict: bool, optional + :type priority: int, optional + """ + + def __init__(self, + trainer: Trainer = None, + checkpoint_dir: str = None, + epoch: int = -1, + finetune: bool = False, + strict: bool = False, + priority: int = 10) -> None: + assert isinstance(trainer, Trainer), \ + f'LoadLatestCheckpointHook excepts a Trainer, got {type(trainer)}' + self.epoch = epoch + self.checkpoint_dir = checkpoint_dir + self.finetune = finetune + self.strict = strict + super().__init__(trainer=trainer, priority=priority) + + def before_train(self): + """Loads parameters to the model before training. + """ + if self.epoch == -1: + path = get_latest_checkpoint_path(self.checkpoint_dir) + else: + path = get_checkpoint_path(self.checkpoint_dir, epoch=self.epoch) + if osp.exists(path): + self.trainer.load( + path, finetune=self.finetune, strict=self.strict) + self.logger.info( + f'loaded checkpoint from {path}') + else: + raise FileNotFoundError(f'checkpoint is not found at {path}') + + # Some utilities want to load a checkpoint without distributed being initialized + if dist.is_initialized(): + dist.barrier() diff --git a/colossalai/trainer/hooks/_log_hook.py b/colossalai/trainer/hooks/_log_hook.py new file mode 100644 index 000000000..d7ed4bf56 --- /dev/null +++ b/colossalai/trainer/hooks/_log_hook.py @@ -0,0 +1,247 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import os +import os.path as osp + +import torch +from tensorboardX import SummaryWriter + +from colossalai.context import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.registry import HOOKS +from colossalai.trainer._trainer import Trainer +from colossalai.utils import get_global_multitimer, set_global_multitimer_status, report_memory_usage, is_dp_rank_0, \ + is_tp_rank_0, is_no_pp_or_last_stage +from ._metric_hook import MetricHook + + +def _format_number(val): + if isinstance(val, float): + return f'{val:.5f}' + elif torch.is_floating_point(val): + return f'{val.item():.5f}' + return val + + +class EpochIntervalHook(MetricHook): + def __init__(self, trainer: Trainer, interval: int = 1, priority: int = 1): + super().__init__(trainer, priority) + self._interval = interval + + def _is_epoch_to_log(self): + return self.trainer.cur_epoch % self._interval == 0 + + +@HOOKS.register_module +class LogMetricByEpochHook(EpochIntervalHook): + """Specialized Hook to record the metric to log. 
+ + :param trainer: Trainer attached with current hook + :type trainer: Trainer + :param interval: Recording interval + :type interval: int, optional + :param priority: Priority in the printing, hooks with small priority will be printed in front + :type priority: int, optional + """ + + def __init__(self, trainer: Trainer, interval: int = 1, priority: int = 1) -> None: + super().__init__(trainer=trainer, interval=interval, priority=priority) + self._is_rank_to_log = is_dp_rank_0() and is_tp_rank_0() and is_no_pp_or_last_stage() + + def _get_str(self, mode): + msg = [] + for metric_name, metric_calculator in self.trainer.states['metrics'][mode].items(): + msg.append( + f'{metric_name} = {_format_number(metric_calculator.get_accumulated_value())}') + msg = ', '.join(msg) + return msg + + def after_train_epoch(self): + if self._is_epoch_to_log(): + msg = self._get_str(mode='train') + + if self._is_rank_to_log: + self.logger.info( + f'Training - Epoch {self.trainer.cur_epoch} - {self.__class__.__name__}: {msg}') + + def after_test_epoch(self): + if self._is_epoch_to_log(): + msg = self._get_str(mode='test') + if self._is_rank_to_log: + self.logger.info( + f'Testing - Epoch {self.trainer.cur_epoch} - {self.__class__.__name__}: {msg}') + + +@HOOKS.register_module +class TensorboardHook(MetricHook): + """Specialized Hook to record the metric to Tensorboard. + + :param trainer: Trainer attached with current hook + :type trainer: Trainer + :param log_dir: Directory of log + :type log_dir: str, optional + :param priority: Priority in the printing, hooks with small priority will be printed in front + :type priority: int, optional + """ + + def __init__(self, trainer: Trainer, log_dir: str, priority: int = 1) -> None: + super().__init__(trainer=trainer, priority=priority) + self._is_rank_to_log = is_no_pp_or_last_stage() + + if self._is_rank_to_log: + # create workspace on only one rank + if gpc.is_initialized(ParallelMode.GLOBAL): + rank = gpc.get_global_rank() + else: + rank = 0 + + log_dir = osp.join(log_dir, f'rank_{rank}') + + # create workspace + if not osp.exists(log_dir): + os.makedirs(log_dir) + + self.writer = SummaryWriter( + log_dir=log_dir, filename_suffix=f'_rank_{rank}') + + def after_train_iter(self, *args): + for metric_name, metric_calculator in self.trainer.states['metrics']['train'].items(): + if metric_calculator.epoch_only: + continue + val = metric_calculator.get_last_step_value() + if self._is_rank_to_log: + self.writer.add_scalar( + f'{metric_name}/train', val, self.trainer.cur_step) + + def after_test_iter(self, *args): + for metric_name, metric_calculator in self.trainer.states['metrics']['test'].items(): + if metric_calculator.epoch_only: + continue + val = metric_calculator.get_last_step_value() + if self._is_rank_to_log: + self.writer.add_scalar(f'{metric_name}/test', val, + self.trainer.cur_step) + + def after_test_epoch(self): + for metric_name, metric_calculator in self.trainer.states['metrics']['test'].items(): + if metric_calculator.epoch_only: + val = metric_calculator.get_accumulated_value() + if self._is_rank_to_log: + self.writer.add_scalar(f'{metric_name}/test', val, + self.trainer.cur_step) + + def after_train_epoch(self): + for metric_name, metric_calculator in self.trainer.states['metrics']['train'].items(): + if metric_calculator.epoch_only: + val = metric_calculator.get_accumulated_value() + if self._is_rank_to_log: + self.writer.add_scalar(f'{metric_name}/train', val, + self.trainer.cur_step) + + +@HOOKS.register_module +class 
LogTimingByEpochHook(EpochIntervalHook): + """Specialized Hook to write timing record to log. + + :param trainer: Trainer attached with current hook + :type trainer: Trainer + :param interval: Recording interval + :type interval: int, optional + :param priority: Priority in the printing, hooks with small priority will be printed in front + :type priority: int, optional + :param log_eval: Whether writes in evaluation + :type log_eval: bool, optional + """ + + def __init__(self, + trainer: Trainer, + interval: int = 1, + priority: int = 1, + log_eval: bool = True + ) -> None: + super().__init__(trainer=trainer, interval=interval, priority=priority) + set_global_multitimer_status(True) + self._global_timer = get_global_multitimer() + self._log_eval = log_eval + self._is_rank_to_log = is_dp_rank_0() and is_tp_rank_0() + + def _get_message(self): + msg = [] + for timer_name, timer in self._global_timer: + last_elapsed_time = timer.get_elapsed_time() + if timer.has_history: + history_mean = timer.get_history_mean() + history_sum = timer.get_history_sum() + msg.append( + f'{timer_name}: last elapsed time = {last_elapsed_time}, ' + f'history sum = {history_sum}, history mean = {history_mean}') + else: + msg.append( + f'{timer_name}: last elapsed time = {last_elapsed_time}') + + msg = ', '.join(msg) + return msg + + def after_train_epoch(self): + """Writes log after finishing a training epoch. + """ + if self._is_epoch_to_log() and self._is_rank_to_log: + msg = self._get_message() + self.logger.info( + f'Training - Epoch {self.trainer.cur_epoch} - {self.__class__.__name__}: {msg}') + + def after_test_epoch(self): + """Writes log after finishing a testing epoch. + """ + if self._is_epoch_to_log() and self._is_rank_to_log and self._log_eval: + msg = self._get_message() + self.logger.info( + f'Testing - Epoch {self.trainer.cur_epoch} - {self.__class__.__name__}: {msg}') + + +@HOOKS.register_module +class LogMemoryByEpochHook(EpochIntervalHook): + """Specialized Hook to write memory usage record to log. + + :param trainer: Trainer attached with current hook + :type trainer: Trainer + :param interval: Recording interval + :type interval: int, optional + :param priority: Priority in the printing, hooks with small priority will be printed in front + :type priority: int, optional + :param log_eval: Whether writes in evaluation + :type log_eval: bool, optional + """ + + def __init__(self, + trainer: Trainer, + interval: int = 1, + priority: int = 1, + log_eval: bool = True + ) -> None: + super().__init__(trainer=trainer, interval=interval, priority=priority) + set_global_multitimer_status(True) + self._global_timer = get_global_multitimer() + self._log_eval = log_eval + self._is_rank_to_log = is_dp_rank_0() and is_tp_rank_0() + + def before_train(self): + """Resets before training. + """ + if self._is_epoch_to_log() and self._is_rank_to_log: + report_memory_usage('before-train') + + def after_train_epoch(self): + """Writes log after finishing a training epoch. + """ + if self._is_epoch_to_log() and self._is_rank_to_log: + report_memory_usage( + f'After Train - Epoch {self.trainer.cur_epoch} - {self.__class__.__name__}') + + def after_test(self): + """Reports after testing. 
+ """ + if self._is_epoch_to_log() and self._is_rank_to_log and self._log_eval: + report_memory_usage( + f'After Test - Epoch {self.trainer.cur_epoch} - {self.__class__.__name__}') diff --git a/colossalai/trainer/hooks/_metric_hook.py b/colossalai/trainer/hooks/_metric_hook.py new file mode 100644 index 000000000..241ec63d3 --- /dev/null +++ b/colossalai/trainer/hooks/_metric_hook.py @@ -0,0 +1,185 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from colossalai.context import ParallelMode +from colossalai.registry import HOOKS +from colossalai.utils import is_no_pp_or_last_stage +from ._base_hook import BaseHook +from .._trainer import Trainer +from ..metric import Loss, Accuracy2D, Accuracy, Accuracy2p5D, Accuracy3D + + +class MetricHook(BaseHook): + """Specialized hook classes for :class:`Metric`. + Some help metric collectors initialize, reset and + update their states. Others are used to display and + record the metric. + + :param trainer: Trainer attached with current hook + :param priority: Priority in the printing, hooks with small priority will be printed in front + :type trainer: Trainer + :type priority: int + """ + + def __init__(self, trainer: Trainer, priority: int): + super().__init__(trainer, priority) + self._is_stage_to_log = is_no_pp_or_last_stage() + self._check_metric_states_initialization() + + def _check_metric_states_initialization(self): + if 'metrics' not in self.trainer.states: + self.init_runner_states('metrics', dict(train={}, test={})) + + +@HOOKS.register_module +class LossHook(MetricHook): + """Specialized hook class for :class:`Loss`. + + :param trainer: Trainer attached with current hook + :param priority: Priority in the printing, hooks with small priority will be printed in front + :type trainer: Trainer + :type priority: int, optional + """ + + def __init__(self, trainer: Trainer, priority: int = 10): + super().__init__(trainer, priority) + + if self._is_stage_to_log: + self.metric = Loss(epoch_only=False) + + # register the metric calculator + self.trainer.states['metrics']['train'][ + self.metric.__class__.__name__] = self.metric + self.trainer.states['metrics']['test'][ + self.metric.__class__.__name__] = self.metric + + def before_train_epoch(self): + if self._is_stage_to_log: + self.metric.reset() + + def after_train_iter(self, logits, label, loss): + if self._is_stage_to_log: + self.metric.update(loss) + + def before_test_epoch(self): + if self._is_stage_to_log: + self.metric.reset() + + def after_test_iter(self, logits, label, loss): + if self._is_stage_to_log: + self.metric.update(loss) + + +@HOOKS.register_module +class Accuracy2DHook(MetricHook): + """Specialized hook class for :class:`Accuracy2D`. + It acts the same as :class:`AccuracyHook`. 
+ + :param trainer: Trainer attached with current hook + :param priority: Priority in the printing, hooks with small priority will be printed in front + :type trainer: Trainer + :type priority: int, optional + """ + + def __init__(self, trainer: Trainer, priority: int = 10): + super().__init__(trainer, priority) + + if self._is_stage_to_log: + self.metric = Accuracy2D(epoch_only=True) + + # register the metric + self.trainer.states['metrics']['test'][ + self.metric.__class__.__name__] = self.metric + + def before_test(self): + if self._is_stage_to_log: + self.metric.reset() + + def after_test_iter(self, logits, label, *args): + if self._is_stage_to_log: + self.metric.update(logits, label) + + +@HOOKS.register_module +class Accuracy2p5DHook(MetricHook): + def __init__(self, trainer: Trainer, priority: int = 10): + super().__init__(trainer, priority) + + if self._is_stage_to_log: + self.metric = Accuracy2p5D(epoch_only=True) + + # register the metric + self.trainer.states['metrics']['test'][ + self.metric.__class__.__name__] = self.metric + + def before_test(self): + if self._is_stage_to_log: + self.metric.reset() + + def after_test_iter(self, logits, label, *args): + if self._is_stage_to_log: + self.metric.update(logits, label) + + +@HOOKS.register_module +class Accuracy3DHook(MetricHook): + """Specialized hook class for :class:`Accuracy3D`. + + :param trainer: Trainer attached with current hook + :param priority: Priority in the printing, hooks with small priority will be printed in front + :type trainer: Trainer + :type priority: int + """ + + def __init__(self, + trainer: Trainer, + input_parallel_mode: ParallelMode, + weight_parallel_mode: ParallelMode, + priority: int = 10): + super().__init__(trainer, priority) + + if self._is_stage_to_log: + self.metric = Accuracy3D(epoch_only=True, + input_parallel_mode=input_parallel_mode, + weight_parallel_mode=weight_parallel_mode) + + # register the metric + self.trainer.states['metrics']['test'][ + self.metric.__class__.__name__] = self.metric + + def before_test(self): + if self._is_stage_to_log: + self.metric.reset() + + def after_test_iter(self, logits, label, *args): + if self._is_stage_to_log: + self.metric.update(logits, label) + + +@HOOKS.register_module +class AccuracyHook(MetricHook): + """Specialized hook class for :class:`Accuracy`. 
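Among the metric hooks above, Accuracy3DHook is the one that needs extra constructor arguments, since it must know which parallel modes the logits are partitioned over. A hedged config sketch, mirroring how the ViT-3D config later in this patch wires it up:

from colossalai.context import ParallelMode

hooks = [
    dict(
        type='Accuracy3DHook',
        input_parallel_mode=ParallelMode.PARALLEL_3D_OUTPUT,
        weight_parallel_mode=ParallelMode.PARALLEL_3D_WEIGHT,
    ),
]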
+ + :param trainer: Trainer attached with current hook + :param priority: Priority in the printing, hooks with small priority will be printed in front + :type trainer: Trainer + :type priority: int + """ + + def __init__(self, trainer: Trainer, priority: int = 10): + super().__init__(trainer, priority) + + if self._is_stage_to_log: + self.metric = Accuracy(epoch_only=True) + + # register the metric + self.trainer.states['metrics']['test'][ + self.metric.__class__.__name__] = self.metric + + def before_test(self): + if self._is_stage_to_log: + self.metric.reset() + + def after_test_iter(self, logits, label, *args): + if self._is_stage_to_log: + self.metric.update(logits, label) diff --git a/colossalai/trainer/metric.py b/colossalai/trainer/metric.py new file mode 100644 index 000000000..744e0e03a --- /dev/null +++ b/colossalai/trainer/metric.py @@ -0,0 +1,307 @@ +import os +from abc import ABC, abstractmethod + +import torch +import torch.distributed as dist + +from colossalai.communication import all_gather +from colossalai.context import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.nn.layer._parallel_utilities import _gather +from colossalai.nn.layer.parallel_3d._utils import get_last_group +from colossalai.utils import get_current_device + + +class Metric(ABC): + """A basic class of metric collectors. It collects a specific + metric during training or evaluation and it's always used with + :class:`MetricHook` to help it update its states and show the + metric. So please use corresponding hook class to make the metric + collector works. + + :param epoch_only: Whether the metric only read for the full epoch + :type epoch_only: bool + """ + + def __init__(self, epoch_only: bool): + # is the metric only read for the full epoch + self._epoch_only = epoch_only + + @property + def epoch_only(self): + """Returns :attr:`epoch_only`. + """ + return self._epoch_only + + @abstractmethod + def reset(self) -> None: + """Resets the metric to it's initial state. + By default, this is called at the start of each epoch. + """ + pass + + @abstractmethod + def update(self, *args, **kwargs) -> None: + """Updates the metric's state using the passed batch output. + By default, this is called once for each batch. + """ + pass + + @abstractmethod + def get_last_step_value(self): + """Returns the metric value in the last iteration. + """ + pass + + @abstractmethod + def get_accumulated_value(self): + """Computes the metric based on it's accumulated state. + By default, this is called at the end of each epoch. + + :return: the actual quantity of interest + :rtype: Any + """ + pass + + @staticmethod + @abstractmethod + def is_better(a, b) -> bool: + """Compares a and b, and returns whether a is better than b + + :return: The result of comparison + :rtype: bool + """ + pass + + +class Loss(Metric): + """A metric collector for loss. + + :param epoch_only: Whether the metric only read for the full epoch + :type epoch_only: bool + """ + + def __init__(self, epoch_only): + super().__init__(epoch_only=epoch_only) + self.last_step_loss = torch.zeros(1, device=get_current_device()) + self.accum_loss = torch.zeros(1, device=get_current_device()) + self.count = 0 + + def reset(self) -> None: + """Sets :attr:`last_step_loss` and :attr:`accum_loss` to zero. + """ + self.last_step_loss.zero_() + self.accum_loss.zero_() + self.count = 0 + + def update(self, loss) -> None: + """Updates :attr:`last_step_loss` and :attr:`accum_loss` with current loss. + It expects the output has loss. 
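To make the Metric contract defined above concrete, here is a minimal hypothetical subclass; the StepCounter name is invented for illustration, and the import path assumes the module layout introduced by this patch (colossalai/trainer/metric.py).

from colossalai.trainer.metric import Metric


class StepCounter(Metric):
    """Counts processed batches; epoch_only=False so it can be read every step."""

    def __init__(self, epoch_only: bool = False):
        super().__init__(epoch_only=epoch_only)
        self.count = 0

    def reset(self) -> None:
        self.count = 0

    def update(self, *args, **kwargs) -> None:
        self.count += 1

    def get_last_step_value(self):
        return self.count

    def get_accumulated_value(self):
        return self.count

    @staticmethod
    def is_better(a, b) -> bool:
        return a > b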
+ + :param loss: Current loss of the output + """ + # expect output to be logits, label and loss + loss_ = loss.detach() + self.last_step_loss.copy_(loss_) + self.accum_loss.add_(loss_) + self.count += 1 + + def get_accumulated_value(self): + """Returns accumulated loss. + """ + if gpc.is_initialized(ParallelMode.DATA): + dist.all_reduce(self.accum_loss, op=dist.ReduceOp.SUM, + group=gpc.get_group(ParallelMode.DATA)) + self.accum_loss.div_(gpc.get_world_size(ParallelMode.DATA)) + + self.accum_loss.div_(self.count) + return self.accum_loss.item() + + def get_last_step_value(self): + """Returns :attr:`last_step_loss`. + """ + return self.last_step_loss + + def is_better(a, b): + return a < b + + +class Accuracy(Metric): + """A metric collector for accuracy. It only works for classification + tasks. + + :param epoch_only: Whether the metric only read for the full epoch + :type epoch_only: bool + """ + + def __init__(self, epoch_only: bool): + super().__init__(epoch_only=epoch_only) + self.last_step_sum = torch.zeros(1, device=get_current_device()) + self.last_step_correct = torch.zeros(1, device=get_current_device()) + self.accumulated_sum = torch.zeros(1, device=get_current_device()) + self.accumulated_correct = torch.zeros(1, device=get_current_device()) + + def reset(self) -> None: + self.last_step_sum.zero_() + self.last_step_correct.zero_() + self.accumulated_sum.zero_() + self.accumulated_correct.zero_() + + def update(self, logits, label) -> None: + """Updates last step accuracy and accumulated accuracy with current logits + and labels. It expects the output has logits and labels. + + :param logits: The logits output of the model + :param label: The labels of the input data + """ + if isinstance(logits, (list, tuple)): + logits = logits[0] + if isinstance(label, (list, tuple)): + label = label[0] + + # update + preds = torch.argmax(logits, dim=-1) + correct = torch.sum(label == preds) + self.last_step_sum.fill_(label.size(0)) + self.last_step_correct.fill_(correct) + self.accumulated_sum += self.last_step_sum + self.accumulated_correct += self.last_step_correct + + def get_last_step_value(self): + dist.all_reduce(self.last_step_sum, + group=gpc.get_group(ParallelMode.DATA)) + dist.all_reduce(self.last_step_correct, + group=gpc.get_group(ParallelMode.DATA)) + return (self.last_step_sum / self.last_step_correct).item() + + def get_accumulated_value(self): + dist.all_reduce(self.accumulated_sum, + group=gpc.get_group(ParallelMode.DATA)) + dist.all_reduce(self.accumulated_correct, + group=gpc.get_group(ParallelMode.DATA)) + return (self.accumulated_correct / self.accumulated_sum).item() + + def is_better(a, b) -> bool: + return a > b + + +class Accuracy2D(Accuracy): + """A metric collector for accuracy. It only works for classification + tasks. This class is the same as :class:`Accuracy` but used in 2D + model parallelism. 
+ + :param epoch_only: Whether the metric only read for the full epoch + :type epoch_only: bool + """ + + def __init__(self, epoch_only: bool): + super().__init__(epoch_only=epoch_only) + + def update(self, logits, label) -> None: + if isinstance(logits, (list, tuple)): + logits = logits[0] + if isinstance(label, (list, tuple)): + label = label[0] + + logits = _gather( + logits, + ParallelMode.PARALLEL_2D_ROW, + 1 + ) + logits = _gather( + logits, + ParallelMode.PARALLEL_2D_COL, + 0, + ) + # update + preds = torch.argmax(logits, dim=-1) + correct = torch.sum(label == preds) + self.last_step_sum.fill_(label.size(0)) + self.last_step_correct.fill_(correct) + self.accumulated_sum += self.last_step_sum + self.accumulated_correct += self.last_step_correct + + +class Accuracy2p5D(Accuracy): + def __init__(self, epoch_only: bool): + super().__init__(epoch_only=epoch_only) + + def update(self, logits, label) -> None: + if isinstance(logits, (list, tuple)): + logits = logits[0] + if isinstance(label, (list, tuple)): + label = label[0] + + logits = _gather( + logits, + ParallelMode.PARALLEL_2P5D_ROW, + 1 + ) + logits = _gather( + logits, + ParallelMode.PARALLEL_2P5D_COL, + 0, + ) + logits = _gather( + logits, + ParallelMode.PARALLEL_2P5D_DEP, + 0, + ) + # update + preds = torch.argmax(logits, dim=-1) + correct = torch.sum(label == preds) + self.last_step_sum.fill_(label.size(0)) + self.last_step_correct.fill_(correct) + self.accumulated_sum += self.last_step_sum + self.accumulated_correct += self.last_step_correct + + def is_better(a, b) -> bool: + return a > b + + +class Accuracy3D(Accuracy): + """A metric collector for accuracy. It only works for classification + tasks. This class is the same as :class:`Accuracy` but used in 3D + model parallelism. + + :param input_parallel_mode: The parallel mode of the input, generally it should be `ParallelMode.PARALLEL_3D_OUTPUT` + :type input_parallel_mode: `ParallelMode` + :param weight_parallel_mode: The parallel mode of the weight, generally it should be `ParallelMode.PARALLEL_3D_WEIGHT` + :type weight_parallel_mode: `ParallelMode` + :param epoch_only: Whether the metric only read for the full epoch + :type epoch_only: bool + """ + + def __init__(self, epoch_only, input_parallel_mode, weight_parallel_mode): + super().__init__(epoch_only=epoch_only) + self.depth = int(os.environ['DEPTH_3D']) + self.input_parallel_mode = input_parallel_mode + self.weight_parallel_mode = weight_parallel_mode + self.output_parallel_mode = get_last_group(input_parallel_mode, + weight_parallel_mode) + + def update(self, logits, target): + if isinstance(logits, (list, tuple)): + logits = logits[0] + if isinstance(target, (list, tuple)): + target = target[0] + + batch_size = target.size(0) + + j = gpc.get_local_rank(self.input_parallel_mode) + i = gpc.get_local_rank(self.weight_parallel_mode) + target = torch.chunk(target, self.depth, dim=0)[i] + target = torch.chunk(target, self.depth, dim=0)[j] + + logits = all_gather(logits, -1, self.output_parallel_mode) + prediction = torch.argmax(logits, dim=-1) + correct = torch.sum(prediction == target) + + dist.all_reduce(correct, group=gpc.get_group(self.input_parallel_mode)) + dist.all_reduce(correct, + group=gpc.get_group(self.weight_parallel_mode)) + + self.last_step_sum.fill_(batch_size) + self.last_step_correct.fill_(correct) + self.accumulated_sum += self.last_step_sum + self.accumulated_correct += self.last_step_correct diff --git a/colossalai/utils/__init__.py b/colossalai/utils/__init__.py new file mode 100644 index 
000000000..f7ef2259b --- /dev/null +++ b/colossalai/utils/__init__.py @@ -0,0 +1,22 @@ +from .activation_checkpoint import checkpoint +from .common import print_rank_0, sync_model_param_in_dp, is_dp_rank_0, is_tp_rank_0, is_no_pp_or_last_stage +from .cuda import get_current_device, synchronize, empty_cache, set_to_cuda +from .memory import report_memory_usage +from .timer import MultiTimer, Timer + +_GLOBAL_MULTI_TIMER = MultiTimer(on=False) + + +def get_global_multitimer(): + return _GLOBAL_MULTI_TIMER + + +def set_global_multitimer_status(mode: bool): + _GLOBAL_MULTI_TIMER.set_status(mode) + + +__all__ = ['checkpoint', 'print_rank_0', 'sync_model_param_in_dp', 'get_current_device', + 'synchronize', 'empty_cache', 'set_to_cuda', 'report_memory_usage', 'Timer', 'MultiTimer', + 'get_global_multitimer', 'set_global_multitimer_status', + 'is_dp_rank_0', 'is_tp_rank_0', 'is_no_pp_or_last_stage' + ] diff --git a/colossalai/utils/activation_checkpoint.py b/colossalai/utils/activation_checkpoint.py new file mode 100644 index 000000000..e097e92e8 --- /dev/null +++ b/colossalai/utils/activation_checkpoint.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch +from torch.utils.checkpoint import check_backward_validity, detach_variable + +from colossalai.context.random import get_states, get_current_mode, set_seed_states, set_mode, sync_states + + +class CheckpointFunction(torch.autograd.Function): + + @staticmethod + def forward(ctx, run_function, *args): + check_backward_validity(args) + ctx.run_function = run_function + + # preserve rng states + ctx.fwd_cpu_rng_state = torch.get_rng_state() + sync_states() + ctx.fwd_seed_states = get_states(copy=True) + ctx.fwd_current_mode = get_current_mode() + + if hasattr(torch, 'is_autocast_enabled'): + ctx.had_autocast_in_fwd = torch.is_autocast_enabled() + else: + ctx.had_autocast_in_fwd = False + + # Save non-tensor inputs in ctx, keep a placeholder None for tensors + # to be filled out during the backward. + ctx.inputs = [] + ctx.tensor_indices = [] + tensor_inputs = [] + for i, arg in enumerate(args): + if torch.is_tensor(arg): + tensor_inputs.append(arg) + ctx.tensor_indices.append(i) + ctx.inputs.append(None) + else: + ctx.inputs.append(arg) + + ctx.save_for_backward(*tensor_inputs) + + with torch.no_grad(): + outputs = run_function(*args) + return outputs + + @staticmethod + def backward(ctx, *args): + if not torch.autograd._is_checkpoint_valid(): + raise RuntimeError( + "Checkpointing is not compatible with .grad() or when an `inputs` parameter" + " is passed to .backward(). Please use .backward() and do not pass its `inputs`" + " argument.") + # Copy the list to avoid modifying original list. + inputs = list(ctx.inputs) + tensor_indices = ctx.tensor_indices + tensors = ctx.saved_tensors + + # store the current states + bwd_cpu_rng_state = torch.get_rng_state() + sync_states() + bwd_seed_states = get_states(copy=True) + bwd_current_mode = get_current_mode() + + # set the states to what it used to be + torch.set_rng_state(ctx.fwd_cpu_rng_state) + for parallel_mode, state in ctx.fwd_seed_states.items(): + set_seed_states(parallel_mode, state) + set_mode(ctx.fwd_current_mode) + + # Fill in inputs with appropriate saved tensors. 
+ for i, idx in enumerate(tensor_indices): + inputs[idx] = tensors[i] + + detached_inputs = detach_variable(tuple(inputs)) + if ctx.had_autocast_in_fwd: + with torch.enable_grad(), torch.cuda.amp.autocast(): + outputs = ctx.run_function(*detached_inputs) + else: + with torch.enable_grad(): + outputs = ctx.run_function(*detached_inputs) + + if isinstance(outputs, torch.Tensor): + outputs = (outputs,) + + # recover the rng states + torch.set_rng_state(bwd_cpu_rng_state) + for parallel_mode, state in bwd_seed_states.items(): + set_seed_states(parallel_mode, state) + set_mode(bwd_current_mode) + + # run backward() with only tensor that requires grad + outputs_with_grad = [] + args_with_grad = [] + for i in range(len(outputs)): + if torch.is_tensor(outputs[i]) and outputs[i].requires_grad: + outputs_with_grad.append(outputs[i]) + args_with_grad.append(args[i]) + if len(outputs_with_grad) == 0: + raise RuntimeError( + "none of output has requires_grad=True," + " this checkpoint() is not necessary") + torch.autograd.backward(outputs_with_grad, args_with_grad) + grads = tuple(inp.grad if isinstance(inp, torch.Tensor) else None + for inp in detached_inputs) + + return (None,) + grads + + +def checkpoint(function, *args): + '''Checkpoint the computation while preserve the rng states, modified from Pytorch torch.utils.checkpoint + + :param function: describe the forward pass function. It should know how to handle the input tuples. + :param args: tuple containing inputs to the function + :return: Output of running function on \*args + ''' + return CheckpointFunction.apply(function, *args) diff --git a/colossalai/utils/common.py b/colossalai/utils/common.py new file mode 100644 index 000000000..1496e77ac --- /dev/null +++ b/colossalai/utils/common.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch.distributed as dist + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc + + +def print_rank_0(msg: str, logger=None): + '''Print messages and save logs(optional). This is executed only if you are the rank-0 gpu. + + :param msg: A str message to output + :param logger: python logger object, defaults to None + ''' + if gpc.get_global_rank() == 0: + if logger is None: + print(msg, flush=True) + else: + logger.info(msg) + # print(msg, flush=True) + + +def sync_model_param_in_dp(model): + '''Make sure data parameters are consistent during Data Parallel Mode + + :param model: A pyTorch nn.model on whose parameters you check the consistency + ''' + + if gpc.is_initialized(ParallelMode.DATA) and gpc.get_world_size(ParallelMode.DATA) > 2: + for param in model.parameters(): + ranks = gpc.get_ranks_in_group(ParallelMode.DATA) + dist.broadcast(param, src=ranks[0], group=gpc.get_group(ParallelMode.DATA)) + +def is_dp_rank_0(): + return not gpc.is_initialized(ParallelMode.DATA) or gpc.is_first_rank(ParallelMode.DATA) + +def is_tp_rank_0(): + return not gpc.is_initialized(ParallelMode.TENSOR) or gpc.is_first_rank(ParallelMode.TENSOR) + +def is_no_pp_or_last_stage(): + return not gpc.is_initialized(ParallelMode.PIPELINE) or gpc.is_last_rank(ParallelMode.PIPELINE) \ No newline at end of file diff --git a/colossalai/utils/cuda.py b/colossalai/utils/cuda.py new file mode 100644 index 000000000..8f8d875e7 --- /dev/null +++ b/colossalai/utils/cuda.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch + + +def set_to_cuda(models): + '''Send model to gpu. 
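A minimal usage sketch for the checkpoint() wrapper defined above: wrapping a sub-module so its activations are recomputed during backward while the CPU and per-parallel-mode RNG states are restored as described. MyBlock and its layer sizes are invented for illustration.

import torch.nn as nn

from colossalai.utils import checkpoint


class MyBlock(nn.Module):
    def __init__(self):
        super().__init__()
        self.ff = nn.Sequential(nn.Linear(64, 256), nn.GELU(), nn.Linear(256, 64))

    def forward(self, x):
        # activations of self.ff are not stored; they are recomputed in backward
        return checkpoint(self.ff, x)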
+ + :param models: nn.module or a list of module + ''' + if isinstance(models, list) and len(models) > 1: + ret = [] + for model in models: + ret.append(model.to(get_current_device())) + return ret + elif isinstance(models, list): + return models[0].to(get_current_device()) + else: + return models.to(get_current_device()) + + +def get_current_device(): + ''' + Returns the index of a currently selected device (gpu/cpu). + ''' + if torch.cuda.is_available(): + return torch.cuda.current_device() + else: + return 'cpu' + + +def synchronize(): + ''' + Similar to cuda.synchronize(). + Waits for all kernels in all streams on a CUDA device to complete. + ''' + if torch.cuda.is_available(): + torch.cuda.synchronize() + + +def empty_cache(): + ''' + Similar to cuda.empty_cache() + Releases all unoccupied cached memory currently held by the caching allocator. + ''' + if torch.cuda.is_available(): + torch.cuda.empty_cache() diff --git a/colossalai/utils/memory.py b/colossalai/utils/memory.py new file mode 100644 index 000000000..b47b4099d --- /dev/null +++ b/colossalai/utils/memory.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import gc + +import psutil +import torch + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.logging import get_global_dist_logger + + +def bytes_to_GB(val, decimal=2): + '''A byte-to-Gigabyte converter, defaultly using binary notation. + + :param val: X bytes to convert + :return: X' Gb + ''' + return round(val / (1024 * 1024 * 1024), decimal) + + +def report_memory_usage(message): + '''Calculate and print RAM usage (in GB) + + :param message: a prefix message to add in the log + :type message: str + :raises EnvironmentError: raise error if no distributed environment has been initialized + ''' + if not gpc.is_initialized(ParallelMode.GLOBAL): + raise EnvironmentError("No distributed environment is initialized") + + # python doesn't do real-time garbage collection so do it explicitly to get the correct RAM reports + gc.collect() + vm_stats = psutil.virtual_memory() + vm_used = bytes_to_GB(vm_stats.total - vm_stats.available) + + gpu_allocated = bytes_to_GB(torch.cuda.memory_allocated()) + gpu_max_allocated = bytes_to_GB(torch.cuda.max_memory_allocated()) + gpu_cached = bytes_to_GB(torch.cuda.memory_cached()) + gpu_max_cached = bytes_to_GB(torch.cuda.max_memory_cached()) + + get_global_dist_logger().info( + f"{message} - GPU: allocated {gpu_allocated}GB, max allocated {gpu_max_allocated}GB, cached: {gpu_cached} GB, " + f"max cached: {gpu_max_cached}GB, CPU Virtual Memory: used = {vm_used}GB, percent = {vm_stats.percent}%") + + # get the peak memory to report correct data, so reset the counter for the next call + if hasattr(torch.cuda, "reset_peak_memory_stats"): # pytorch 1.4+ + torch.cuda.reset_peak_memory_stats() diff --git a/colossalai/utils/timer.py b/colossalai/utils/timer.py new file mode 100644 index 000000000..a516592dd --- /dev/null +++ b/colossalai/utils/timer.py @@ -0,0 +1,143 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import time + +from .cuda import synchronize + + +class Timer: + ''' + A timer object which helps to log the execution times, and provides different tools to assess the times. 
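A short sketch of the device and memory helpers above in use. Note that report_memory_usage() raises unless the global distributed context has been initialized, and its message argument is a free-form label.

from colossalai.utils import get_current_device, report_memory_usage, synchronize

device = get_current_device()         # CUDA device index, or 'cpu' when CUDA is unavailable
synchronize()                         # no-op without CUDA
report_memory_usage('after forward')  # logs GPU allocated/cached and host RAM figures in GB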
+ ''' + + def __init__(self): + self._started = False + self._start_time = time.time() + self._elapsed = 0 + self._history = [] + + @property + def has_history(self): + return len(self._history) != 0 + + def start(self): + '''Fisrtly synchronize cuda, reset the clock and then start the timer. + ''' + self._elapsed = 0 + synchronize() + self._start_time = time.time() + self._started = True + + def stop(self, keep_in_history: bool = False): + '''Stop the timer and record the start-stop time interval. + + :param keep_in_history: whether does it record into history each start-stop interval, defaults to False + :type keep_in_history: bool, optional + :return: start-stop interval + :rtype: int + ''' + synchronize() + end_time = time.time() + elapsed = end_time - self._start_time + if keep_in_history: + self._history.append(elapsed) + self._elapsed = elapsed + self._started = False + return elapsed + + def get_history_mean(self): + '''mean of all history start-stop time intervals. + + :return: mean of time intervals + :rtype: int + ''' + return sum(self._history) / len(self._history) + + def get_history_sum(self): + '''add up all the start-stop time intervals. + + :return: sum of time intervals + :rtype: int + ''' + return sum(self._history) + + def get_elapsed_time(self): + '''return the last start-stop time interval. *use it only when timer is not in progress* + + :return: the last time interval + :rtype: int + ''' + assert not self._started, 'Timer is still in progress' + return self._elapsed + + def reset(self): + '''clear up the timer and its history + ''' + self._history = [] + self._started = False + self._elapsed = 0 + + +class MultiTimer: + '''An object contains multiple timers + ''' + + def __init__(self, on: bool = True): + self._on = on + self._timers = dict() + + def start(self, name: str): + '''Start namely one of the timers + + :param name: timer's key + :type name: str + ''' + if self._on: + if name not in self._timers: + self._timers[name] = Timer() + return self._timers[name].start() + + def stop(self, name: str, keep_in_history: bool): + '''Stop namely one of the timers. + + :param name: timer's key + :param keep_in_history: whether does it record into history each start-stop interval + :type keep_in_history: bool + ''' + if self._on: + return self._timers[name].stop(keep_in_history) + else: + return None + + def get_timer(self, name): + '''Get timer by its name (from multitimer) + + :param name: timer's key + :return: timer with the name you give correctly + :rtype: Timer + ''' + return self._timers[name] + + def reset(self, name=None): + '''Reset timers. 
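A hedged usage sketch of the Timer/MultiTimer API above; the timer keys ('train-step', 'forward') are arbitrary examples, not names the trainer relies on.

from colossalai.utils import (MultiTimer, get_global_multitimer,
                              set_global_multitimer_status)

timer = MultiTimer(on=True)
timer.start('train-step')
# ... run one iteration ...
elapsed = timer.stop('train-step', keep_in_history=True)
mean = timer.get_timer('train-step').get_history_mean()

# the epoch-level timing and memory hooks read the shared instance instead
set_global_multitimer_status(True)
global_timer = get_global_multitimer()
global_timer.start('forward')
# ... forward pass ...
global_timer.stop('forward', keep_in_history=True)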
+ + :param name: if name is designated, the named timer will be reset and others will not, defaults to None + ''' + if self._on: + if name is not None: + self._timers[name].reset() + else: + for timer in self._timers: + timer.reset() + + def is_on(self): + + return self._on + + def set_status(self, mode: bool): + self._on = mode + + def __iter__(self): + for name, timer in self._timers.items(): + yield name, timer diff --git a/configs/resnet/resnet50.py b/configs/resnet/resnet50.py new file mode 100644 index 000000000..57b8b8304 --- /dev/null +++ b/configs/resnet/resnet50.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- +import os + +IMG_SIZE = 224 +BATCH_SIZE = 256 + +model = dict( + type='VanillaResNet', + block_type='ResNetBottleneck', + layers=[3, 4, 6, 3], + num_cls=10 +) + +train_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=os.environ['DATA'], + transform_pipeline=[ + dict(type='Resize', size=IMG_SIZE), + dict(type='RandomCrop', size=IMG_SIZE, padding=4), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010]), + ] + ), + dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, + shuffle=True, + ) +) + +test_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=os.environ['DATA'], + train=False, + transform_pipeline=[ + dict(type='Resize', size=IMG_SIZE), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010] + ), + ] + ), + dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, + ) +) + +parallelization = dict( + pipeline=1, + tensor=dict(size=1, mode=None), +) + +optimizer = dict( + type='Adam', + lr=0.01 +) + +loss = dict( + type='CrossEntropyLoss' +) + +max_epochs = 100 + +from colossalai.engine import AMP_TYPE + +fp16 = dict( + mode=AMP_TYPE.APEX, + opt_level='O2', +) diff --git a/configs/sample_config.py b/configs/sample_config.py new file mode 100644 index 000000000..bfc2d68e2 --- /dev/null +++ b/configs/sample_config.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +model = dict() +train_data = dict() +test_data = dict() +optimizer = dict() +loss = dict() +lr_scheduler = dict() + +fp16 = dict() +zero = dict() + +gradient_handler = [] +parallel = dict() + +num_epochs = int +num_steps = int + +cudnn_benchmark = True +cudnn_deterministic = False + +logging = dict() diff --git a/configs/vit/vit_2d.py b/configs/vit/vit_2d.py new file mode 100644 index 000000000..9d09eda2c --- /dev/null +++ b/configs/vit/vit_2d.py @@ -0,0 +1,161 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import os +from pathlib import Path + +BATCH_SIZE = 512 +IMG_SIZE = 32 +PATCH_SIZE = 4 +DIM = 512 +NUM_ATTENTION_HEADS = 8 +SUMMA_DIM = 2 +NUM_CLASSES = 10 +DEPTH = 6 + +train_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + transform_pipeline=[ + dict(type='Resize', size=IMG_SIZE), + dict(type='RandomCrop', size=IMG_SIZE, padding=4), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010]), + ] + ), + dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, + shuffle=True, + ) +) + +test_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + train=False, + transform_pipeline=[ + dict(type='Resize', size=IMG_SIZE), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + 
std=[0.2023, 0.1994, 0.2010] + ), + ] + ), + dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, + ) +) + +optimizer = dict( + type='Adam', + lr=0.001, + weight_decay=0 +) + +loss = dict( + type='CrossEntropyLoss2D', +) + +model = dict( + type='VisionTransformerFromConfig', + tensor_splitting_cfg=dict( + type='ViTInputSplitter2D', + ), + embedding_cfg=dict( + type='ViTPatchEmbedding2D', + img_size=IMG_SIZE, + patch_size=PATCH_SIZE, + embed_dim=DIM, + ), + token_fusion_cfg=dict( + type='ViTTokenFuser2D', + img_size=IMG_SIZE, + patch_size=PATCH_SIZE, + embed_dim=DIM, + drop_rate=0.1 + ), + norm_cfg=dict( + type='LayerNorm2D', + normalized_shape=DIM, + eps=1e-6, + ), + block_cfg=dict( + type='ViTBlock', + attention_cfg=dict( + type='ViTSelfAttention2D', + hidden_size=DIM, + num_attention_heads=NUM_ATTENTION_HEADS, + attention_dropout_prob=0., + hidden_dropout_prob=0.1, + checkpoint=True + ), + droppath_cfg=dict( + type='VanillaViTDropPath', + ), + mlp_cfg=dict( + type='ViTMLP2D', + in_features=DIM, + dropout_prob=0.1, + mlp_ratio=4, + checkpoint=True + ), + norm_cfg=dict( + type='LayerNorm2D', + normalized_shape=DIM, + eps=1e-6, + ), + ), + head_cfg=dict( + type='ViTHead2D', + hidden_size=DIM, + num_classes=NUM_CLASSES, + ), + embed_dim=DIM, + depth=DEPTH, + drop_path_rate=0., +) + +hooks = [ + dict(type='LogMetricByEpochHook'), + dict(type='Accuracy2DHook'), + dict(type='LossHook'), + dict(type='TensorboardHook', log_dir='./tfb_logs'), + # dict(type='SaveCheckpointHook', interval=5, checkpoint_dir='./ckpt'), + # dict(type='LoadCheckpointHook', epoch=20, checkpoint_dir='./ckpt') +] + +parallel = dict( + pipeline=dict(size=1), + tensor=dict(size=4, mode='2d'), +) + +# for fp16 training +# from colossalai.engine import AMP_TYPE +# fp16 = dict( +# mode=AMP_TYPE.PARALLEL, +# initial_scale=2 ** 8 +# ) + +lr_scheduler = dict( + type='LinearWarmupLR', + warmup_epochs=5 +) + +# only needed when pipeline parallel is used +# schedule = dict( +# num_microbatches=8 +# ) + +num_epochs = 60 + +logging = dict( + root_path='./logs' +) diff --git a/configs/vit/vit_3d.py b/configs/vit/vit_3d.py new file mode 100644 index 000000000..037e2c15e --- /dev/null +++ b/configs/vit/vit_3d.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import os +from pathlib import Path + +from colossalai.context import ParallelMode +from colossalai.engine import AMP_TYPE + +try: + import model_zoo +except: + print('You need to set model_zoo to your PYTHONPATH to use the models in the collection') + +BATCH_SIZE = 512 +IMG_SIZE = 32 + +train_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + transform_pipeline=[ + dict(type='RandomCrop', size=IMG_SIZE, padding=4), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010]), + ] + ), + dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, + num_workers=2, + shuffle=True, + ) +) + +test_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + train=False, + transform_pipeline=[ + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010] + ), + ] + ), + dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, + num_workers=2, + ) +) + +optimizer = dict( + type='Adam', + lr=0.001 +) + +loss = dict( + type='CrossEntropyLoss3D', + input_parallel_mode=ParallelMode.PARALLEL_3D_OUTPUT, + 
weight_parallel_mode=ParallelMode.PARALLEL_3D_WEIGHT, +) + +model = dict( + type='vit_tiny_3d_patch4_32', + drop_rate=0.1, +) + +hooks = [ + dict(type='LogMetricByEpochHook'), + dict(type='LogTimingByEpochHook'), + dict(type='LogMemoryByEpochHook'), + dict( + type='Accuracy3DHook', + input_parallel_mode=ParallelMode.PARALLEL_3D_OUTPUT, + weight_parallel_mode=ParallelMode.PARALLEL_3D_WEIGHT, + ), + dict(type='LossHook'), + dict(type='TensorboardHook', log_dir='./tfb_logs'), + # dict(type='SaveCheckpointHook', interval=5, checkpoint_dir='./ckpt'), + # dict(type='LoadCheckpointHook', epoch=20, checkpoint_dir='./ckpt') +] + +parallel = dict( + pipeline=dict(size=1), + tensor=dict(size=8, mode='3d'), +) + +fp16 = dict( + mode=AMP_TYPE.PARALLEL, + initial_scale=2 ** 8 +) + +lr_scheduler = dict( + type='LinearWarmupLR', + warmup_epochs=5 +) + +num_epochs = 60 + +logging = dict( + root_path='./logs' +) diff --git a/csrc/colossal_C_frontend.cpp b/csrc/colossal_C_frontend.cpp new file mode 100644 index 000000000..735caf54e --- /dev/null +++ b/csrc/colossal_C_frontend.cpp @@ -0,0 +1,71 @@ +// modified from https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_adam.cu +#include + +void multi_tensor_scale_cuda( + int chunk_size, + at::Tensor noop_flag, + std::vector> tensor_lists, + float scale); + +void multi_tensor_sgd_cuda( + int chunk_size, + at::Tensor noop_flag, + std::vector> tensor_lists, + float wd, + float momentum, + float dampening, + float lr, + bool nesterov, + bool first_run, + bool wd_after_momentum, + float scale); + +void multi_tensor_adam_cuda( + int chunk_size, + at::Tensor noop_flag, + std::vector> tensor_lists, + const float lr, + const float beta1, + const float beta2, + const float epsilon, + const int step, + const int mode, + const int bias_correction, + const float weight_decay); + +void multi_tensor_lamb_cuda( + int chunk_size, + at::Tensor noop_flag, + std::vector> tensor_lists, + const float lr, + const float beta1, + const float beta2, + const float epsilon, + const int step, + const int bias_correction, + const float weight_decay, + const int grad_averaging, + const int mode, + at::Tensor global_grad_norm, + const float max_grad_norm, + at::optional use_nvlamb_python); + +std::tuple multi_tensor_l2norm_cuda( + int chunk_size, + at::Tensor noop_flag, + std::vector> tensor_lists, + at::optional per_tensor_python); + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("multi_tensor_scale", &multi_tensor_scale_cuda, + "Fused overflow check + scale for a list of contiguous tensors"); + m.def("multi_tensor_sgd", &multi_tensor_sgd_cuda, + "Fused SGD optimizer for list of contiguous tensors"); + m.def("multi_tensor_adam", &multi_tensor_adam_cuda, + "Compute and apply gradient update to parameters for Adam optimizer"); + m.def("multi_tensor_lamb", &multi_tensor_lamb_cuda, + "Computes and apply update for LAMB optimizer"); + m.def("multi_tensor_l2norm", &multi_tensor_l2norm_cuda, + "Computes L2 norm for a list of contiguous tensors"); +} \ No newline at end of file diff --git a/csrc/compat.h b/csrc/compat.h new file mode 100644 index 000000000..00066dc95 --- /dev/null +++ b/csrc/compat.h @@ -0,0 +1,10 @@ +// modified from https://github.com/NVIDIA/apex/blob/master/csrc/compat.h +#ifndef TORCH_CHECK +#define TORCH_CHECK AT_CHECK +#endif + +#ifdef VERSION_GE_1_3 +#define DATA_PTR data_ptr +#else +#define DATA_PTR data +#endif \ No newline at end of file diff --git a/csrc/multi_tensor_adam.cu b/csrc/multi_tensor_adam.cu new file mode 100644 index 000000000..633e2d63f --- 
/dev/null +++ b/csrc/multi_tensor_adam.cu @@ -0,0 +1,177 @@ +// modified from https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_adam.cu +#include +#include +#include +#include +// Another possibility: +// #include + +#include + +#include "type_shim.h" +#include "multi_tensor_apply.cuh" + +#define BLOCK_SIZE 512 +#define ILP 4 + +typedef enum +{ + ADAM_MODE_0 = 0, // L2 regularization mode + ADAM_MODE_1 = 1 // Decoupled weight decay mode(AdamW) +} adamMode_t; + +using MATH_T = float; + +template +struct AdamFunctor +{ + __device__ __forceinline__ void operator()( + int chunk_size, + volatile int *noop_gmem, + TensorListMetadata<4> &tl, + const float beta1, + const float beta2, + const float beta1_correction, + const float beta2_correction, + const float epsilon, + const float lr, + adamMode_t mode, + const float decay) + { + // I'd like this kernel to propagate infs/nans. + // if(*noop_gmem == 1) + // return; + + int tensor_loc = tl.block_to_tensor[blockIdx.x]; + + // potentially use to pass in list of scalar + // int tensor_num = tl.start_tensor_this_launch + tensor_loc; + + int chunk_idx = tl.block_to_chunk[blockIdx.x]; + int n = tl.sizes[tensor_loc]; + + T *g = (T *)tl.addresses[0][tensor_loc]; + g += chunk_idx * chunk_size; + + T *p = (T *)tl.addresses[1][tensor_loc]; + p += chunk_idx * chunk_size; + + T *m = (T *)tl.addresses[2][tensor_loc]; + m += chunk_idx * chunk_size; + + T *v = (T *)tl.addresses[3][tensor_loc]; + v += chunk_idx * chunk_size; + + n -= chunk_idx * chunk_size; + + // see note in multi_tensor_scale_kernel.cu + for (int i_start = 0; + i_start < n && i_start < chunk_size; + i_start += blockDim.x * ILP) + { + MATH_T r_g[ILP]; + MATH_T r_p[ILP]; + MATH_T r_m[ILP]; + MATH_T r_v[ILP]; +#pragma unroll + for (int ii = 0; ii < ILP; ii++) + { + int i = i_start + threadIdx.x + ii * blockDim.x; + if (i < n && i < chunk_size) + { + r_g[ii] = g[i]; + r_p[ii] = p[i]; + r_m[ii] = m[i]; + r_v[ii] = v[i]; + } + else + { + r_g[ii] = MATH_T(0); + r_p[ii] = MATH_T(0); + r_m[ii] = MATH_T(0); + r_v[ii] = MATH_T(0); + } + } +#pragma unroll + for (int ii = 0; ii < ILP; ii++) + { + if (mode == ADAM_MODE_0) + { // L2 + r_g[ii] = r_g[ii] + (decay * r_p[ii]); + r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii]; + r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii]; + MATH_T next_m_unbiased = r_m[ii] / beta1_correction; + MATH_T next_v_unbiased = r_v[ii] / beta2_correction; + MATH_T denom = sqrtf(next_v_unbiased) + epsilon; + MATH_T update = next_m_unbiased / denom; + r_p[ii] = r_p[ii] - (lr * update); + } + else + { // weight decay + r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii]; + r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii]; + MATH_T next_m_unbiased = r_m[ii] / beta1_correction; + MATH_T next_v_unbiased = r_v[ii] / beta2_correction; + MATH_T denom = sqrtf(next_v_unbiased) + epsilon; + MATH_T update = (next_m_unbiased / denom) + (decay * r_p[ii]); + r_p[ii] = r_p[ii] - (lr * update); + } + } +#pragma unroll + for (int ii = 0; ii < ILP; ii++) + { + int i = i_start + threadIdx.x + ii * blockDim.x; + if (i < n && i < chunk_size) + { + p[i] = r_p[ii]; + m[i] = r_m[ii]; + v[i] = r_v[ii]; + } + } + } + } +}; + +void multi_tensor_adam_cuda( + int chunk_size, + at::Tensor noop_flag, + std::vector> tensor_lists, + const float lr, + const float beta1, + const float beta2, + const float epsilon, + const int step, + const int mode, + const int bias_correction, + const float weight_decay) +{ + using namespace at; + + // Handle bias correction mode + float 
bias_correction1 = 1.0f, bias_correction2 = 1.0f; + if (bias_correction == 1) + { + bias_correction1 = 1 - std::pow(beta1, step); + bias_correction2 = 1 - std::pow(beta2, step); + } + + // Assume single type across p,g,m1,m2 now + DISPATCH_DOUBLE_FLOAT_AND_HALF( + tensor_lists[0][0].scalar_type(), 0, "adam", + multi_tensor_apply<4>( + BLOCK_SIZE, + chunk_size, + noop_flag, + tensor_lists, + AdamFunctor(), + beta1, + beta2, + bias_correction1, + bias_correction2, + epsilon, + lr, + (adamMode_t)mode, + weight_decay);) + + AT_CUDA_CHECK(cudaGetLastError()); +} \ No newline at end of file diff --git a/csrc/multi_tensor_apply.cuh b/csrc/multi_tensor_apply.cuh new file mode 100644 index 000000000..9ce411911 --- /dev/null +++ b/csrc/multi_tensor_apply.cuh @@ -0,0 +1,133 @@ +// modified from https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_apply.cuh +#include +#include +#include +#include +#include +#include "compat.h" + +#include + +// #include + +// This header is the one-stop shop for all your multi-tensor apply needs. + +// TODO: Kernel arg size limit may be <4KB for some other cards (ie Jetson) +constexpr int depth_to_max_tensors[5] = {110, 64, 48, 36, 30}; +constexpr int depth_to_max_blocks[5] = {320, 320, 320, 320, 320}; + +template +struct TensorListMetadata +{ + void *addresses[n][depth_to_max_tensors[n - 1]]; + int sizes[depth_to_max_tensors[n - 1]]; + unsigned char block_to_tensor[depth_to_max_blocks[n - 1]]; + int block_to_chunk[depth_to_max_blocks[n - 1]]; // I fear this needs to be a full int. + int start_tensor_this_launch; +}; + +template +__global__ void multi_tensor_apply_kernel( + int chunk_size, + volatile int *noop_flag, + T tl, + U callable, + ArgTypes... args) +{ + // Hand the chunk information to the user-supplied functor to process however it likes. + callable(chunk_size, noop_flag, tl, args...); +} + +template +void multi_tensor_apply( + int block_size, + int chunk_size, + const at::Tensor &noop_flag, + const std::vector> &tensor_lists, + T callable, + ArgTypes... args) +{ + TORCH_CHECK(tensor_lists.size() == depth, "tensor_lists.size() != depth"); + int len0 = tensor_lists[0].size(); + TORCH_CHECK(len0 > 0, "tensor_lists[0].size() is not > 0"); + auto ref_device = tensor_lists[0][0].device(); + TORCH_CHECK(ref_device.type() == at::kCUDA, "expected input to be on cuda"); + for (int l = 0; l < tensor_lists.size(); l++) // No range-based for because I need indices + { + TORCH_CHECK(tensor_lists[l].size() == len0, "Size mismatch among tensor lists"); + for (int t = 0; t < tensor_lists[l].size(); t++) + { + // TODO: Print which tensor fails. 
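Stepping back to AdamFunctor above: the per-element arithmetic it fuses is the standard Adam update with optional bias correction. The plain-Python restatement below (decoupled weight-decay branch, ADAM_MODE_1) is illustrative only and not part of the extension.

import math


def adam_step(p, g, m, v, lr, beta1, beta2, eps, step, weight_decay):
    # the host side computes the corrections once per step, as in multi_tensor_adam_cuda
    bias_correction1 = 1 - beta1 ** step
    bias_correction2 = 1 - beta2 ** step
    # per-element work done inside the kernel
    m = beta1 * m + (1 - beta1) * g
    v = beta2 * v + (1 - beta2) * g * g
    m_hat = m / bias_correction1
    v_hat = v / bias_correction2
    update = m_hat / (math.sqrt(v_hat) + eps) + weight_decay * p
    return p - lr * update, m, v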
+ bool contiguous_memory = tensor_lists[l][t].is_contiguous(); +#ifdef VERSION_GE_1_5 + contiguous_memory = (contiguous_memory || tensor_lists[l][t].is_contiguous(at::MemoryFormat::ChannelsLast)); +#endif + TORCH_CHECK(contiguous_memory, "A tensor was not contiguous."); + TORCH_CHECK(tensor_lists[l][t].device() == ref_device, "A tensor was not on the same device as the first tensor"); + TORCH_CHECK(tensor_lists[l][t].numel() == tensor_lists[0][t].numel(), "Size mismatch"); + } + } + + int ntensors = tensor_lists[0].size(); + + TensorListMetadata tl; + + const at::cuda::OptionalCUDAGuard device_guard(device_of(tensor_lists[0][0])); + auto stream = at::cuda::getCurrentCUDAStream(); + + tl.start_tensor_this_launch = 0; + int loc_block_info = 0; + int loc_tensor_info = 0; + for (int t = 0; t < ntensors; t++) + { + tl.sizes[loc_tensor_info] = tensor_lists[0][t].numel(); + for (int d = 0; d < depth; d++) + tl.addresses[d][loc_tensor_info] = tensor_lists[d][t].data_ptr(); + loc_tensor_info++; + + int chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1) / chunk_size; + + for (int chunk = 0; chunk < chunks_this_tensor; chunk++) + { + // std::cout << chunks_this_tensor << std::endl; + tl.block_to_tensor[loc_block_info] = loc_tensor_info - 1; + tl.block_to_chunk[loc_block_info] = chunk; + loc_block_info++; + + bool tensors_full = (loc_tensor_info == depth_to_max_tensors[depth - 1] && + chunk == chunks_this_tensor - 1); + bool blocks_full = (loc_block_info == depth_to_max_blocks[depth - 1]); + bool last_chunk = (t == ntensors - 1 && chunk == chunks_this_tensor - 1); + if (tensors_full || blocks_full || last_chunk) + { + // using accscalar_t = acc_type; + multi_tensor_apply_kernel<<>>( + chunk_size, + noop_flag.DATA_PTR(), + tl, + callable, + args...); + + AT_CUDA_CHECK(cudaGetLastError()); + + // Reset. The control flow possibilities here make my brain hurt. 
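The loop above amounts to a batching rule: (tensor, chunk) pairs accumulate into TensorListMetadata until its tensor slots or block slots run out, and each time that happens one kernel launch flushes the batch. A small Python sketch of that rule follows; the default limits mirror the depth-1 constants above, everything else is illustrative.

def plan_launches(numels, chunk_size, max_tensors=110, max_blocks=320):
    launches, tensors_in_batch, blocks_in_batch = [], 0, 0
    for t, numel in enumerate(numels):
        tensors_in_batch += 1
        chunks = (numel + chunk_size - 1) // chunk_size      # ceil division
        for chunk in range(chunks):
            blocks_in_batch += 1
            tensors_full = tensors_in_batch == max_tensors and chunk == chunks - 1
            blocks_full = blocks_in_batch == max_blocks
            last_chunk = t == len(numels) - 1 and chunk == chunks - 1
            if tensors_full or blocks_full or last_chunk:
                launches.append((tensors_in_batch, blocks_in_batch))
                blocks_in_batch = 0
                # keep the current tensor in the next batch unless it just finished
                tensors_in_batch = 0 if chunk == chunks - 1 else 1
    return launches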
+ loc_block_info = 0; + if (chunk == chunks_this_tensor - 1) + { + // std::cout << "Hit case 1 " << cond1 << " " << cond2 << " " << cond3 << std::endl; + loc_tensor_info = 0; + tl.start_tensor_this_launch = t + 1; + } + else + { + // std::cout << "Hit case 2 " << cond1 << " " << cond2 << " " << cond3 << std::endl; + tl.sizes[0] = tl.sizes[loc_tensor_info - 1]; + for (int d = 0; d < depth; d++) + tl.addresses[d][0] = tl.addresses[d][loc_tensor_info - 1]; + loc_tensor_info = 1; + tl.start_tensor_this_launch = t; + } + } + } + } +} \ No newline at end of file diff --git a/csrc/multi_tensor_l2norm_kernel.cu b/csrc/multi_tensor_l2norm_kernel.cu new file mode 100644 index 000000000..03f60b34c --- /dev/null +++ b/csrc/multi_tensor_l2norm_kernel.cu @@ -0,0 +1,455 @@ +// modified from https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_l2norm_kernel.cu +#include +#include +#include +#include +#include +// Another possibility: +// #include + +#include + +#include "type_shim.h" +#include "multi_tensor_apply.cuh" + +#define BLOCK_SIZE 512 +#define ILP 4 + +template +__device__ __forceinline__ bool is_aligned(T *p) +{ + return ((uint64_t)p) % (ILP * sizeof(T)) == 0; +} + +template +__device__ __forceinline__ void load_store(T *dst, T *src, int dst_offset, int src_offset) +{ + typedef typename std::aligned_storage::type LT; + ((LT *)dst)[dst_offset] = ((LT *)src)[src_offset]; +} + +template +struct L2NormFunctor +{ + __device__ __forceinline__ void operator()( + int chunk_size, + volatile int *noop_gmem, + TensorListMetadata<1> &tl, + float *output, + float *output_per_tensor, + bool per_tensor, + int max_chunks_per_tensor) + { + // I'd like this kernel to propagate infs/nans. + // if(*noop_gmem == 1) + // return; + + int tensor_loc = tl.block_to_tensor[blockIdx.x]; + int chunk_idx = tl.block_to_chunk[blockIdx.x]; + int n = tl.sizes[tensor_loc]; + + x_t *x = (x_t *)tl.addresses[0][tensor_loc]; + x += chunk_idx * chunk_size; + + n -= chunk_idx * chunk_size; + + __shared__ float s_vals[512]; + + float vals[ILP]; // = {0}; // this probably works too but I want to be sure... + x_t r_x[ILP]; + for (int i = 0; i < ILP; i++) + { + vals[i] = 0.f; + r_x[i] = 0; + } + + // to make things simple, we put aligned case in a different code path + if (n % ILP == 0 && chunk_size % ILP == 0 && is_aligned(x)) + { + for (int i_start = threadIdx.x; i_start * ILP < n && i_start * ILP < chunk_size; i_start += blockDim.x) + { + // load + load_store(r_x, x, 0, i_start); +#pragma unroll + for (int ii = 0; ii < ILP; ii++) + { + float next = static_cast(r_x[ii]); + vals[ii] += next * next; + } + } + } + else + { + for (int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * ILP) + { +#pragma unroll + for (int ii = 0; ii < ILP; ii++) + { + int i = i_start + threadIdx.x + ii * blockDim.x; + if (i < n && i < chunk_size) + { + float next = static_cast(x[i]); + vals[ii] += next * next; + } + } + } + } + + float val = 0.f; + for (int i = 0; i < ILP; i++) + val += vals[i]; + + float final = reduce_block_into_lanes(s_vals, val); + + if (threadIdx.x == 0) + { + if (!isfinite(final)) + *noop_gmem = 1; // Blindly fire off a write. These will race but that's ok. 
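A plain-Python sketch of the two-stage reduction implemented by L2NormFunctor and the cleanup kernel above: every block contributes the sum of squares of its chunk, and the final norm is the square root of the grand total. Purely illustrative.

import math


def l2_norm(chunks):
    # chunks: one list of floats per (tensor, chunk) block
    partial = [sum(x * x for x in chunk) for chunk in chunks]   # per-block output[]
    return math.sqrt(sum(partial))                              # cleanup kernel

assert l2_norm([[3.0], [4.0]]) == 5.0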
+ output[blockIdx.x] += final; + if (per_tensor) + output_per_tensor[(tl.start_tensor_this_launch + tensor_loc) * max_chunks_per_tensor + chunk_idx] = final; + } + } +}; + +// Probably better to template, but since we are not likely to support other norm +template +struct MaxNormFunctor +{ + __device__ __forceinline__ void operator()( + int chunk_size, + volatile int *noop_gmem, + TensorListMetadata<1> &tl, + float *output, + float *output_per_tensor, + bool per_tensor, + int max_chunks_per_tensor) + { + // I'd like this kernel to propagate infs/nans. + // if(*noop_gmem == 1) + // return; + + int tensor_loc = tl.block_to_tensor[blockIdx.x]; + int chunk_idx = tl.block_to_chunk[blockIdx.x]; + int n = tl.sizes[tensor_loc]; + + x_t *x = (x_t *)tl.addresses[0][tensor_loc]; + x += chunk_idx * chunk_size; + + n -= chunk_idx * chunk_size; + + __shared__ float s_vals[512]; + + float vals[ILP]; // = {0}; // this probably works too but I want to be sure... + x_t r_x[ILP]; + for (int i = 0; i < ILP; i++) + { + vals[i] = 0.f; + r_x[i] = 0; + } + + // to make things simple, we put aligned case in a different code path + if (n % ILP == 0 && chunk_size % ILP == 0 && is_aligned(x)) + { + for (int i_start = threadIdx.x; i_start * ILP < n && i_start * ILP < chunk_size; i_start += blockDim.x) + { + // load + load_store(r_x, x, 0, i_start); +#pragma unroll + for (int ii = 0; ii < ILP; ii++) + { + float next = static_cast(r_x[ii]); + vals[ii] = fmaxf(fabsf(vals[ii]), fabsf(next)); + } + } + } + else + { + for (int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * ILP) + { +#pragma unroll + for (int ii = 0; ii < ILP; ii++) + { + int i = i_start + threadIdx.x + ii * blockDim.x; + if (i < n && i < chunk_size) + { + float next = static_cast(x[i]); + vals[ii] = fmaxf(fabsf(vals[ii]), fabsf(next)); + } + } + } + } + + float val = 0.f; + for (int i = 0; i < ILP; i++) + val = fmaxf(fabsf(val), fabsf(vals[i])); + + float final = reduce_block_into_lanes_max_op(s_vals, val); + + if (threadIdx.x == 0) + { + if (!isfinite(final)) + *noop_gmem = 1; // Blindly fire off a write. These will race but that's ok. 
+ output[blockIdx.x] = fmaxf(fabsf(output[blockIdx.x]), fabsf(final)); + if (per_tensor) + output_per_tensor[(tl.start_tensor_this_launch + tensor_loc) * max_chunks_per_tensor + chunk_idx] = final; + } + } +}; + +__global__ void cleanup( + float *output, + float *output_per_tensor, + float *ret, + float *ret_per_tensor, + bool per_tensor, + int max_chunks_per_tensor) +{ + __shared__ float vals[512]; + + if (blockIdx.x == 0) + { + float val = 0; + if (threadIdx.x < 320) + val = output[threadIdx.x]; + + float final = reduce_block_into_lanes(vals, val); + + if (threadIdx.x == 0) + *ret = sqrt(final); + } + + if (per_tensor) + { + float *output_this_tensor = output_per_tensor + blockIdx.x * max_chunks_per_tensor; + + float val = 0; + for (int i = threadIdx.x; i < max_chunks_per_tensor; i += blockDim.x) + val += output_this_tensor[i]; + + float final = reduce_block_into_lanes(vals, val); + + if (threadIdx.x == 0) + ret_per_tensor[blockIdx.x] = sqrt(final); + } +} + +__global__ void cleanup_v2( + float *output, + float *output_per_tensor, + float *ret, + float *ret_per_tensor, + bool per_tensor, + int max_chunks_per_tensor, + int norm_type, + float alpha, + float beta) +{ + __shared__ float vals[512]; + + if (blockIdx.x == 0) + { + float val = 0; + if (threadIdx.x < 320) + val = output[threadIdx.x]; + + if (norm_type == 0) + { + float final = reduce_block_into_lanes_max_op(vals, val); + if (threadIdx.x == 0) + *ret = alpha * (*ret) + beta * final; + } + else + { + float final = reduce_block_into_lanes(vals, val); + if (threadIdx.x == 0) + *ret = sqrt(alpha * (*ret) * (*ret) + beta * final); + } + } + + if (per_tensor) + { + float *output_this_tensor = output_per_tensor + blockIdx.x * max_chunks_per_tensor; + + if (norm_type == 0) + { + float val = 0; + for (int i = threadIdx.x; i < max_chunks_per_tensor; i += blockDim.x) + val = fmaxf(fabsf(val), fabsf(output_this_tensor[i])); + + float final = reduce_block_into_lanes_max_op(vals, val); + + if (threadIdx.x == 0) + ret_per_tensor[blockIdx.x] = alpha * ret_per_tensor[blockIdx.x] + beta * final; + } + else + { + float val = 0; + for (int i = threadIdx.x; i < max_chunks_per_tensor; i += blockDim.x) + val += output_this_tensor[i]; + + float final = reduce_block_into_lanes(vals, val); + + if (threadIdx.x == 0) + ret_per_tensor[blockIdx.x] = sqrt(alpha * ret_per_tensor[blockIdx.x] * ret_per_tensor[blockIdx.x] + beta * final); + } + } +} + +std::tuple multi_tensor_l2norm_cuda( + int chunk_size, + at::Tensor noop_flag, + std::vector> tensor_lists, + at::optional per_tensor_python) +{ + bool per_tensor = per_tensor_python.has_value() ? 
per_tensor_python.value() : false; + + auto float_options = tensor_lists[0][0].options().dtype(at::kFloat); + auto output = at::zeros({320}, float_options); + + at::Tensor output_per_tensor; + at::Tensor ret_per_tensor; + + int ntensors = tensor_lists[0].size(); + int max_chunks_per_tensor = -1; + + if (per_tensor) + { + for (int t = 0; t < ntensors; t++) + { + int max_chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1) / chunk_size; + if (max_chunks_this_tensor > max_chunks_per_tensor) + max_chunks_per_tensor = max_chunks_this_tensor; + } + output_per_tensor = at::zeros({ntensors * max_chunks_per_tensor}, float_options); + ret_per_tensor = at::empty({ntensors}, float_options); + } + else + { + ret_per_tensor = at::empty({0}, float_options); + } + + DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "multi_tensor_l2norm_cuda", + multi_tensor_apply<1>( + BLOCK_SIZE, + chunk_size, + noop_flag, + tensor_lists, + L2NormFunctor(), + output.DATA_PTR(), + per_tensor ? output_per_tensor.DATA_PTR() : nullptr, + per_tensor, + max_chunks_per_tensor);) + + AT_CUDA_CHECK(cudaGetLastError()); + // AT_CUDA_CHECK(cudaDeviceSynchronize()); + + // This involves one more small kernel launches, but will be negligible end to end. + // I could get rid of these by hacking the functor + multi tensor harness with persistence + // logic, but keeping it simple for now + auto ret = at::empty({1}, output.options()); + const at::cuda::OptionalCUDAGuard device_guard(device_of(output)); + auto stream = at::cuda::getCurrentCUDAStream(); + cleanup<<>>( + output.DATA_PTR(), + per_tensor ? output_per_tensor.DATA_PTR() : nullptr, + ret.DATA_PTR(), + per_tensor ? ret_per_tensor.DATA_PTR() : nullptr, + per_tensor, + max_chunks_per_tensor); + + return std::tuple(ret, ret_per_tensor); +} + +// Compute and update grad norm +// Here use a per tensor norm, and blend new norm(n) and old norm(gn) by +// L-2: gn = sqrt(a * gn^2 + b * n^2) +// L-inf: gn = a * gn + b * n +void multi_tensor_norm_out_cuda( + int chunk_size, + at::Tensor noop_flag, + std::vector> tensor_lists, + at::Tensor out, + const float alpha, + const float beta, + const int norm_type) +{ + auto float_options = tensor_lists[0][0].options().dtype(at::kFloat); + TORCH_CHECK(tensor_lists[0][0].device() == noop_flag.device(), "noop flag should be on the same device as tensors"); + // we don't need global thus uses empty here + auto output = at::empty({320}, float_options); + + at::Tensor output_per_tensor; + at::Tensor ret_per_tensor; + + int ntensors = tensor_lists[0].size(); + int max_chunks_per_tensor = -1; + + for (int t = 0; t < ntensors; t++) + { + int max_chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1) / chunk_size; + if (max_chunks_this_tensor > max_chunks_per_tensor) + max_chunks_per_tensor = max_chunks_this_tensor; + } + + // Although it is single write then read, still need to be zero + // Since tailing element also participate cleanup + output_per_tensor = at::zeros({ntensors * max_chunks_per_tensor}, float_options); + + if (norm_type == 0) + { + DISPATCH_FLOAT_AND_HALF( + tensor_lists[0][0].scalar_type(), 0, "multi_tensor_maxnorm_cuda", + multi_tensor_apply<1>( + BLOCK_SIZE, + chunk_size, + noop_flag, + tensor_lists, + MaxNormFunctor(), + output.DATA_PTR(), + output_per_tensor.DATA_PTR(), + true, + max_chunks_per_tensor);) + } + else + { + DISPATCH_FLOAT_AND_HALF( + tensor_lists[0][0].scalar_type(), 0, "multi_tensor_l2norm_cuda", + multi_tensor_apply<1>( + BLOCK_SIZE, + chunk_size, + noop_flag, + tensor_lists, + 
L2NormFunctor(), + output.DATA_PTR(), + output_per_tensor.DATA_PTR(), + true, + max_chunks_per_tensor);) + } + AT_CUDA_CHECK(cudaGetLastError()); + + // AT_CUDA_CHECK(cudaDeviceSynchronize()); + + // This involves one more small kernel launches, but will be negligible end to end. + // I could get rid of these by hacking the functor + multi tensor harness with persistence + // logic, but keeping it simple for now + auto ret = at::empty({1}, output.options()); + + // Adding the following device guard since it happens sometimes that the + // tensors are on one device and the cuda stream is on another device which + // results in ILLEGAL MEM ACCESS error. + const at::cuda::OptionalCUDAGuard device_guard(device_of(output)); + auto stream = at::cuda::getCurrentCUDAStream(); + cleanup_v2<<>>( + output.DATA_PTR(), + output_per_tensor.DATA_PTR(), + ret.DATA_PTR(), + out.DATA_PTR(), + true, + max_chunks_per_tensor, + norm_type, + alpha, + beta); + + return; +} \ No newline at end of file diff --git a/csrc/multi_tensor_lamb.cu b/csrc/multi_tensor_lamb.cu new file mode 100644 index 000000000..d67ce92cd --- /dev/null +++ b/csrc/multi_tensor_lamb.cu @@ -0,0 +1,427 @@ +// modified from https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_lamb.cu +#include +#include +#include +#include +// Another possibility: +// #include + +#include + +#include "type_shim.h" +#include "multi_tensor_apply.cuh" + +#define BLOCK_SIZE 512 +#define ILP 4 + +template +__device__ __forceinline__ bool is_aligned(T *p) +{ + return ((uint64_t)p) % (ILP * sizeof(T)) == 0; +} + +template +__device__ __forceinline__ void load_store(T *dst, T *src, int dst_offset, int src_offset) +{ + typedef typename std::aligned_storage::type LT; + ((LT *)dst)[dst_offset] = ((LT *)src)[src_offset]; +} + +typedef enum +{ + MOMENT_MODE_0 = 0, // L2 regularization mode + MOMENT_MODE_1 = 1 // Decoupled weight decay mode +} adamMode_t; + +std::tuple multi_tensor_l2norm_cuda( + int chunk_size, + at::Tensor noop_flag, + std::vector> tensor_lists, + at::optional per_tensor_python); + +using MATH_T = float; + +template +struct LAMBStage1Functor +{ + __device__ __forceinline__ void operator()( + int chunk_size, + volatile int *noop_gmem, + TensorListMetadata<4> &tl, + const float beta1, + const float beta2, + const float beta3, + const float beta1_correction, + const float beta2_correction, + const float epsilon, + adamMode_t mode, + const float decay, + const float *global_grad_norm, + const float max_global_grad_norm) + { + // I'd like this kernel to propagate infs/nans. + // if(*noop_gmem == 1) + // return; + + int tensor_loc = tl.block_to_tensor[blockIdx.x]; + int chunk_idx = tl.block_to_chunk[blockIdx.x]; + int n = tl.sizes[tensor_loc]; + + float clipped_global_grad_norm = (*global_grad_norm) > max_global_grad_norm ? 
(*global_grad_norm) / max_global_grad_norm : 1.0f; + + T *g = (T *)tl.addresses[0][tensor_loc]; + g += chunk_idx * chunk_size; + + T *p = (T *)tl.addresses[1][tensor_loc]; + p += chunk_idx * chunk_size; + + T *m = (T *)tl.addresses[2][tensor_loc]; + m += chunk_idx * chunk_size; + + T *v = (T *)tl.addresses[3][tensor_loc]; + v += chunk_idx * chunk_size; + + n -= chunk_idx * chunk_size; + + MATH_T r_g[ILP]; + MATH_T r_p[ILP]; + MATH_T r_m[ILP]; + MATH_T r_v[ILP]; + // to make things simple, we put aligned case in a different code path + if (n % ILP == 0 && + chunk_size % ILP == 0 && + is_aligned(g) && + is_aligned(p) && + is_aligned(m) && + is_aligned(v)) + { + T l_g[ILP]; + T l_p[ILP]; + T l_m[ILP]; + T l_v[ILP]; + for (int i_start = threadIdx.x; i_start * ILP < n && i_start * ILP < chunk_size; i_start += blockDim.x) + { + // load + load_store(l_g, g, 0, i_start); + if (decay != 0) + load_store(l_p, p, 0, i_start); + load_store(l_m, m, 0, i_start); + load_store(l_v, v, 0, i_start); + // unpack +#pragma unroll + for (int ii = 0; ii < ILP; ii++) + { + r_g[ii] = l_g[ii]; + if (decay == 0) + { + r_p[ii] = MATH_T(0); + } + else + { + r_p[ii] = l_p[ii]; + } + r_m[ii] = l_m[ii]; + r_v[ii] = l_v[ii]; + } +#pragma unroll + for (int ii = 0; ii < ILP; ii++) + { + if (mode == MOMENT_MODE_0) + { + MATH_T scaled_grad = r_g[ii] / clipped_global_grad_norm; + // L2 on scaled grad + scaled_grad = scaled_grad + decay * r_p[ii]; + r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad; + r_v[ii] = r_v[ii] * beta2 + (1 - beta2) * scaled_grad * scaled_grad; + MATH_T next_m_unbiased = r_m[ii] / beta1_correction; + MATH_T next_v_unbiased = r_v[ii] / beta2_correction; + MATH_T denom = sqrtf(next_v_unbiased) + epsilon; + r_p[ii] = next_m_unbiased / denom; + } + else + { + MATH_T scaled_grad = r_g[ii] / clipped_global_grad_norm; + r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad; + r_v[ii] = r_v[ii] * beta2 + (1 - beta2) * scaled_grad * scaled_grad; + MATH_T next_m_unbiased = r_m[ii] / beta1_correction; + MATH_T next_v_unbiased = r_v[ii] / beta2_correction; + MATH_T denom = sqrtf(next_v_unbiased) + epsilon; + r_p[ii] = (next_m_unbiased / denom) + (decay * r_p[ii]); + } + } +#pragma unroll + for (int ii = 0; ii < ILP; ii++) + { + l_p[ii] = r_p[ii]; + l_m[ii] = r_m[ii]; + l_v[ii] = r_v[ii]; + } + // store + load_store(g, l_p, i_start, 0); + load_store(m, l_m, i_start, 0); + load_store(v, l_v, i_start, 0); + } + } + else + { + // see note in multi_tensor_scale_kernel.cu + for (int i_start = 0; + i_start < n && i_start < chunk_size; + i_start += blockDim.x * ILP) + { + MATH_T r_g[ILP]; + MATH_T r_p[ILP]; + MATH_T r_m[ILP]; + MATH_T r_v[ILP]; +#pragma unroll + for (int ii = 0; ii < ILP; ii++) + { + int i = i_start + threadIdx.x + ii * blockDim.x; + if (i < n && i < chunk_size) + { + r_g[ii] = g[i]; + // special ?optimization? 
for lamb stage 1 + if (decay == 0) + { + r_p[ii] = MATH_T(0); + } + else + { + r_p[ii] = p[i]; + } + r_m[ii] = m[i]; + r_v[ii] = v[i]; + } + else + { + r_g[ii] = MATH_T(0); + r_p[ii] = MATH_T(0); + r_m[ii] = MATH_T(0); + r_v[ii] = MATH_T(0); + } + } +#pragma unroll + for (int ii = 0; ii < ILP; ii++) + { + if (mode == MOMENT_MODE_0) + { + MATH_T scaled_grad = r_g[ii] / clipped_global_grad_norm; + // L2 on scaled grad + scaled_grad = scaled_grad + decay * r_p[ii]; + r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad; + r_v[ii] = r_v[ii] * beta2 + (1 - beta2) * scaled_grad * scaled_grad; + MATH_T next_m_unbiased = r_m[ii] / beta1_correction; + MATH_T next_v_unbiased = r_v[ii] / beta2_correction; + MATH_T denom = sqrtf(next_v_unbiased) + epsilon; + r_p[ii] = next_m_unbiased / denom; + } + else + { + MATH_T scaled_grad = r_g[ii] / clipped_global_grad_norm; + r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad; + r_v[ii] = r_v[ii] * beta2 + (1 - beta2) * scaled_grad * scaled_grad; + MATH_T next_m_unbiased = r_m[ii] / beta1_correction; + MATH_T next_v_unbiased = r_v[ii] / beta2_correction; + MATH_T denom = sqrtf(next_v_unbiased) + epsilon; + r_p[ii] = (next_m_unbiased / denom) + (decay * r_p[ii]); + } + } +#pragma unroll + for (int ii = 0; ii < ILP; ii++) + { + int i = i_start + threadIdx.x + ii * blockDim.x; + if (i < n && i < chunk_size) + { + g[i] = r_p[ii]; + m[i] = r_m[ii]; + v[i] = r_v[ii]; + } + } + } + } + } +}; + +// Step 2 reads in 'update' value and per-tensor param_norm and update_norm. +// It computes new parameter value. +template +struct LAMBStage2Functor +{ + __device__ __forceinline__ void operator()( + int chunk_size, + volatile int *noop_gmem, + TensorListMetadata<2> &tl, + const float *per_tensor_param_norm, + const float *per_tensor_update_norm, + const float learning_rate, + const float decay, + bool use_nvlamb) + { + // I'd like this kernel to propagate infs/nans. + // if(*noop_gmem == 1) + // return; + + int tensor_loc = tl.block_to_tensor[blockIdx.x]; + int tensor_num = tl.start_tensor_this_launch + tensor_loc; + int chunk_idx = tl.block_to_chunk[blockIdx.x]; + int n = tl.sizes[tensor_loc]; + + MATH_T ratio = learning_rate; + // nvlamb: apply adaptive learning rate to all parameters + // otherwise, only apply to those with non-zero weight decay + if (use_nvlamb || (decay != 0.0)) + { + float param_norm = per_tensor_param_norm[tensor_num]; + float update_norm = per_tensor_update_norm[tensor_num]; + ratio = (update_norm != 0.0f && param_norm != 0.0f) ? 
learning_rate * (param_norm / update_norm) : learning_rate; + } + + T *update = (T *)tl.addresses[0][tensor_loc]; + update += chunk_idx * chunk_size; + + T *p = (T *)tl.addresses[1][tensor_loc]; + p += chunk_idx * chunk_size; + + n -= chunk_idx * chunk_size; + + // to make things simple, we put aligned case in a different code path + if (n % ILP == 0 && + chunk_size % ILP == 0 && + is_aligned(p) && + is_aligned(update)) + { + T r_p[ILP]; + T r_update[ILP]; + for (int i_start = threadIdx.x; i_start * ILP < n && i_start * ILP < chunk_size; i_start += blockDim.x) + { + // load + load_store(r_p, p, 0, i_start); + load_store(r_update, update, 0, i_start); +#pragma unroll + for (int ii = 0; ii < ILP; ii++) + { + r_p[ii] = static_cast(r_p[ii]) - (ratio * static_cast(r_update[ii])); + } + load_store(p, r_p, i_start, 0); + } + } + else + { + for (int i_start = 0; + i_start < n && i_start < chunk_size; + i_start += blockDim.x * ILP) + { + MATH_T r_p[ILP]; + MATH_T r_update[ILP]; +#pragma unroll + for (int ii = 0; ii < ILP; ii++) + { + int i = i_start + threadIdx.x + ii * blockDim.x; + if (i < n && i < chunk_size) + { + r_p[ii] = p[i]; + r_update[ii] = update[i]; + } + } +#pragma unroll + for (int ii = 0; ii < ILP; ii++) + { + r_p[ii] = r_p[ii] - (ratio * r_update[ii]); + } +#pragma unroll + for (int ii = 0; ii < ILP; ii++) + { + int i = i_start + threadIdx.x + ii * blockDim.x; + if (i < n && i < chunk_size) + { + p[i] = r_p[ii]; + } + } + } + } + } +}; + +void multi_tensor_lamb_cuda( + int chunk_size, + at::Tensor noop_flag, + std::vector> tensor_lists, + const float lr, + const float beta1, + const float beta2, + const float epsilon, + const int step, + const int bias_correction, + const float weight_decay, + const int grad_averaging, + const int mode, + at::Tensor global_grad_norm, + const float max_grad_norm, + at::optional use_nvlamb_python) +{ + using namespace at; + // Master weight and 32bit momentum(potentially changing) is not handled by this + // So we assume every tensor are all in the same type + + bool use_nvlamb = use_nvlamb_python.has_value() ? 
use_nvlamb_python.value() : false; + + // Handle bias correction mode + float bias_correction1 = 1.0f, bias_correction2 = 1.0f; + if (bias_correction == 1) + { + bias_correction1 = 1 - std::pow(beta1, step); + bias_correction2 = 1 - std::pow(beta2, step); + } + + // Handle grad averaging mode + float beta3 = 1.0f; + if (grad_averaging == 1) + beta3 = 1 - beta1; + + std::vector> grad_list(tensor_lists.begin(), tensor_lists.begin() + 1); + std::vector> param_list(tensor_lists.begin() + 1, tensor_lists.begin() + 2); + + // Compute per tensor param norm + auto param_norm_tuple = multi_tensor_l2norm_cuda(chunk_size, noop_flag, param_list, true); + + // We now in-place modify grad to store update before compute its norm + // Generally this is not a issue since people modify grad in step() method all the time + // We can also grab list of empty tensor to avoid this, but I'd like to save space/cpu code + DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "lamb_stage_1", + multi_tensor_apply<4>( + BLOCK_SIZE, + chunk_size, + noop_flag, + tensor_lists, + LAMBStage1Functor(), + beta1, + beta2, + beta3, // 1-beta1 or 1 depends on averaging mode + bias_correction1, + bias_correction2, + epsilon, + (adamMode_t)mode, + weight_decay, + global_grad_norm.DATA_PTR(), + max_grad_norm);) + + // Compute update norms + auto update_norm_tuple = multi_tensor_l2norm_cuda(chunk_size, noop_flag, grad_list, true); + + std::vector> grad_param_list(tensor_lists.begin(), tensor_lists.begin() + 2); + + DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "lamb_stage_2", + multi_tensor_apply<2>( + BLOCK_SIZE, + chunk_size, + noop_flag, + grad_param_list, + LAMBStage2Functor(), + std::get<1>(param_norm_tuple).DATA_PTR(), + std::get<1>(update_norm_tuple).DATA_PTR(), + lr, + weight_decay, + use_nvlamb);) + + AT_CUDA_CHECK(cudaGetLastError()); +} \ No newline at end of file diff --git a/csrc/multi_tensor_scale_kernel.cu b/csrc/multi_tensor_scale_kernel.cu new file mode 100644 index 000000000..40bd2c7a0 --- /dev/null +++ b/csrc/multi_tensor_scale_kernel.cu @@ -0,0 +1,136 @@ +#include +#include +#include +#include +// Another possibility: +// #include + +#include +// Stringstream is a big hammer, but I want to rely on operator<< for dtype. +#include + +#include "type_shim.h" +#include "multi_tensor_apply.cuh" + +#define BLOCK_SIZE 512 +#define ILP 4 + +template +__device__ __forceinline__ bool is_aligned(T* p){ + return ((uint64_t)p) % (ILP*sizeof(T)) == 0; +} + +template +__device__ __forceinline__ void load_store(T* dst, T* src, int dst_offset, int src_offset){ + typedef typename std::aligned_storage::type LT; + ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset]; +} + +template +struct ScaleFunctor +{ + __device__ __forceinline__ void operator()( + int chunk_size, + volatile int* noop_gmem, + TensorListMetadata<2>& tl, + float scale) + { + // I'd like this kernel to propagate infs/nans. 
+ // if(*noop_gmem == 1) + // return; + + int tensor_loc = tl.block_to_tensor[blockIdx.x]; + int chunk_idx = tl.block_to_chunk[blockIdx.x]; + int n = tl.sizes[tensor_loc]; + + in_t* in = (in_t*)tl.addresses[0][tensor_loc]; + in += chunk_idx*chunk_size; + + out_t* out = (out_t*)tl.addresses[1][tensor_loc]; + out += chunk_idx*chunk_size; + + n -= chunk_idx*chunk_size; + + bool finite = true; + in_t r_in[ILP]; + out_t r_out[ILP]; + + // to make things simple, we put aligned case in a different code path + if(n % ILP == 0 && chunk_size % ILP == 0 && is_aligned(in) && is_aligned(out)) + { + for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x) + { + // load + load_store(r_in, in, 0 , i_start); +#pragma unroll + for(int ii = 0; ii < ILP; ii++) + { + r_out[ii] = static_cast(r_in[ii]) * scale; + finite = finite && isfinite(r_in[ii]); + } + // store + load_store(out, r_out, i_start, 0); + } + } + else + { + // Non-divergent exit condition for __syncthreads, not necessary here + for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x*ILP) + { +#pragma unroll + for(int ii = 0; ii < ILP; ii++) + { + r_in[ii] = 0; + int i = i_start + threadIdx.x + ii*blockDim.x; + if(i < n && i < chunk_size) + r_in[ii] = in[i]; + } + // note for clarification to future michael: + // From a pure memory dependency perspective, there's likely no point unrolling + // the write loop, since writes just fire off once their LDGs arrive. + // Put another way, the STGs are dependent on the LDGs, but not on each other. + // There is still compute ILP benefit from unrolling the loop though. +#pragma unroll + for(int ii = 0; ii < ILP; ii++) + { + r_out[ii] = static_cast(r_in[ii]) * scale; + finite = finite && isfinite(r_in[ii]); + } +#pragma unroll + for(int ii = 0; ii < ILP; ii++) + { + int i = i_start + threadIdx.x + ii*blockDim.x; + if(i < n && i < chunk_size) + out[i] = r_out[ii]; + } + } + } + if(!finite) + *noop_gmem = 1; // Blindly fire off a write. These will race but that's ok. + } +}; + +void multi_tensor_scale_cuda( + int chunk_size, + at::Tensor noop_flag, + std::vector> tensor_lists, + float scale) +{ + using namespace at; + // The output (downscaled) type is always float. + // If build times suffer, think about where to put this dispatch, + // and what logic should be moved out of multi_tensor_apply. 
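The scale functor above multiplies every element of each input tensor by `scale`, accumulates in fp32, and raises the shared no-op flag when it meets an inf/nan. As a reading aid, here is a minimal PyTorch-level sketch of those semantics; it is not part of the patch, and the function and variable names are illustrative assumptions:

```python
import torch

def multi_tensor_scale_reference(noop_flag, inputs, outputs, scale):
    # Rough equivalent of the fused kernel: out = in * scale, computed in fp32,
    # with the shared flag raised if any input element is non-finite.
    for src, dst in zip(inputs, outputs):
        if not torch.isfinite(src).all():
            noop_flag.fill_(1)  # mirrors the kernel's "blind" racy flag write
        dst.copy_(src.float() * scale)

# usage sketch: unscale fp16 gradients into fp32 buffers
flag = torch.zeros(1, dtype=torch.int32)
g16 = [torch.randn(8).half()]
g32 = [torch.empty(8)]
multi_tensor_scale_reference(flag, g16, g32, 1.0 / 1024)
```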
+ + DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "multi_tensor_scale_cuda", + DISPATCH_FLOAT_AND_HALF(tensor_lists[1][0].scalar_type(), 1, "multi_tensor_scale_cuda", + multi_tensor_apply<2>( + BLOCK_SIZE, + chunk_size, + noop_flag, + tensor_lists, + ScaleFunctor(), + scale); )) + AT_CUDA_CHECK(cudaGetLastError()); + + // AT_CUDA_CHECK(cudaDeviceSynchronize()); +} \ No newline at end of file diff --git a/csrc/multi_tensor_sgd_kernel.cu b/csrc/multi_tensor_sgd_kernel.cu new file mode 100644 index 000000000..bc30e2722 --- /dev/null +++ b/csrc/multi_tensor_sgd_kernel.cu @@ -0,0 +1,282 @@ +// modified from https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_sgd_kernel.cu +#include +#include +#include +#include +#include "multi_tensor_apply.cuh" +#include "compat.h" + +#include +#include + +#define BLOCK_SIZE 512 +#define ILP 4 + +/** + * Perform fused SGD on multiple buffers + * N: number of tensors + * tl[0] : gradients + * tl[1] : weights + * tl[2] : momentum buffers + * tl[3] : fp16 weights (if appropriate) + * wd : weight_decay (scalar) + * momentum : momentum (scalar) + * dampening : momentum dampening (scalar) + * lr : learning rate (scalar) + * nesterov : enable nesterov (bool) + * first run : necessary for proper momentum handling & init + * wd_after_momentum : apply weight decay _after_ momentum instead of before + **/ +template +struct SGDFunctor +{ + __device__ __forceinline__ void operator()( + int chunk_size, + volatile int *noop_gmem, + TensorListMetadata &tl, + float wd, + float momentum, + float dampening, + float lr, + bool nesterov, + bool first_run, + bool wd_after_momentum, + float scale) + { + // Early exit if we don't need to do anything + if (*noop_gmem) + return; + + int tensor_loc = tl.block_to_tensor[blockIdx.x]; + int chunk_idx = tl.block_to_chunk[blockIdx.x]; + int n = tl.sizes[tensor_loc]; + + T_grad *grad_in = (T_grad *)tl.addresses[0][tensor_loc]; + grad_in += chunk_idx * chunk_size; + + T_weight *weight_in = (T_weight *)tl.addresses[1][tensor_loc]; + weight_in += chunk_idx * chunk_size; + + T_weight *mom_in = (T_weight *)tl.addresses[2][tensor_loc]; + mom_in += chunk_idx * chunk_size; + + at::Half *model_weights_out = nullptr; + if (N == 4) + { + model_weights_out = (at::Half *)tl.addresses[3][tensor_loc]; + model_weights_out += chunk_idx * chunk_size; + } + + n -= chunk_idx * chunk_size; + + // Non-divergent exit condition for the __syncthreads + float incoming_grads[ILP]; + float incoming_weights[ILP]; + float incoming_moms[ILP]; + for (int i_start = 0; + i_start < n && i_start < chunk_size; + i_start += blockDim.x * ILP) + { +#pragma unroll + for (int ii = 0; ii < ILP; ii++) + { + incoming_grads[ii] = 0; + incoming_weights[ii] = 0; + incoming_moms[ii] = 0; + int i = i_start + threadIdx.x + ii * blockDim.x; + if (i < n && i < chunk_size) + { + incoming_grads[ii] = static_cast(grad_in[i]) * scale; + incoming_weights[ii] = static_cast(weight_in[i]); + incoming_moms[ii] = static_cast(mom_in[i]); + } + } + +// note for clarification to future michael: +// From a pure memory dependency perspective, there's likely no point unrolling +// the write loop, since writes just fire off once their LDGs arrive. +// Put another way, the STGs are dependent on the LDGs, but not on each other. +// There is still compute ILP benefit from unrolling the loop though. 
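The header comment above fixes the meaning of the tensor lists and scalars used by the fused SGD kernel. For orientation, the per-element update performed by the unrolled loops that follow corresponds to this hedged Python sketch (illustrative only; the helper name and scalar-style signature are assumptions, not part of the kernel):

```python
def fused_sgd_reference(grad, weight, mom, *, wd, momentum, dampening, lr,
                        nesterov, first_run, wd_after_momentum, scale):
    # Scalar sketch of the update applied to each element.
    g = grad * scale
    if wd != 0.0 and not wd_after_momentum:      # weight decay before momentum
        g += wd * weight
    if momentum != 0.0:
        if first_run:                            # initialize the momentum buffer
            mom = g
        else:
            mom = mom * momentum + (1.0 - dampening) * g
        g = (g + momentum * mom) if nesterov else mom
    if wd != 0.0 and wd_after_momentum:          # weight decay after momentum
        g += wd * weight
    weight -= lr * g
    return weight, mom

# usage sketch for a single element
w, m = fused_sgd_reference(0.1, 1.0, 0.0, wd=1e-4, momentum=0.9, dampening=0.0,
                           lr=0.01, nesterov=False, first_run=True,
                           wd_after_momentum=False, scale=1.0)
```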
+#pragma unroll + for (int ii = 0; ii < ILP; ii++) + { + int i = i_start + threadIdx.x + ii * blockDim.x; + if (i < n && i < chunk_size) + { + // apply weight decay before momentum if necessary + if (wd != 0.f && !wd_after_momentum) + incoming_grads[ii] += wd * incoming_weights[ii]; + + if (momentum != 0.f) + { + if (!first_run) + incoming_moms[ii] = incoming_moms[ii] * momentum + (1.f - dampening) * incoming_grads[ii]; + else // initialize momentums to current incoming grads + incoming_moms[ii] = incoming_grads[ii]; + + if (nesterov) + incoming_grads[ii] += momentum * incoming_moms[ii]; + else + incoming_grads[ii] = incoming_moms[ii]; + } + + // Apply WD after momentum if desired + if (wd != 0.f && wd_after_momentum) + incoming_grads[ii] += wd * incoming_weights[ii]; + + // adjust the weight and write out + weight_in[i] += (-lr * incoming_grads[ii]); + + // if necessary, write out an fp16 copy of the weights + if (N == 4) + model_weights_out[i] = static_cast(weight_in[i]); + + // also write out the new momentum + if (momentum != 0.f) + mom_in[i] = incoming_moms[ii]; + } + } + } + } +}; + +void multi_tensor_sgd_cuda( + int chunk_size, + at::Tensor noop_flag, + std::vector> tensor_lists, + float wd, + float momentum, + float dampening, + float lr, + bool nesterov, + bool first_run, + bool wd_after_momentum, + float scale) +{ + auto num_tensors = tensor_lists.size(); + auto grad_type = tensor_lists[0][0].scalar_type(); + auto weight_type = tensor_lists[1][0].scalar_type(); + + if (num_tensors == 4) + for (int i = 0; i < tensor_lists[3].size(); i++) + TORCH_CHECK(tensor_lists[3][i].scalar_type() == at::ScalarType::Half, + "Additional output tensors should always be fp16."); + + TORCH_CHECK(noop_flag.device() == tensor_lists[0][0].device(), "expected noop flag to be on the same device as tensors"); + + // We have 3 possibilities to handle here, in terms of + // grad_type, param_type, momentum_type, requires_fp16_copy + // 1. fp16, fp16, fp16, No + // 2. fp32, fp32, fp32, No + // 3. fp16, fp32, fp32, Yes + // 4. fp32, fp32, fp32, Yes // this is the materialize_master_grads=True case + // It's easier to hardcode these possibilities than to use + // switches etc. to handle the cross-product of cases where + // we don't want the majority of them. + + // Case 1. fp16, fp16, fp16, No + if (grad_type == at::ScalarType::Half && + weight_type == at::ScalarType::Half && + num_tensors == 3) + { + multi_tensor_apply<3>( + BLOCK_SIZE, + chunk_size, + noop_flag, + tensor_lists, + SGDFunctor<3, at::Half, at::Half>(), + wd, + momentum, + dampening, + lr, + nesterov, + first_run, + wd_after_momentum, + scale); + } + // Case 2. fp16, fp32, fp32, No + // else if (grad_type == at::ScalarType::Half && + // weight_type == at::ScalarType::Float && + // num_tensors == 3) { + // multi_tensor_apply<3>( + // BLOCK_SIZE, + // chunk_size, + // noop_flag, + // tensor_lists, + // SGDFunctor<3, at::Half, float>(), + // wd, + // momentum, + // dampening, + // lr, + // nesterov, + // first_run, + // wd_after_momentum); + // } + // Case 2. fp32, fp32, fp32, No + else if (grad_type == at::ScalarType::Float && + weight_type == at::ScalarType::Float && + num_tensors == 3) + { + multi_tensor_apply<3>( + BLOCK_SIZE, + chunk_size, + noop_flag, + tensor_lists, + SGDFunctor<3, float, float>(), + wd, + momentum, + dampening, + lr, + nesterov, + first_run, + wd_after_momentum, + scale); + } + // Case 3. 
fp16, fp32, fp32, Yes + else if (grad_type == at::ScalarType::Half && + weight_type == at::ScalarType::Float && + num_tensors == 4) + { + multi_tensor_apply<4>( + BLOCK_SIZE, + chunk_size, + noop_flag, + tensor_lists, + SGDFunctor<4, at::Half, float>(), + wd, + momentum, + dampening, + lr, + nesterov, + first_run, + wd_after_momentum, + scale); + } + // Case 4. fp32, fp32, fp32, Yes + else if (grad_type == at::ScalarType::Float && + weight_type == at::ScalarType::Float && + num_tensors == 4) + { + multi_tensor_apply<4>( + BLOCK_SIZE, + chunk_size, + noop_flag, + tensor_lists, + SGDFunctor<4, float, float>(), + wd, + momentum, + dampening, + lr, + nesterov, + first_run, + wd_after_momentum, + scale); + } + else + { + AT_ERROR("multi_tensor_sgd only supports some combinations of gradient & weight types. Given: ", + "gradient: ", grad_type, ", weight: ", weight_type, ", num_lists: ", num_tensors); + } + + AT_CUDA_CHECK(cudaGetLastError()); +} \ No newline at end of file diff --git a/csrc/type_shim.h b/csrc/type_shim.h new file mode 100644 index 000000000..e9696dea6 --- /dev/null +++ b/csrc/type_shim.h @@ -0,0 +1,202 @@ +// modified from https://github.com/NVIDIA/apex/blob/master/csrc/type_shim.h +#include +#include "compat.h" + +// Forward/backward compatiblity hack around +// https://github.com/pytorch/pytorch/commit/3aeb78079bcd68282fe9117088e138b77318e288 +// pending more future-proof guidance from upstream. +// struct TypeShim +// { +// const at::Type& payload; +// TypeShim(const at::Type& type) : payload(type) {} +// // Enable trivial conversion to a const at::Type& for pre-3aeb78 +// operator const at::Type&(){ return payload; }; +// // Enable dispatch switch statements to take *this directly for post-3aeb78 +// //operator at::ScalarType(){ return payload.; }; +// }; + +#define DISPATCH_FLOAT_AND_HALF(TYPE, LEVEL, NAME, ...) \ + switch (TYPE) \ + { \ + case at::ScalarType::Float: \ + { \ + using scalar_t_##LEVEL = float; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Half: \ + { \ + using scalar_t_##LEVEL = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ + } + +#define DISPATCH_FLOAT_HALF_AND_BYTE(TYPE, LEVEL, NAME, ...) \ + switch (TYPE) \ + { \ + case at::ScalarType::Float: \ + { \ + using scalar_t_##LEVEL = float; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Half: \ + { \ + using scalar_t_##LEVEL = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Byte: \ + { \ + using scalar_t_##LEVEL = uint8_t; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ + } + +#define DISPATCH_DOUBLE_FLOAT_AND_HALF(TYPE, LEVEL, NAME, ...) \ + switch (TYPE) \ + { \ + case at::ScalarType::Double: \ + { \ + using scalar_t_##LEVEL = double; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Float: \ + { \ + using scalar_t_##LEVEL = float; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Half: \ + { \ + using scalar_t_##LEVEL = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ + } + +#define DISPATCH_DOUBLE_AND_FLOAT(TYPE, LEVEL, NAME, ...) 
\ + switch (TYPE) \ + { \ + case at::ScalarType::Double: \ + { \ + using scalar_t_##LEVEL = double; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Float: \ + { \ + using scalar_t_##LEVEL = float; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ + } + +template +__device__ __forceinline__ T reduce_block_into_lanes(T *x, + T val, + int lanes = 1, + bool share_result = false) // lanes is intended to be <= 32. +{ + int tid = threadIdx.x + threadIdx.y * blockDim.x; + int blockSize = blockDim.x * blockDim.y; // blockSize is intended to be a multiple of 32. + + if (blockSize >= 64) + { + x[tid] = val; + __syncthreads(); + } + +#pragma unroll + for (int i = (blockSize >> 1); i >= 64; i >>= 1) + { + if (tid < i) + x[tid] = x[tid] + x[tid + i]; + __syncthreads(); + } + + T final; + + if (tid < 32) + { + if (blockSize >= 64) + final = x[tid] + x[tid + 32]; + else + final = val; + // __SYNCWARP(); + +#pragma unroll + for (int i = 16; i >= lanes; i >>= 1) + final = final + __shfl_down_sync(0xffffffff, final, i); + } + + if (share_result) + { + if (tid < lanes) + x[tid] = final; // EpilogueOp + // Make sure the smem result is visible to all warps. + __syncthreads(); + } + + return final; +} + +template +__device__ __forceinline__ T reduce_block_into_lanes_max_op(T *x, + T val, + int lanes = 1, + bool share_result = false) // lanes is intended to be <= 32. +{ + int tid = threadIdx.x + threadIdx.y * blockDim.x; + int blockSize = blockDim.x * blockDim.y; // blockSize is intended to be a multiple of 32. + + if (blockSize >= 64) + { + x[tid] = val; + __syncthreads(); + } + +#pragma unroll + for (int i = (blockSize >> 1); i >= 64; i >>= 1) + { + if (tid < i) + x[tid] = fmaxf(fabsf(x[tid]), fabsf(x[tid + i])); + __syncthreads(); + } + + T final; + + if (tid < 32) + { + if (blockSize >= 64) + final = fmaxf(fabsf(x[tid]), fabsf(x[tid + 32])); + else + final = val; + // __SYNCWARP(); + +#pragma unroll + for (int i = 16; i >= lanes; i >>= 1) + final = fmaxf(fabsf(final), fabsf(__shfl_down_sync(0xffffffff, final, i))); + } + + if (share_result) + { + if (tid < lanes) + x[tid] = final; // EpilogueOp + // Make sure the smem result is visible to all warps. + __syncthreads(); + } + + return final; +} \ No newline at end of file diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 000000000..9f43a48d6 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,26 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = .build +SPHINXAPIDOC ?= sphinx-apidoc +SPHINX_APIDOC_OPTIONS = members +SPHINX_APIDOC_TEMPLATEDIR = _templates/apidoc + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile apidoc + +apidoc: + @SPHINX_APIDOC_OPTIONS=$(SPHINX_APIDOC_OPTIONS) $(SPHINXAPIDOC) -f -T -e -M -d 2 -t $(SPHINX_APIDOC_TEMPLATEDIR) -o ./colossalai ../colossalai +# @$(SPHINXAPIDOC) -f -o ./model_zoo ../model_zoo +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
+%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/_static/css/rtd_theme.css b/docs/_static/css/rtd_theme.css new file mode 100644 index 000000000..caf42dc5a --- /dev/null +++ b/docs/_static/css/rtd_theme.css @@ -0,0 +1,3 @@ +.wy-nav-content { + max-width: 80%; +} \ No newline at end of file diff --git a/docs/_templates/apidoc/module.rst_t b/docs/_templates/apidoc/module.rst_t new file mode 100644 index 000000000..d9a50e6b9 --- /dev/null +++ b/docs/_templates/apidoc/module.rst_t @@ -0,0 +1,9 @@ +{%- if show_headings %} +{{- basename | e | heading }} + +{% endif -%} +.. automodule:: {{ qualname }} +{%- for option in automodule_options %} + :{{ option }}: +{%- endfor %} + diff --git a/docs/_templates/apidoc/package.rst_t b/docs/_templates/apidoc/package.rst_t new file mode 100644 index 000000000..83742b3f7 --- /dev/null +++ b/docs/_templates/apidoc/package.rst_t @@ -0,0 +1,52 @@ +{%- macro automodule(modname, options) -%} +.. automodule:: {{ modname }} +{%- for option in options %} + :{{ option }}: +{%- endfor %} +{%- endmacro %} + +{%- macro toctree(docnames) -%} +.. toctree:: + :maxdepth: {{ maxdepth }} +{% for docname in docnames %} + {{ docname }} +{%- endfor %} +{%- endmacro %} + +{%- if is_namespace %} +{{- pkgname | e | heading }} +{% else %} +{{- pkgname | e | heading }} +{% endif %} + +{%- if is_namespace %} +.. py:module:: {{ pkgname }} +{% endif %} + +{%- if modulefirst and not is_namespace %} +{{ automodule(pkgname, automodule_options) }} +{% endif %} + +{%- if subpackages %} +{{ toctree(subpackages) }} +{% endif %} + +{%- if submodules %} +{% if separatemodules %} +{{ toctree(submodules) }} +{% else %} +{%- for submodule in submodules %} +{% if show_headings %} +{{- submodule | e | heading(2) }} +{% endif %} +{{ automodule(submodule, automodule_options) }} +{% endfor %} +{%- endif %} +{%- endif %} + +{%- if not modulefirst and not is_namespace %} +Module contents +--------------- + +{{ automodule(pkgname, automodule_options) }} +{% endif %} diff --git a/docs/_templates/apidoc/toc.rst_t b/docs/_templates/apidoc/toc.rst_t new file mode 100644 index 000000000..f0877eeb2 --- /dev/null +++ b/docs/_templates/apidoc/toc.rst_t @@ -0,0 +1,8 @@ +{{ header | heading }} + +.. toctree:: + :maxdepth: {{ maxdepth }} +{% for docname in docnames %} + {{ docname }} +{%- endfor %} + diff --git a/docs/add_your_parallel.md b/docs/add_your_parallel.md new file mode 100644 index 000000000..ae8ae7e27 --- /dev/null +++ b/docs/add_your_parallel.md @@ -0,0 +1,120 @@ +# Add Your Own Parallelism + +## Overview + +To enable researchers and engineers to extend our framework to other novel large-scale distributed training algorithm +with less effort, we have decoupled the various components in the training lifecycle. You can implement your own +parallelism by simply inheriting from the base class. + +The main components are + +1. `ProcessGroupInitializer` +2. `GradientHandler` +3. `Schedule` + +## Process Group Initializer + +Parallelism is often managed by process groups where processes involved in parallel computing are placed in the same +process group. For different parallel algorithms, different process groups need to be created. ColossalAI provides a +global context for the user to easily manage their process groups. If you wish to add new process group, you can easily +define a new class and set it in your configuration file. To define your own way of creating process groups, you can +follow the steps below to create new distributed initialization. 
+
+1. Add your parallel mode in `colossalai.context.parallel_mode.ParallelMode`
+
+    ```python
+    class ParallelMode(Enum):
+        GLOBAL = 'global'
+        DATA = 'data'
+        PIPELINE = 'pipe'
+        PIPELINE_PREV = 'pipe_prev'
+        PIPELINE_NEXT = 'pipe_next'
+        ...
+
+        NEW_MODE = 'new_mode'  # define your mode here
+    ```
+
+2. Create a `ProcessGroupInitializer`. You can refer to the examples given in `colossalai.context.process_group_initializer`. The
+   first six arguments are fixed. `ParallelContext` will pass in these arguments for you. If you need to set other
+   arguments, you can add them afterwards, like `arg1, arg2` in the example below. Lastly, register your initializer with the
+   registry by adding the decorator `@DIST_GROUP_INITIALIZER.register_module`.
+
+    ```python
+    # sample initializer class
+    @DIST_GROUP_INITIALIZER.register_module
+    class MyParallelInitializer(ProcessGroupInitializer):
+
+        def __init__(self,
+                     rank: int,
+                     world_size: int,
+                     config: Config,
+                     data_parallel_size: int,
+                     pipeline_parallel_size: int,
+                     tensor_parallel_size: int,
+                     arg1,
+                     arg2):
+            super().__init__(rank, world_size, config)
+            self.arg1 = arg1
+            self.arg2 = arg2
+            # ... your variable init
+
+        def init_parallel_groups(self):
+            # initialize your process groups
+            pass
+
+    ```
+
+   Then, you can insert your new initializer into the current mode-to-initializer mapping
+   in `colossalai.constants.INITIALIZER_MAPPING`. You can modify the file or insert a new key-value pair dynamically.
+
+    ```python
+    colossalai.constants.INITIALIZER_MAPPING['new_mode'] = 'MyParallelInitializer'
+    ```
+
+3. Set your initializer in your config file. You can pass in your own arguments if there are any. This allows
+   the `ParallelContext` to create your initializer and initialize your desired process groups.
+
+    ```python
+    parallel = dict(
+        pipeline=dict(size=1),
+        tensor=dict(size=x, mode='new_mode')  # this is where you enable your new parallel mode
+    )
+    ```
+
+## Gradient Handler
+
+Gradient handlers are objects which execute the all-reduce operations on parameters' gradients. As different all-reduce
+strategies may be executed for different kinds of parallelism, the user can
+inherit `colossalai.engine.gradient_handler.BaseGradientHandler` to implement their strategies. Currently, the library
+uses the normal data parallel gradient handler, which all-reduces the gradients across data parallel ranks. The data
+parallel gradient handler is added to the engine automatically if data parallelism is detected. You can add your own
+gradient handler as below:
+
+```python
+
+from colossalai.registry import GRADIENT_HANDLER
+from colossalai.engine import BaseGradientHandler
+
+
+@GRADIENT_HANDLER.register_module
+class YourGradientHandler(BaseGradientHandler):
+
+    def handle_gradient(self):
+        do_something()
+
+```
+
+Afterwards, you can specify the gradient handler you want to use in your configuration file.
+
+```python
+dist_initializer = [
+    dict(type='YourGradientHandler'),
+]
+```
+
+## Schedule
+
+A schedule defines how the forward and backward passes are executed. Currently, ColossalAI provides pipeline and non-pipeline
+schedules. If you want to customize how the forward and backward passes are executed, you can
+inherit `colossalai.engine.BaseSchedule` and implement your own schedule. You can add your schedule to the engine before
+training.
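To make the schedule extension point concrete, here is a minimal sketch of a custom schedule. It is illustrative only: the `forward_backward_step` hook name and its signature are assumptions for this example and may not match the exact `BaseSchedule` interface shipped in this patch.

```python
from colossalai.engine import BaseSchedule


class MySchedule(BaseSchedule):
    """Illustrative schedule running one plain forward/backward pass per step."""

    def forward_backward_step(self, data_iter, model, criterion,
                              optimizer=None, forward_only=False, return_loss=True):
        # hook name and signature assumed for illustration
        data, label = next(data_iter)
        output = model(data)
        loss = criterion(output, label) if return_loss else None
        if not forward_only:
            loss.backward()
        return output, label, loss
```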
\ No newline at end of file
diff --git a/docs/amp.md b/docs/amp.md
new file mode 100644
index 000000000..c2dfa77fa
--- /dev/null
+++ b/docs/amp.md
@@ -0,0 +1,85 @@
+# Mixed Precision Training
+
+In Colossal-AI, we have integrated different implementations of mixed precision training:
+1. torch.cuda.amp
+2. apex.amp
+3. tensor-parallel amp
+
+The first two rely on the original implementation of [PyTorch](https://pytorch.org/docs/stable/amp.html)
+(version 1.6 and above) and [Nvidia Apex](https://github.com/NVIDIA/apex). However, these two methods are not compatible
+with tensor parallelism. This is because tensors are split across devices in tensor parallelism, so processes need
+to communicate with each other to check whether inf or nan occurs anywhere in the model weights. For mixed
+precision training with tensor parallelism, we adapted this feature from [Megatron-LM](https://github.com/NVIDIA/Megatron-LM).
+
+To use mixed precision training, simply specify the `fp16` field in the configuration file. Currently, torch and
+apex amp are not guaranteed to work with tensor and pipeline parallelism, so only the last mode is recommended if you
+are using hybrid parallelism.
+
+## Torch AMP
+
+PyTorch provides mixed precision training in version 1.6 and above. It offers an easy way to cast data to fp16 format
+while keeping some operations, such as reductions, in fp32. You can configure the gradient scaler in the configuration file.
+
+```python
+from colossalai.engine import AMP_TYPE
+
+fp16=dict(
+    mode=AMP_TYPE.TORCH,
+    # below are default values for grad scaler
+    init_scale=2.**16,
+    growth_factor=2.0,
+    backoff_factor=0.5,
+    growth_interval=2000,
+    enabled=True
+)
+```
+
+
+## Apex AMP
+
+For this mode, we rely on the [Apex](https://nvidia.github.io/apex/) implementation of mixed precision training. We support this plugin because it allows
+for finer control over the granularity of mixed precision. For example, the `O2` level (optimization level 2) keeps batch normalization in fp32.
+
+The configuration is shown below.
+```python
+from colossalai.engine import AMP_TYPE
+
+fp16 = dict(
+    mode=AMP_TYPE.APEX,
+    # below are the default values
+    enabled=True,
+    opt_level='O1',
+    cast_model_type=None,
+    patch_torch_functions=None,
+    keep_batchnorm_fp32=None,
+    master_weights=None,
+    loss_scale=None,
+    cast_model_outputs=None,
+    num_losses=1,
+    verbosity=1,
+    min_loss_scale=None,
+    max_loss_scale=16777216.0
+)
+```
+
+## Tensor Parallel AMP
+
+We leverage the Megatron-LM implementation to achieve mixed precision training while maintaining compatibility with
+complex tensor and pipeline parallelism.
+
+```python
+from colossalai.engine import AMP_TYPE
+
+fp16 = dict(
+    mode=AMP_TYPE.PARALLEL,
+    # below are the default values
+    clip_grad=0,
+    log_num_zeros_in_grad=False,
+    initial_scale=2 ** 32,
+    min_scale=1,
+    growth_factor=2,
+    backoff_factor=0.5,
+    growth_interval=1000,
+    hysteresis=2
+)
+```
\ No newline at end of file
diff --git a/docs/colossalai/colossalai.builder.builder.rst b/docs/colossalai/colossalai.builder.builder.rst
new file mode 100644
index 000000000..85da78ab9
--- /dev/null
+++ b/docs/colossalai/colossalai.builder.builder.rst
@@ -0,0 +1,5 @@
+colossalai.builder.builder
+==========================
+
+.. 
automodule:: colossalai.builder.builder + :members: diff --git a/docs/colossalai/colossalai.builder.pipeline.rst b/docs/colossalai/colossalai.builder.pipeline.rst new file mode 100644 index 000000000..7b8c960bb --- /dev/null +++ b/docs/colossalai/colossalai.builder.pipeline.rst @@ -0,0 +1,5 @@ +colossalai.builder.pipeline +=========================== + +.. automodule:: colossalai.builder.pipeline + :members: diff --git a/docs/colossalai/colossalai.builder.rst b/docs/colossalai/colossalai.builder.rst new file mode 100644 index 000000000..60b8501c8 --- /dev/null +++ b/docs/colossalai/colossalai.builder.rst @@ -0,0 +1,12 @@ +colossalai.builder +================== + +.. automodule:: colossalai.builder + :members: + + +.. toctree:: + :maxdepth: 2 + + colossalai.builder.builder + colossalai.builder.pipeline diff --git a/docs/colossalai/colossalai.checkpointing.rst b/docs/colossalai/colossalai.checkpointing.rst new file mode 100644 index 000000000..7db9af190 --- /dev/null +++ b/docs/colossalai/colossalai.checkpointing.rst @@ -0,0 +1,5 @@ +colossalai.checkpointing +======================== + +.. automodule:: colossalai.checkpointing + :members: diff --git a/docs/colossalai/colossalai.communication.collective.rst b/docs/colossalai/colossalai.communication.collective.rst new file mode 100644 index 000000000..5015edf98 --- /dev/null +++ b/docs/colossalai/colossalai.communication.collective.rst @@ -0,0 +1,5 @@ +colossalai.communication.collective +=================================== + +.. automodule:: colossalai.communication.collective + :members: diff --git a/docs/colossalai/colossalai.communication.p2p.rst b/docs/colossalai/colossalai.communication.p2p.rst new file mode 100644 index 000000000..79135bb86 --- /dev/null +++ b/docs/colossalai/colossalai.communication.p2p.rst @@ -0,0 +1,5 @@ +colossalai.communication.p2p +============================ + +.. automodule:: colossalai.communication.p2p + :members: diff --git a/docs/colossalai/colossalai.communication.ring.rst b/docs/colossalai/colossalai.communication.ring.rst new file mode 100644 index 000000000..c218d4bed --- /dev/null +++ b/docs/colossalai/colossalai.communication.ring.rst @@ -0,0 +1,5 @@ +colossalai.communication.ring +============================= + +.. automodule:: colossalai.communication.ring + :members: diff --git a/docs/colossalai/colossalai.communication.rst b/docs/colossalai/colossalai.communication.rst new file mode 100644 index 000000000..5086fa663 --- /dev/null +++ b/docs/colossalai/colossalai.communication.rst @@ -0,0 +1,14 @@ +colossalai.communication +======================== + +.. automodule:: colossalai.communication + :members: + + +.. toctree:: + :maxdepth: 2 + + colossalai.communication.collective + colossalai.communication.p2p + colossalai.communication.ring + colossalai.communication.utils diff --git a/docs/colossalai/colossalai.communication.utils.rst b/docs/colossalai/colossalai.communication.utils.rst new file mode 100644 index 000000000..19a36cc9f --- /dev/null +++ b/docs/colossalai/colossalai.communication.utils.rst @@ -0,0 +1,5 @@ +colossalai.communication.utils +============================== + +.. automodule:: colossalai.communication.utils + :members: diff --git a/docs/colossalai/colossalai.constants.rst b/docs/colossalai/colossalai.constants.rst new file mode 100644 index 000000000..330b3e866 --- /dev/null +++ b/docs/colossalai/colossalai.constants.rst @@ -0,0 +1,5 @@ +colossalai.constants +==================== + +.. 
automodule:: colossalai.constants + :members: diff --git a/docs/colossalai/colossalai.context.config.rst b/docs/colossalai/colossalai.context.config.rst new file mode 100644 index 000000000..2fb1b99d3 --- /dev/null +++ b/docs/colossalai/colossalai.context.config.rst @@ -0,0 +1,5 @@ +colossalai.context.config +========================= + +.. automodule:: colossalai.context.config + :members: diff --git a/docs/colossalai/colossalai.context.parallel_context.rst b/docs/colossalai/colossalai.context.parallel_context.rst new file mode 100644 index 000000000..d1c82c517 --- /dev/null +++ b/docs/colossalai/colossalai.context.parallel_context.rst @@ -0,0 +1,5 @@ +colossalai.context.parallel\_context +==================================== + +.. automodule:: colossalai.context.parallel_context + :members: diff --git a/docs/colossalai/colossalai.context.parallel_mode.rst b/docs/colossalai/colossalai.context.parallel_mode.rst new file mode 100644 index 000000000..f7ac13749 --- /dev/null +++ b/docs/colossalai/colossalai.context.parallel_mode.rst @@ -0,0 +1,5 @@ +colossalai.context.parallel\_mode +================================= + +.. automodule:: colossalai.context.parallel_mode + :members: diff --git a/docs/colossalai/colossalai.context.process_group_initializer.initializer_1d.rst b/docs/colossalai/colossalai.context.process_group_initializer.initializer_1d.rst new file mode 100644 index 000000000..88cbf3eba --- /dev/null +++ b/docs/colossalai/colossalai.context.process_group_initializer.initializer_1d.rst @@ -0,0 +1,5 @@ +colossalai.context.process\_group\_initializer.initializer\_1d +============================================================== + +.. automodule:: colossalai.context.process_group_initializer.initializer_1d + :members: diff --git a/docs/colossalai/colossalai.context.process_group_initializer.initializer_2d.rst b/docs/colossalai/colossalai.context.process_group_initializer.initializer_2d.rst new file mode 100644 index 000000000..d99a2e1c3 --- /dev/null +++ b/docs/colossalai/colossalai.context.process_group_initializer.initializer_2d.rst @@ -0,0 +1,5 @@ +colossalai.context.process\_group\_initializer.initializer\_2d +============================================================== + +.. automodule:: colossalai.context.process_group_initializer.initializer_2d + :members: diff --git a/docs/colossalai/colossalai.context.process_group_initializer.initializer_2p5d.rst b/docs/colossalai/colossalai.context.process_group_initializer.initializer_2p5d.rst new file mode 100644 index 000000000..73d80e443 --- /dev/null +++ b/docs/colossalai/colossalai.context.process_group_initializer.initializer_2p5d.rst @@ -0,0 +1,5 @@ +colossalai.context.process\_group\_initializer.initializer\_2p5d +================================================================ + +.. automodule:: colossalai.context.process_group_initializer.initializer_2p5d + :members: diff --git a/docs/colossalai/colossalai.context.process_group_initializer.initializer_3d.rst b/docs/colossalai/colossalai.context.process_group_initializer.initializer_3d.rst new file mode 100644 index 000000000..5cfba5ce0 --- /dev/null +++ b/docs/colossalai/colossalai.context.process_group_initializer.initializer_3d.rst @@ -0,0 +1,5 @@ +colossalai.context.process\_group\_initializer.initializer\_3d +============================================================== + +.. 
automodule:: colossalai.context.process_group_initializer.initializer_3d + :members: diff --git a/docs/colossalai/colossalai.context.process_group_initializer.initializer_data.rst b/docs/colossalai/colossalai.context.process_group_initializer.initializer_data.rst new file mode 100644 index 000000000..55ad05f32 --- /dev/null +++ b/docs/colossalai/colossalai.context.process_group_initializer.initializer_data.rst @@ -0,0 +1,5 @@ +colossalai.context.process\_group\_initializer.initializer\_data +================================================================ + +.. automodule:: colossalai.context.process_group_initializer.initializer_data + :members: diff --git a/docs/colossalai/colossalai.context.process_group_initializer.initializer_pipeline.rst b/docs/colossalai/colossalai.context.process_group_initializer.initializer_pipeline.rst new file mode 100644 index 000000000..466d5143a --- /dev/null +++ b/docs/colossalai/colossalai.context.process_group_initializer.initializer_pipeline.rst @@ -0,0 +1,5 @@ +colossalai.context.process\_group\_initializer.initializer\_pipeline +==================================================================== + +.. automodule:: colossalai.context.process_group_initializer.initializer_pipeline + :members: diff --git a/docs/colossalai/colossalai.context.process_group_initializer.initializer_sequence.rst b/docs/colossalai/colossalai.context.process_group_initializer.initializer_sequence.rst new file mode 100644 index 000000000..dab71cc3c --- /dev/null +++ b/docs/colossalai/colossalai.context.process_group_initializer.initializer_sequence.rst @@ -0,0 +1,5 @@ +colossalai.context.process\_group\_initializer.initializer\_sequence +==================================================================== + +.. automodule:: colossalai.context.process_group_initializer.initializer_sequence + :members: diff --git a/docs/colossalai/colossalai.context.process_group_initializer.initializer_tensor.rst b/docs/colossalai/colossalai.context.process_group_initializer.initializer_tensor.rst new file mode 100644 index 000000000..0c2d8d1e9 --- /dev/null +++ b/docs/colossalai/colossalai.context.process_group_initializer.initializer_tensor.rst @@ -0,0 +1,5 @@ +colossalai.context.process\_group\_initializer.initializer\_tensor +================================================================== + +.. automodule:: colossalai.context.process_group_initializer.initializer_tensor + :members: diff --git a/docs/colossalai/colossalai.context.process_group_initializer.process_group_initializer.rst b/docs/colossalai/colossalai.context.process_group_initializer.process_group_initializer.rst new file mode 100644 index 000000000..3f98723c1 --- /dev/null +++ b/docs/colossalai/colossalai.context.process_group_initializer.process_group_initializer.rst @@ -0,0 +1,5 @@ +colossalai.context.process\_group\_initializer.process\_group\_initializer +========================================================================== + +.. automodule:: colossalai.context.process_group_initializer.process_group_initializer + :members: diff --git a/docs/colossalai/colossalai.context.process_group_initializer.rst b/docs/colossalai/colossalai.context.process_group_initializer.rst new file mode 100644 index 000000000..68aedaaa3 --- /dev/null +++ b/docs/colossalai/colossalai.context.process_group_initializer.rst @@ -0,0 +1,19 @@ +colossalai.context.process\_group\_initializer +============================================== + +.. automodule:: colossalai.context.process_group_initializer + :members: + + +.. 
toctree:: + :maxdepth: 2 + + colossalai.context.process_group_initializer.initializer_1d + colossalai.context.process_group_initializer.initializer_2d + colossalai.context.process_group_initializer.initializer_2p5d + colossalai.context.process_group_initializer.initializer_3d + colossalai.context.process_group_initializer.initializer_data + colossalai.context.process_group_initializer.initializer_pipeline + colossalai.context.process_group_initializer.initializer_sequence + colossalai.context.process_group_initializer.initializer_tensor + colossalai.context.process_group_initializer.process_group_initializer diff --git a/docs/colossalai/colossalai.context.random.rst b/docs/colossalai/colossalai.context.random.rst new file mode 100644 index 000000000..8d4b9c56a --- /dev/null +++ b/docs/colossalai/colossalai.context.random.rst @@ -0,0 +1,11 @@ +colossalai.context.random +========================= + +.. automodule:: colossalai.context.random + :members: + + +.. toctree:: + :maxdepth: 2 + + colossalai.context.random.seed_manager diff --git a/docs/colossalai/colossalai.context.random.seed_manager.rst b/docs/colossalai/colossalai.context.random.seed_manager.rst new file mode 100644 index 000000000..b71f35c27 --- /dev/null +++ b/docs/colossalai/colossalai.context.random.seed_manager.rst @@ -0,0 +1,5 @@ +colossalai.context.random.seed\_manager +======================================= + +.. automodule:: colossalai.context.random.seed_manager + :members: diff --git a/docs/colossalai/colossalai.context.rst b/docs/colossalai/colossalai.context.rst new file mode 100644 index 000000000..babab5099 --- /dev/null +++ b/docs/colossalai/colossalai.context.rst @@ -0,0 +1,19 @@ +colossalai.context +================== + +.. automodule:: colossalai.context + :members: + +.. toctree:: + :maxdepth: 2 + + colossalai.context.process_group_initializer + colossalai.context.random + + +.. toctree:: + :maxdepth: 2 + + colossalai.context.config + colossalai.context.parallel_context + colossalai.context.parallel_mode diff --git a/docs/colossalai/colossalai.core.rst b/docs/colossalai/colossalai.core.rst new file mode 100644 index 000000000..d9ddb76ed --- /dev/null +++ b/docs/colossalai/colossalai.core.rst @@ -0,0 +1,5 @@ +colossalai.core +=============== + +.. automodule:: colossalai.core + :members: diff --git a/docs/colossalai/colossalai.engine.amp_type.rst b/docs/colossalai/colossalai.engine.amp_type.rst new file mode 100644 index 000000000..8121b9933 --- /dev/null +++ b/docs/colossalai/colossalai.engine.amp_type.rst @@ -0,0 +1,5 @@ +colossalai.engine.amp\_type +=========================== + +.. automodule:: colossalai.engine.amp_type + :members: diff --git a/docs/colossalai/colossalai.engine.gradient_handler.rst b/docs/colossalai/colossalai.engine.gradient_handler.rst new file mode 100644 index 000000000..d7d1633a6 --- /dev/null +++ b/docs/colossalai/colossalai.engine.gradient_handler.rst @@ -0,0 +1,5 @@ +colossalai.engine.gradient\_handler +=================================== + +.. automodule:: colossalai.engine.gradient_handler + :members: diff --git a/docs/colossalai/colossalai.engine.rst b/docs/colossalai/colossalai.engine.rst new file mode 100644 index 000000000..1cd4733b8 --- /dev/null +++ b/docs/colossalai/colossalai.engine.rst @@ -0,0 +1,17 @@ +colossalai.engine +================= + +.. automodule:: colossalai.engine + :members: + +.. toctree:: + :maxdepth: 2 + + colossalai.engine.gradient_handler + colossalai.engine.schedule + + +.. 
toctree:: + :maxdepth: 2 + + colossalai.engine.amp_type diff --git a/docs/colossalai/colossalai.engine.schedule.rst b/docs/colossalai/colossalai.engine.schedule.rst new file mode 100644 index 000000000..2909373f0 --- /dev/null +++ b/docs/colossalai/colossalai.engine.schedule.rst @@ -0,0 +1,5 @@ +colossalai.engine.schedule +========================== + +.. automodule:: colossalai.engine.schedule + :members: diff --git a/docs/colossalai/colossalai.initialize.rst b/docs/colossalai/colossalai.initialize.rst new file mode 100644 index 000000000..d3f65076a --- /dev/null +++ b/docs/colossalai/colossalai.initialize.rst @@ -0,0 +1,5 @@ +colossalai.initialize +===================== + +.. automodule:: colossalai.initialize + :members: diff --git a/docs/colossalai/colossalai.logging.logging.rst b/docs/colossalai/colossalai.logging.logging.rst new file mode 100644 index 000000000..05374b8f4 --- /dev/null +++ b/docs/colossalai/colossalai.logging.logging.rst @@ -0,0 +1,5 @@ +colossalai.logging.logging +========================== + +.. automodule:: colossalai.logging.logging + :members: diff --git a/docs/colossalai/colossalai.logging.rst b/docs/colossalai/colossalai.logging.rst new file mode 100644 index 000000000..a7a5cec72 --- /dev/null +++ b/docs/colossalai/colossalai.logging.rst @@ -0,0 +1,11 @@ +colossalai.logging +================== + +.. automodule:: colossalai.logging + :members: + + +.. toctree:: + :maxdepth: 2 + + colossalai.logging.logging diff --git a/docs/colossalai/colossalai.nn.data.base_dataset.rst b/docs/colossalai/colossalai.nn.data.base_dataset.rst new file mode 100644 index 000000000..40e5e6b03 --- /dev/null +++ b/docs/colossalai/colossalai.nn.data.base_dataset.rst @@ -0,0 +1,5 @@ +colossalai.nn.data.base\_dataset +================================ + +.. automodule:: colossalai.nn.data.base_dataset + :members: diff --git a/docs/colossalai/colossalai.nn.data.caltech101_dataset.rst b/docs/colossalai/colossalai.nn.data.caltech101_dataset.rst new file mode 100644 index 000000000..ed36b049f --- /dev/null +++ b/docs/colossalai/colossalai.nn.data.caltech101_dataset.rst @@ -0,0 +1,5 @@ +colossalai.nn.data.caltech101\_dataset +====================================== + +.. automodule:: colossalai.nn.data.caltech101_dataset + :members: diff --git a/docs/colossalai/colossalai.nn.data.cifar10_dataset.rst b/docs/colossalai/colossalai.nn.data.cifar10_dataset.rst new file mode 100644 index 000000000..efa8068a4 --- /dev/null +++ b/docs/colossalai/colossalai.nn.data.cifar10_dataset.rst @@ -0,0 +1,5 @@ +colossalai.nn.data.cifar10\_dataset +=================================== + +.. automodule:: colossalai.nn.data.cifar10_dataset + :members: diff --git a/docs/colossalai/colossalai.nn.data.prefetcher.rst b/docs/colossalai/colossalai.nn.data.prefetcher.rst new file mode 100644 index 000000000..5112b306e --- /dev/null +++ b/docs/colossalai/colossalai.nn.data.prefetcher.rst @@ -0,0 +1,5 @@ +colossalai.nn.data.prefetcher +============================= + +.. automodule:: colossalai.nn.data.prefetcher + :members: diff --git a/docs/colossalai/colossalai.nn.data.rst b/docs/colossalai/colossalai.nn.data.rst new file mode 100644 index 000000000..b63452e83 --- /dev/null +++ b/docs/colossalai/colossalai.nn.data.rst @@ -0,0 +1,20 @@ +colossalai.nn.data +================== + +.. automodule:: colossalai.nn.data + :members: + +.. toctree:: + :maxdepth: 2 + + colossalai.nn.data.sampler + + +.. 
toctree:: + :maxdepth: 2 + + colossalai.nn.data.base_dataset + colossalai.nn.data.caltech101_dataset + colossalai.nn.data.cifar10_dataset + colossalai.nn.data.prefetcher + colossalai.nn.data.wiki_dataset diff --git a/docs/colossalai/colossalai.nn.data.sampler.base_sampler.rst b/docs/colossalai/colossalai.nn.data.sampler.base_sampler.rst new file mode 100644 index 000000000..7fc47a57f --- /dev/null +++ b/docs/colossalai/colossalai.nn.data.sampler.base_sampler.rst @@ -0,0 +1,5 @@ +colossalai.nn.data.sampler.base\_sampler +======================================== + +.. automodule:: colossalai.nn.data.sampler.base_sampler + :members: diff --git a/docs/colossalai/colossalai.nn.data.sampler.data_parallel_sampler.rst b/docs/colossalai/colossalai.nn.data.sampler.data_parallel_sampler.rst new file mode 100644 index 000000000..0c3db3423 --- /dev/null +++ b/docs/colossalai/colossalai.nn.data.sampler.data_parallel_sampler.rst @@ -0,0 +1,5 @@ +colossalai.nn.data.sampler.data\_parallel\_sampler +================================================== + +.. automodule:: colossalai.nn.data.sampler.data_parallel_sampler + :members: diff --git a/docs/colossalai/colossalai.nn.data.sampler.rst b/docs/colossalai/colossalai.nn.data.sampler.rst new file mode 100644 index 000000000..e8f8ee036 --- /dev/null +++ b/docs/colossalai/colossalai.nn.data.sampler.rst @@ -0,0 +1,12 @@ +colossalai.nn.data.sampler +========================== + +.. automodule:: colossalai.nn.data.sampler + :members: + + +.. toctree:: + :maxdepth: 2 + + colossalai.nn.data.sampler.base_sampler + colossalai.nn.data.sampler.data_parallel_sampler diff --git a/docs/colossalai/colossalai.nn.data.wiki_dataset.rst b/docs/colossalai/colossalai.nn.data.wiki_dataset.rst new file mode 100644 index 000000000..8ffd86b87 --- /dev/null +++ b/docs/colossalai/colossalai.nn.data.wiki_dataset.rst @@ -0,0 +1,5 @@ +colossalai.nn.data.wiki\_dataset +================================ + +.. automodule:: colossalai.nn.data.wiki_dataset + :members: diff --git a/docs/colossalai/colossalai.nn.layer.base_layer.rst b/docs/colossalai/colossalai.nn.layer.base_layer.rst new file mode 100644 index 000000000..c2a22f04d --- /dev/null +++ b/docs/colossalai/colossalai.nn.layer.base_layer.rst @@ -0,0 +1,5 @@ +colossalai.nn.layer.base\_layer +=============================== + +.. automodule:: colossalai.nn.layer.base_layer + :members: diff --git a/docs/colossalai/colossalai.nn.layer.parallel_1d.layers.rst b/docs/colossalai/colossalai.nn.layer.parallel_1d.layers.rst new file mode 100644 index 000000000..380f6bf8d --- /dev/null +++ b/docs/colossalai/colossalai.nn.layer.parallel_1d.layers.rst @@ -0,0 +1,5 @@ +colossalai.nn.layer.parallel\_1d.layers +======================================= + +.. automodule:: colossalai.nn.layer.parallel_1d.layers + :members: diff --git a/docs/colossalai/colossalai.nn.layer.parallel_1d.rst b/docs/colossalai/colossalai.nn.layer.parallel_1d.rst new file mode 100644 index 000000000..3a8ed6206 --- /dev/null +++ b/docs/colossalai/colossalai.nn.layer.parallel_1d.rst @@ -0,0 +1,11 @@ +colossalai.nn.layer.parallel\_1d +================================ + +.. automodule:: colossalai.nn.layer.parallel_1d + :members: + + +.. 
toctree:: + :maxdepth: 2 + + colossalai.nn.layer.parallel_1d.layers diff --git a/docs/colossalai/colossalai.nn.layer.parallel_2d.layers.rst b/docs/colossalai/colossalai.nn.layer.parallel_2d.layers.rst new file mode 100644 index 000000000..b64d402bd --- /dev/null +++ b/docs/colossalai/colossalai.nn.layer.parallel_2d.layers.rst @@ -0,0 +1,5 @@ +colossalai.nn.layer.parallel\_2d.layers +======================================= + +.. automodule:: colossalai.nn.layer.parallel_2d.layers + :members: diff --git a/docs/colossalai/colossalai.nn.layer.parallel_2d.rst b/docs/colossalai/colossalai.nn.layer.parallel_2d.rst new file mode 100644 index 000000000..f5ad41a1b --- /dev/null +++ b/docs/colossalai/colossalai.nn.layer.parallel_2d.rst @@ -0,0 +1,11 @@ +colossalai.nn.layer.parallel\_2d +================================ + +.. automodule:: colossalai.nn.layer.parallel_2d + :members: + + +.. toctree:: + :maxdepth: 2 + + colossalai.nn.layer.parallel_2d.layers diff --git a/docs/colossalai/colossalai.nn.layer.parallel_2p5d.layers.rst b/docs/colossalai/colossalai.nn.layer.parallel_2p5d.layers.rst new file mode 100644 index 000000000..ebc99d56c --- /dev/null +++ b/docs/colossalai/colossalai.nn.layer.parallel_2p5d.layers.rst @@ -0,0 +1,5 @@ +colossalai.nn.layer.parallel\_2p5d.layers +========================================= + +.. automodule:: colossalai.nn.layer.parallel_2p5d.layers + :members: diff --git a/docs/colossalai/colossalai.nn.layer.parallel_2p5d.rst b/docs/colossalai/colossalai.nn.layer.parallel_2p5d.rst new file mode 100644 index 000000000..5869bdee9 --- /dev/null +++ b/docs/colossalai/colossalai.nn.layer.parallel_2p5d.rst @@ -0,0 +1,11 @@ +colossalai.nn.layer.parallel\_2p5d +================================== + +.. automodule:: colossalai.nn.layer.parallel_2p5d + :members: + + +.. toctree:: + :maxdepth: 2 + + colossalai.nn.layer.parallel_2p5d.layers diff --git a/docs/colossalai/colossalai.nn.layer.parallel_3d.layers.rst b/docs/colossalai/colossalai.nn.layer.parallel_3d.layers.rst new file mode 100644 index 000000000..a1702f1fc --- /dev/null +++ b/docs/colossalai/colossalai.nn.layer.parallel_3d.layers.rst @@ -0,0 +1,5 @@ +colossalai.nn.layer.parallel\_3d.layers +======================================= + +.. automodule:: colossalai.nn.layer.parallel_3d.layers + :members: diff --git a/docs/colossalai/colossalai.nn.layer.parallel_3d.rst b/docs/colossalai/colossalai.nn.layer.parallel_3d.rst new file mode 100644 index 000000000..bb55a63e5 --- /dev/null +++ b/docs/colossalai/colossalai.nn.layer.parallel_3d.rst @@ -0,0 +1,11 @@ +colossalai.nn.layer.parallel\_3d +================================ + +.. automodule:: colossalai.nn.layer.parallel_3d + :members: + + +.. toctree:: + :maxdepth: 2 + + colossalai.nn.layer.parallel_3d.layers diff --git a/docs/colossalai/colossalai.nn.layer.parallel_sequence.layers.rst b/docs/colossalai/colossalai.nn.layer.parallel_sequence.layers.rst new file mode 100644 index 000000000..54929d2e7 --- /dev/null +++ b/docs/colossalai/colossalai.nn.layer.parallel_sequence.layers.rst @@ -0,0 +1,5 @@ +colossalai.nn.layer.parallel\_sequence.layers +============================================= + +.. 
automodule:: colossalai.nn.layer.parallel_sequence.layers + :members: diff --git a/docs/colossalai/colossalai.nn.layer.parallel_sequence.rst b/docs/colossalai/colossalai.nn.layer.parallel_sequence.rst new file mode 100644 index 000000000..24e8941d4 --- /dev/null +++ b/docs/colossalai/colossalai.nn.layer.parallel_sequence.rst @@ -0,0 +1,11 @@ +colossalai.nn.layer.parallel\_sequence +====================================== + +.. automodule:: colossalai.nn.layer.parallel_sequence + :members: + + +.. toctree:: + :maxdepth: 2 + + colossalai.nn.layer.parallel_sequence.layers diff --git a/docs/colossalai/colossalai.nn.layer.parallel_vision_transformer.layers.rst b/docs/colossalai/colossalai.nn.layer.parallel_vision_transformer.layers.rst new file mode 100644 index 000000000..93798dc91 --- /dev/null +++ b/docs/colossalai/colossalai.nn.layer.parallel_vision_transformer.layers.rst @@ -0,0 +1,5 @@ +colossalai.nn.layer.parallel\_vision\_transformer.layers +======================================================== + +.. automodule:: colossalai.nn.layer.parallel_vision_transformer.layers + :members: diff --git a/docs/colossalai/colossalai.nn.layer.parallel_vision_transformer.rst b/docs/colossalai/colossalai.nn.layer.parallel_vision_transformer.rst new file mode 100644 index 000000000..7c96aa19b --- /dev/null +++ b/docs/colossalai/colossalai.nn.layer.parallel_vision_transformer.rst @@ -0,0 +1,11 @@ +colossalai.nn.layer.parallel\_vision\_transformer +================================================= + +.. automodule:: colossalai.nn.layer.parallel_vision_transformer + :members: + + +.. toctree:: + :maxdepth: 2 + + colossalai.nn.layer.parallel_vision_transformer.layers diff --git a/docs/colossalai/colossalai.nn.layer.rst b/docs/colossalai/colossalai.nn.layer.rst new file mode 100644 index 000000000..5746a2d72 --- /dev/null +++ b/docs/colossalai/colossalai.nn.layer.rst @@ -0,0 +1,24 @@ +colossalai.nn.layer +=================== + +.. automodule:: colossalai.nn.layer + :members: + +.. toctree:: + :maxdepth: 2 + + colossalai.nn.layer.parallel_1d + colossalai.nn.layer.parallel_2d + colossalai.nn.layer.parallel_2p5d + colossalai.nn.layer.parallel_3d + colossalai.nn.layer.parallel_sequence + colossalai.nn.layer.parallel_vision_transformer + colossalai.nn.layer.vanilla_resnet + colossalai.nn.layer.vanilla_vision_transformer + colossalai.nn.layer.wrapper + + +.. toctree:: + :maxdepth: 2 + + colossalai.nn.layer.base_layer diff --git a/docs/colossalai/colossalai.nn.layer.vanilla_resnet.basic_block.rst b/docs/colossalai/colossalai.nn.layer.vanilla_resnet.basic_block.rst new file mode 100644 index 000000000..f4bad38f7 --- /dev/null +++ b/docs/colossalai/colossalai.nn.layer.vanilla_resnet.basic_block.rst @@ -0,0 +1,5 @@ +colossalai.nn.layer.vanilla\_resnet.basic\_block +================================================ + +.. automodule:: colossalai.nn.layer.vanilla_resnet.basic_block + :members: diff --git a/docs/colossalai/colossalai.nn.layer.vanilla_resnet.bottleneck.rst b/docs/colossalai/colossalai.nn.layer.vanilla_resnet.bottleneck.rst new file mode 100644 index 000000000..31213e14c --- /dev/null +++ b/docs/colossalai/colossalai.nn.layer.vanilla_resnet.bottleneck.rst @@ -0,0 +1,5 @@ +colossalai.nn.layer.vanilla\_resnet.bottleneck +============================================== + +.. 
automodule:: colossalai.nn.layer.vanilla_resnet.bottleneck + :members: diff --git a/docs/colossalai/colossalai.nn.layer.vanilla_resnet.conv.rst b/docs/colossalai/colossalai.nn.layer.vanilla_resnet.conv.rst new file mode 100644 index 000000000..82fb1571b --- /dev/null +++ b/docs/colossalai/colossalai.nn.layer.vanilla_resnet.conv.rst @@ -0,0 +1,5 @@ +colossalai.nn.layer.vanilla\_resnet.conv +======================================== + +.. automodule:: colossalai.nn.layer.vanilla_resnet.conv + :members: diff --git a/docs/colossalai/colossalai.nn.layer.vanilla_resnet.reslayer.rst b/docs/colossalai/colossalai.nn.layer.vanilla_resnet.reslayer.rst new file mode 100644 index 000000000..74715cd24 --- /dev/null +++ b/docs/colossalai/colossalai.nn.layer.vanilla_resnet.reslayer.rst @@ -0,0 +1,5 @@ +colossalai.nn.layer.vanilla\_resnet.reslayer +============================================ + +.. automodule:: colossalai.nn.layer.vanilla_resnet.reslayer + :members: diff --git a/docs/colossalai/colossalai.nn.layer.vanilla_resnet.rst b/docs/colossalai/colossalai.nn.layer.vanilla_resnet.rst new file mode 100644 index 000000000..6c98cd3a7 --- /dev/null +++ b/docs/colossalai/colossalai.nn.layer.vanilla_resnet.rst @@ -0,0 +1,14 @@ +colossalai.nn.layer.vanilla\_resnet +=================================== + +.. automodule:: colossalai.nn.layer.vanilla_resnet + :members: + + +.. toctree:: + :maxdepth: 2 + + colossalai.nn.layer.vanilla_resnet.basic_block + colossalai.nn.layer.vanilla_resnet.bottleneck + colossalai.nn.layer.vanilla_resnet.conv + colossalai.nn.layer.vanilla_resnet.reslayer diff --git a/docs/colossalai/colossalai.nn.layer.vanilla_vision_transformer.layers.rst b/docs/colossalai/colossalai.nn.layer.vanilla_vision_transformer.layers.rst new file mode 100644 index 000000000..e58155c62 --- /dev/null +++ b/docs/colossalai/colossalai.nn.layer.vanilla_vision_transformer.layers.rst @@ -0,0 +1,5 @@ +colossalai.nn.layer.vanilla\_vision\_transformer.layers +======================================================= + +.. automodule:: colossalai.nn.layer.vanilla_vision_transformer.layers + :members: diff --git a/docs/colossalai/colossalai.nn.layer.vanilla_vision_transformer.rst b/docs/colossalai/colossalai.nn.layer.vanilla_vision_transformer.rst new file mode 100644 index 000000000..5164b03f6 --- /dev/null +++ b/docs/colossalai/colossalai.nn.layer.vanilla_vision_transformer.rst @@ -0,0 +1,11 @@ +colossalai.nn.layer.vanilla\_vision\_transformer +================================================ + +.. automodule:: colossalai.nn.layer.vanilla_vision_transformer + :members: + + +.. toctree:: + :maxdepth: 2 + + colossalai.nn.layer.vanilla_vision_transformer.layers diff --git a/docs/colossalai/colossalai.nn.layer.wrapper.lambda_wrapper.rst b/docs/colossalai/colossalai.nn.layer.wrapper.lambda_wrapper.rst new file mode 100644 index 000000000..f2ced6725 --- /dev/null +++ b/docs/colossalai/colossalai.nn.layer.wrapper.lambda_wrapper.rst @@ -0,0 +1,5 @@ +colossalai.nn.layer.wrapper.lambda\_wrapper +=========================================== + +.. automodule:: colossalai.nn.layer.wrapper.lambda_wrapper + :members: diff --git a/docs/colossalai/colossalai.nn.layer.wrapper.rst b/docs/colossalai/colossalai.nn.layer.wrapper.rst new file mode 100644 index 000000000..40ed618cb --- /dev/null +++ b/docs/colossalai/colossalai.nn.layer.wrapper.rst @@ -0,0 +1,11 @@ +colossalai.nn.layer.wrapper +=========================== + +.. automodule:: colossalai.nn.layer.wrapper + :members: + + +.. 
toctree:: + :maxdepth: 2 + + colossalai.nn.layer.wrapper.lambda_wrapper diff --git a/docs/colossalai/colossalai.nn.loss.base_loss.rst b/docs/colossalai/colossalai.nn.loss.base_loss.rst new file mode 100644 index 000000000..0396ac1b5 --- /dev/null +++ b/docs/colossalai/colossalai.nn.loss.base_loss.rst @@ -0,0 +1,5 @@ +colossalai.nn.loss.base\_loss +============================= + +.. automodule:: colossalai.nn.loss.base_loss + :members: diff --git a/docs/colossalai/colossalai.nn.loss.cross_entropy_1d.rst b/docs/colossalai/colossalai.nn.loss.cross_entropy_1d.rst new file mode 100644 index 000000000..aeb21ae3b --- /dev/null +++ b/docs/colossalai/colossalai.nn.loss.cross_entropy_1d.rst @@ -0,0 +1,5 @@ +colossalai.nn.loss.cross\_entropy\_1d +===================================== + +.. automodule:: colossalai.nn.loss.cross_entropy_1d + :members: diff --git a/docs/colossalai/colossalai.nn.loss.cross_entropy_2d.rst b/docs/colossalai/colossalai.nn.loss.cross_entropy_2d.rst new file mode 100644 index 000000000..780a66557 --- /dev/null +++ b/docs/colossalai/colossalai.nn.loss.cross_entropy_2d.rst @@ -0,0 +1,5 @@ +colossalai.nn.loss.cross\_entropy\_2d +===================================== + +.. automodule:: colossalai.nn.loss.cross_entropy_2d + :members: diff --git a/docs/colossalai/colossalai.nn.loss.cross_entropy_2p5d.rst b/docs/colossalai/colossalai.nn.loss.cross_entropy_2p5d.rst new file mode 100644 index 000000000..dd136dca2 --- /dev/null +++ b/docs/colossalai/colossalai.nn.loss.cross_entropy_2p5d.rst @@ -0,0 +1,5 @@ +colossalai.nn.loss.cross\_entropy\_2p5d +======================================= + +.. automodule:: colossalai.nn.loss.cross_entropy_2p5d + :members: diff --git a/docs/colossalai/colossalai.nn.loss.cross_entropy_3d.rst b/docs/colossalai/colossalai.nn.loss.cross_entropy_3d.rst new file mode 100644 index 000000000..9b8610f31 --- /dev/null +++ b/docs/colossalai/colossalai.nn.loss.cross_entropy_3d.rst @@ -0,0 +1,5 @@ +colossalai.nn.loss.cross\_entropy\_3d +===================================== + +.. automodule:: colossalai.nn.loss.cross_entropy_3d + :members: diff --git a/docs/colossalai/colossalai.nn.loss.rst b/docs/colossalai/colossalai.nn.loss.rst new file mode 100644 index 000000000..face8dbef --- /dev/null +++ b/docs/colossalai/colossalai.nn.loss.rst @@ -0,0 +1,15 @@ +colossalai.nn.loss +================== + +.. automodule:: colossalai.nn.loss + :members: + + +.. toctree:: + :maxdepth: 2 + + colossalai.nn.loss.base_loss + colossalai.nn.loss.cross_entropy_1d + colossalai.nn.loss.cross_entropy_2d + colossalai.nn.loss.cross_entropy_2p5d + colossalai.nn.loss.cross_entropy_3d diff --git a/docs/colossalai/colossalai.nn.lr_scheduler.cosine.rst b/docs/colossalai/colossalai.nn.lr_scheduler.cosine.rst new file mode 100644 index 000000000..a7c636ad3 --- /dev/null +++ b/docs/colossalai/colossalai.nn.lr_scheduler.cosine.rst @@ -0,0 +1,5 @@ +colossalai.nn.lr\_scheduler.cosine +================================== + +.. automodule:: colossalai.nn.lr_scheduler.cosine + :members: diff --git a/docs/colossalai/colossalai.nn.lr_scheduler.delayed.rst b/docs/colossalai/colossalai.nn.lr_scheduler.delayed.rst new file mode 100644 index 000000000..2a86c4b2a --- /dev/null +++ b/docs/colossalai/colossalai.nn.lr_scheduler.delayed.rst @@ -0,0 +1,5 @@ +colossalai.nn.lr\_scheduler.delayed +=================================== + +.. 
automodule:: colossalai.nn.lr_scheduler.delayed + :members: diff --git a/docs/colossalai/colossalai.nn.lr_scheduler.linear.rst b/docs/colossalai/colossalai.nn.lr_scheduler.linear.rst new file mode 100644 index 000000000..5e917edc2 --- /dev/null +++ b/docs/colossalai/colossalai.nn.lr_scheduler.linear.rst @@ -0,0 +1,5 @@ +colossalai.nn.lr\_scheduler.linear +================================== + +.. automodule:: colossalai.nn.lr_scheduler.linear + :members: diff --git a/docs/colossalai/colossalai.nn.lr_scheduler.multistep.rst b/docs/colossalai/colossalai.nn.lr_scheduler.multistep.rst new file mode 100644 index 000000000..4248a6386 --- /dev/null +++ b/docs/colossalai/colossalai.nn.lr_scheduler.multistep.rst @@ -0,0 +1,5 @@ +colossalai.nn.lr\_scheduler.multistep +===================================== + +.. automodule:: colossalai.nn.lr_scheduler.multistep + :members: diff --git a/docs/colossalai/colossalai.nn.lr_scheduler.onecycle.rst b/docs/colossalai/colossalai.nn.lr_scheduler.onecycle.rst new file mode 100644 index 000000000..7f2fd4758 --- /dev/null +++ b/docs/colossalai/colossalai.nn.lr_scheduler.onecycle.rst @@ -0,0 +1,5 @@ +colossalai.nn.lr\_scheduler.onecycle +==================================== + +.. automodule:: colossalai.nn.lr_scheduler.onecycle + :members: diff --git a/docs/colossalai/colossalai.nn.lr_scheduler.poly.rst b/docs/colossalai/colossalai.nn.lr_scheduler.poly.rst new file mode 100644 index 000000000..c1618812a --- /dev/null +++ b/docs/colossalai/colossalai.nn.lr_scheduler.poly.rst @@ -0,0 +1,5 @@ +colossalai.nn.lr\_scheduler.poly +================================ + +.. automodule:: colossalai.nn.lr_scheduler.poly + :members: diff --git a/docs/colossalai/colossalai.nn.lr_scheduler.rst b/docs/colossalai/colossalai.nn.lr_scheduler.rst new file mode 100644 index 000000000..427a3ee45 --- /dev/null +++ b/docs/colossalai/colossalai.nn.lr_scheduler.rst @@ -0,0 +1,17 @@ +colossalai.nn.lr\_scheduler +=========================== + +.. automodule:: colossalai.nn.lr_scheduler + :members: + + +.. toctree:: + :maxdepth: 2 + + colossalai.nn.lr_scheduler.cosine + colossalai.nn.lr_scheduler.delayed + colossalai.nn.lr_scheduler.linear + colossalai.nn.lr_scheduler.multistep + colossalai.nn.lr_scheduler.onecycle + colossalai.nn.lr_scheduler.poly + colossalai.nn.lr_scheduler.torch diff --git a/docs/colossalai/colossalai.nn.lr_scheduler.torch.rst b/docs/colossalai/colossalai.nn.lr_scheduler.torch.rst new file mode 100644 index 000000000..f8d552bf1 --- /dev/null +++ b/docs/colossalai/colossalai.nn.lr_scheduler.torch.rst @@ -0,0 +1,5 @@ +colossalai.nn.lr\_scheduler.torch +================================= + +.. automodule:: colossalai.nn.lr_scheduler.torch + :members: diff --git a/docs/colossalai/colossalai.nn.model.base_model.rst b/docs/colossalai/colossalai.nn.model.base_model.rst new file mode 100644 index 000000000..aac96be7b --- /dev/null +++ b/docs/colossalai/colossalai.nn.model.base_model.rst @@ -0,0 +1,5 @@ +colossalai.nn.model.base\_model +=============================== + +.. automodule:: colossalai.nn.model.base_model + :members: diff --git a/docs/colossalai/colossalai.nn.model.bert.bert.rst b/docs/colossalai/colossalai.nn.model.bert.bert.rst new file mode 100644 index 000000000..a13db3252 --- /dev/null +++ b/docs/colossalai/colossalai.nn.model.bert.bert.rst @@ -0,0 +1,5 @@ +colossalai.nn.model.bert.bert +============================= + +.. 
automodule:: colossalai.nn.model.bert.bert + :members: diff --git a/docs/colossalai/colossalai.nn.model.bert.rst b/docs/colossalai/colossalai.nn.model.bert.rst new file mode 100644 index 000000000..2d436fd1b --- /dev/null +++ b/docs/colossalai/colossalai.nn.model.bert.rst @@ -0,0 +1,11 @@ +colossalai.nn.model.bert +======================== + +.. automodule:: colossalai.nn.model.bert + :members: + + +.. toctree:: + :maxdepth: 2 + + colossalai.nn.model.bert.bert diff --git a/docs/colossalai/colossalai.nn.model.rst b/docs/colossalai/colossalai.nn.model.rst new file mode 100644 index 000000000..14acfcb7b --- /dev/null +++ b/docs/colossalai/colossalai.nn.model.rst @@ -0,0 +1,18 @@ +colossalai.nn.model +=================== + +.. automodule:: colossalai.nn.model + :members: + +.. toctree:: + :maxdepth: 2 + + colossalai.nn.model.bert + colossalai.nn.model.vanilla_resnet + colossalai.nn.model.vision_transformer + + +.. toctree:: + :maxdepth: 2 + + colossalai.nn.model.base_model diff --git a/docs/colossalai/colossalai.nn.model.vanilla_resnet.resnet.rst b/docs/colossalai/colossalai.nn.model.vanilla_resnet.resnet.rst new file mode 100644 index 000000000..a2dd49ae3 --- /dev/null +++ b/docs/colossalai/colossalai.nn.model.vanilla_resnet.resnet.rst @@ -0,0 +1,5 @@ +colossalai.nn.model.vanilla\_resnet.resnet +========================================== + +.. automodule:: colossalai.nn.model.vanilla_resnet.resnet + :members: diff --git a/docs/colossalai/colossalai.nn.model.vanilla_resnet.rst b/docs/colossalai/colossalai.nn.model.vanilla_resnet.rst new file mode 100644 index 000000000..148ce723d --- /dev/null +++ b/docs/colossalai/colossalai.nn.model.vanilla_resnet.rst @@ -0,0 +1,11 @@ +colossalai.nn.model.vanilla\_resnet +=================================== + +.. automodule:: colossalai.nn.model.vanilla_resnet + :members: + + +.. toctree:: + :maxdepth: 2 + + colossalai.nn.model.vanilla_resnet.resnet diff --git a/docs/colossalai/colossalai.nn.model.vision_transformer.rst b/docs/colossalai/colossalai.nn.model.vision_transformer.rst new file mode 100644 index 000000000..edfd07dfa --- /dev/null +++ b/docs/colossalai/colossalai.nn.model.vision_transformer.rst @@ -0,0 +1,11 @@ +colossalai.nn.model.vision\_transformer +======================================= + +.. automodule:: colossalai.nn.model.vision_transformer + :members: + + +.. toctree:: + :maxdepth: 2 + + colossalai.nn.model.vision_transformer.vision_transformer diff --git a/docs/colossalai/colossalai.nn.model.vision_transformer.vision_transformer.rst b/docs/colossalai/colossalai.nn.model.vision_transformer.vision_transformer.rst new file mode 100644 index 000000000..08e6a96ef --- /dev/null +++ b/docs/colossalai/colossalai.nn.model.vision_transformer.vision_transformer.rst @@ -0,0 +1,5 @@ +colossalai.nn.model.vision\_transformer.vision\_transformer +=========================================================== + +.. automodule:: colossalai.nn.model.vision_transformer.vision_transformer + :members: diff --git a/docs/colossalai/colossalai.nn.multi_tensor_apply.multi_tensor_apply.rst b/docs/colossalai/colossalai.nn.multi_tensor_apply.multi_tensor_apply.rst new file mode 100644 index 000000000..812a4d7eb --- /dev/null +++ b/docs/colossalai/colossalai.nn.multi_tensor_apply.multi_tensor_apply.rst @@ -0,0 +1,5 @@ +colossalai.nn.multi\_tensor\_apply.multi\_tensor\_apply +======================================================= + +.. 
automodule:: colossalai.nn.multi_tensor_apply.multi_tensor_apply + :members: diff --git a/docs/colossalai/colossalai.nn.multi_tensor_apply.rst b/docs/colossalai/colossalai.nn.multi_tensor_apply.rst new file mode 100644 index 000000000..f1ae7c144 --- /dev/null +++ b/docs/colossalai/colossalai.nn.multi_tensor_apply.rst @@ -0,0 +1,11 @@ +colossalai.nn.multi\_tensor\_apply +================================== + +.. automodule:: colossalai.nn.multi_tensor_apply + :members: + + +.. toctree:: + :maxdepth: 2 + + colossalai.nn.multi_tensor_apply.multi_tensor_apply diff --git a/docs/colossalai/colossalai.nn.optimizer.fp16_optimizer.rst b/docs/colossalai/colossalai.nn.optimizer.fp16_optimizer.rst new file mode 100644 index 000000000..977bd817d --- /dev/null +++ b/docs/colossalai/colossalai.nn.optimizer.fp16_optimizer.rst @@ -0,0 +1,5 @@ +colossalai.nn.optimizer.fp16\_optimizer +======================================= + +.. automodule:: colossalai.nn.optimizer.fp16_optimizer + :members: diff --git a/docs/colossalai/colossalai.nn.optimizer.fused_adam.rst b/docs/colossalai/colossalai.nn.optimizer.fused_adam.rst new file mode 100644 index 000000000..60af624cb --- /dev/null +++ b/docs/colossalai/colossalai.nn.optimizer.fused_adam.rst @@ -0,0 +1,5 @@ +colossalai.nn.optimizer.fused\_adam +=================================== + +.. automodule:: colossalai.nn.optimizer.fused_adam + :members: diff --git a/docs/colossalai/colossalai.nn.optimizer.fused_lamb.rst b/docs/colossalai/colossalai.nn.optimizer.fused_lamb.rst new file mode 100644 index 000000000..66c0fa4ca --- /dev/null +++ b/docs/colossalai/colossalai.nn.optimizer.fused_lamb.rst @@ -0,0 +1,5 @@ +colossalai.nn.optimizer.fused\_lamb +=================================== + +.. automodule:: colossalai.nn.optimizer.fused_lamb + :members: diff --git a/docs/colossalai/colossalai.nn.optimizer.fused_sgd.rst b/docs/colossalai/colossalai.nn.optimizer.fused_sgd.rst new file mode 100644 index 000000000..2ecc77c33 --- /dev/null +++ b/docs/colossalai/colossalai.nn.optimizer.fused_sgd.rst @@ -0,0 +1,5 @@ +colossalai.nn.optimizer.fused\_sgd +================================== + +.. automodule:: colossalai.nn.optimizer.fused_sgd + :members: diff --git a/docs/colossalai/colossalai.nn.optimizer.lamb.rst b/docs/colossalai/colossalai.nn.optimizer.lamb.rst new file mode 100644 index 000000000..57199ea36 --- /dev/null +++ b/docs/colossalai/colossalai.nn.optimizer.lamb.rst @@ -0,0 +1,5 @@ +colossalai.nn.optimizer.lamb +============================ + +.. automodule:: colossalai.nn.optimizer.lamb + :members: diff --git a/docs/colossalai/colossalai.nn.optimizer.loss_scaler.rst b/docs/colossalai/colossalai.nn.optimizer.loss_scaler.rst new file mode 100644 index 000000000..5ee8b9650 --- /dev/null +++ b/docs/colossalai/colossalai.nn.optimizer.loss_scaler.rst @@ -0,0 +1,5 @@ +colossalai.nn.optimizer.loss\_scaler +==================================== + +.. automodule:: colossalai.nn.optimizer.loss_scaler + :members: diff --git a/docs/colossalai/colossalai.nn.optimizer.rst b/docs/colossalai/colossalai.nn.optimizer.rst new file mode 100644 index 000000000..d01f50d62 --- /dev/null +++ b/docs/colossalai/colossalai.nn.optimizer.rst @@ -0,0 +1,19 @@ +colossalai.nn.optimizer +======================= + +.. automodule:: colossalai.nn.optimizer + :members: + + +.. 
toctree:: + :maxdepth: 2 + + colossalai.nn.optimizer.fp16_optimizer + colossalai.nn.optimizer.fused_adam + colossalai.nn.optimizer.fused_lamb + colossalai.nn.optimizer.fused_sgd + colossalai.nn.optimizer.lamb + colossalai.nn.optimizer.loss_scaler + colossalai.nn.optimizer.zero_redundancy_optimizer_level_1 + colossalai.nn.optimizer.zero_redundancy_optimizer_level_2 + colossalai.nn.optimizer.zero_redundancy_optimizer_level_3 diff --git a/docs/colossalai/colossalai.nn.optimizer.zero_redundancy_optimizer_level_1.rst b/docs/colossalai/colossalai.nn.optimizer.zero_redundancy_optimizer_level_1.rst new file mode 100644 index 000000000..04f2e3f96 --- /dev/null +++ b/docs/colossalai/colossalai.nn.optimizer.zero_redundancy_optimizer_level_1.rst @@ -0,0 +1,5 @@ +colossalai.nn.optimizer.zero\_redundancy\_optimizer\_level\_1 +============================================================= + +.. automodule:: colossalai.nn.optimizer.zero_redundancy_optimizer_level_1 + :members: diff --git a/docs/colossalai/colossalai.nn.optimizer.zero_redundancy_optimizer_level_2.rst b/docs/colossalai/colossalai.nn.optimizer.zero_redundancy_optimizer_level_2.rst new file mode 100644 index 000000000..b425f4305 --- /dev/null +++ b/docs/colossalai/colossalai.nn.optimizer.zero_redundancy_optimizer_level_2.rst @@ -0,0 +1,5 @@ +colossalai.nn.optimizer.zero\_redundancy\_optimizer\_level\_2 +============================================================= + +.. automodule:: colossalai.nn.optimizer.zero_redundancy_optimizer_level_2 + :members: diff --git a/docs/colossalai/colossalai.nn.optimizer.zero_redundancy_optimizer_level_3.rst b/docs/colossalai/colossalai.nn.optimizer.zero_redundancy_optimizer_level_3.rst new file mode 100644 index 000000000..798231e0b --- /dev/null +++ b/docs/colossalai/colossalai.nn.optimizer.zero_redundancy_optimizer_level_3.rst @@ -0,0 +1,5 @@ +colossalai.nn.optimizer.zero\_redundancy\_optimizer\_level\_3 +============================================================= + +.. automodule:: colossalai.nn.optimizer.zero_redundancy_optimizer_level_3 + :members: diff --git a/docs/colossalai/colossalai.nn.rst b/docs/colossalai/colossalai.nn.rst new file mode 100644 index 000000000..4d4593312 --- /dev/null +++ b/docs/colossalai/colossalai.nn.rst @@ -0,0 +1,16 @@ +colossalai.nn +============= + +.. automodule:: colossalai.nn + :members: + +.. toctree:: + :maxdepth: 2 + + colossalai.nn.data + colossalai.nn.layer + colossalai.nn.loss + colossalai.nn.lr_scheduler + colossalai.nn.model + colossalai.nn.multi_tensor_apply + colossalai.nn.optimizer diff --git a/docs/colossalai/colossalai.registry.registry.rst b/docs/colossalai/colossalai.registry.registry.rst new file mode 100644 index 000000000..e942d7969 --- /dev/null +++ b/docs/colossalai/colossalai.registry.registry.rst @@ -0,0 +1,5 @@ +colossalai.registry.registry +============================ + +.. automodule:: colossalai.registry.registry + :members: diff --git a/docs/colossalai/colossalai.registry.rst b/docs/colossalai/colossalai.registry.rst new file mode 100644 index 000000000..0f294f6d1 --- /dev/null +++ b/docs/colossalai/colossalai.registry.rst @@ -0,0 +1,11 @@ +colossalai.registry +=================== + +.. automodule:: colossalai.registry + :members: + + +.. toctree:: + :maxdepth: 2 + + colossalai.registry.registry diff --git a/docs/colossalai/colossalai.rst b/docs/colossalai/colossalai.rst new file mode 100644 index 000000000..414ee8120 --- /dev/null +++ b/docs/colossalai/colossalai.rst @@ -0,0 +1,27 @@ +colossalai +========== + +.. 
automodule:: colossalai + :members: + +.. toctree:: + :maxdepth: 2 + + colossalai.builder + colossalai.communication + colossalai.context + colossalai.engine + colossalai.logging + colossalai.nn + colossalai.registry + colossalai.trainer + colossalai.utils + + +.. toctree:: + :maxdepth: 2 + + colossalai.checkpointing + colossalai.constants + colossalai.core + colossalai.initialize diff --git a/docs/colossalai/colossalai.trainer.hooks.rst b/docs/colossalai/colossalai.trainer.hooks.rst new file mode 100644 index 000000000..84cc6797b --- /dev/null +++ b/docs/colossalai/colossalai.trainer.hooks.rst @@ -0,0 +1,5 @@ +colossalai.trainer.hooks +======================== + +.. automodule:: colossalai.trainer.hooks + :members: diff --git a/docs/colossalai/colossalai.trainer.metric.rst b/docs/colossalai/colossalai.trainer.metric.rst new file mode 100644 index 000000000..b6b06555d --- /dev/null +++ b/docs/colossalai/colossalai.trainer.metric.rst @@ -0,0 +1,5 @@ +colossalai.trainer.metric +========================= + +.. automodule:: colossalai.trainer.metric + :members: diff --git a/docs/colossalai/colossalai.trainer.rst b/docs/colossalai/colossalai.trainer.rst new file mode 100644 index 000000000..b2ccefd3e --- /dev/null +++ b/docs/colossalai/colossalai.trainer.rst @@ -0,0 +1,16 @@ +colossalai.trainer +================== + +.. automodule:: colossalai.trainer + :members: + +.. toctree:: + :maxdepth: 2 + + colossalai.trainer.hooks + + +.. toctree:: + :maxdepth: 2 + + colossalai.trainer.metric diff --git a/docs/colossalai/colossalai.utils.activation_checkpoint.rst b/docs/colossalai/colossalai.utils.activation_checkpoint.rst new file mode 100644 index 000000000..671b5fe9e --- /dev/null +++ b/docs/colossalai/colossalai.utils.activation_checkpoint.rst @@ -0,0 +1,5 @@ +colossalai.utils.activation\_checkpoint +======================================= + +.. automodule:: colossalai.utils.activation_checkpoint + :members: diff --git a/docs/colossalai/colossalai.utils.common.rst b/docs/colossalai/colossalai.utils.common.rst new file mode 100644 index 000000000..cb9f9c14e --- /dev/null +++ b/docs/colossalai/colossalai.utils.common.rst @@ -0,0 +1,5 @@ +colossalai.utils.common +======================= + +.. automodule:: colossalai.utils.common + :members: diff --git a/docs/colossalai/colossalai.utils.cuda.rst b/docs/colossalai/colossalai.utils.cuda.rst new file mode 100644 index 000000000..ec428c5ef --- /dev/null +++ b/docs/colossalai/colossalai.utils.cuda.rst @@ -0,0 +1,5 @@ +colossalai.utils.cuda +===================== + +.. automodule:: colossalai.utils.cuda + :members: diff --git a/docs/colossalai/colossalai.utils.memory.rst b/docs/colossalai/colossalai.utils.memory.rst new file mode 100644 index 000000000..67c5d6002 --- /dev/null +++ b/docs/colossalai/colossalai.utils.memory.rst @@ -0,0 +1,5 @@ +colossalai.utils.memory +======================= + +.. automodule:: colossalai.utils.memory + :members: diff --git a/docs/colossalai/colossalai.utils.rst b/docs/colossalai/colossalai.utils.rst new file mode 100644 index 000000000..bfe62172f --- /dev/null +++ b/docs/colossalai/colossalai.utils.rst @@ -0,0 +1,15 @@ +colossalai.utils +================ + +.. automodule:: colossalai.utils + :members: + + +.. 
toctree:: + :maxdepth: 2 + + colossalai.utils.activation_checkpoint + colossalai.utils.common + colossalai.utils.cuda + colossalai.utils.memory + colossalai.utils.timer diff --git a/docs/colossalai/colossalai.utils.timer.rst b/docs/colossalai/colossalai.utils.timer.rst new file mode 100644 index 000000000..2014c85f5 --- /dev/null +++ b/docs/colossalai/colossalai.utils.timer.rst @@ -0,0 +1,5 @@ +colossalai.utils.timer +====================== + +.. automodule:: colossalai.utils.timer + :members: diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 000000000..bb1e1943a --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,87 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import sys + +sys.path.insert(0, os.path.abspath('..')) + +# -- Project information ----------------------------------------------------- + +project = 'ColossalAI' +copyright = '2021, HPC-AI Tech' +author = 'HPC-AI Technology Inc.' + +# The full version, including alpha/beta/rc tags +release = '0.0.1' + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.mathjax', + 'myst_parser', + 'sphinx.ext.napoleon', +] + +# Disable docstring inheritance +autodoc_inherit_docstrings = False + +# Disable displaying type annotations, these can be very verbose +autodoc_typehints = 'none' + +# Enable overriding of function signatures in the first line of the docstring. +autodoc_docstring_signature = True +autodoc_default_options = { + 'member-order': 'bysource' +} + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['.build', 'Thumbs.db', '.DS_Store'] + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' +html_show_sourcelink = False +html_theme_options = { + 'navigation_depth': 2, +} + +html_context = { + 'display_github': False, + 'github_user': 'hpcaitech', + 'github_repo': 'ColossalAI', + # 'github_version': 'master/docs/', +} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". 
+html_static_path = ['_static'] + +html_css_files = [ + 'css/rtd_theme.css', +] + +# -- Extension configuration ------------------------------------------------- +source_suffix = ['.rst', '.md', '.MD'] diff --git a/docs/config.md b/docs/config.md new file mode 100644 index 000000000..2ab15cde4 --- /dev/null +++ b/docs/config.md @@ -0,0 +1,187 @@ +# Config file + +Here is an example config file of training ViT on cifar: + +```python +# build train_dataset and train_dataloader from this dictionary +# It is not compulsory in Config File, instead, you can input this dictionary as an argument into colossalai.initialize() +train_data = dict( + # dictionary for building Dataset + dataset=dict( + # the type CIFAR10Dataset has to be registered + type='CIFAR10Dataset', + root='/path/to/data', + # transform pipeline + transform_pipeline=[ + dict(type='Resize', size=IMG_SIZE), + dict(type='RandomCrop', size=IMG_SIZE, padding=4), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010]), + ] + ), + # dictionary for building Dataloader + dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, + # num_workers=1, + shuffle=True, + ) +) + +# build test_dataset and test_dataloader from this dictionary +test_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root='/path/to/data', + train=False, + transform_pipeline=[ + dict(type='Resize', size=IMG_SIZE), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010] + ), + ] + ), + dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, + # num_workers=1, + ) +) + +# compulsory +# build optimizer from this dictionary +optimizer = dict( + # Avaluable types: 'ZeroRedundancyOptimizer_Level_1', 'ZeroRedundancyOptimizer_Level_2', 'ZeroRedundancyOptimizer_Level_3' + # 'Adam', 'Lamb', 'SGD', 'FusedLAMB', 'FusedAdam', 'FusedSGD', 'FP16Optimizer' + type='Adam', + lr=0.001, + weight_decay=0 +) + +# compulsory +# build loss function from this dictionary +loss = dict( + # Avaluable types: + # 'CrossEntropyLoss2D', 'CrossEntropyLoss2p5D', 'CrossEntropyLoss3D' + type='CrossEntropyLoss2D', +) + +# compulsory +# build model from this dictionary +model = dict( + # types avaluable: 'PretrainBERT', 'VanillaResNet', 'VisionTransformerFromConfig' + type='VisionTransformerFromConfig', + # each key-value pair above refers to a layer + # input data pass through these layers recursively + tensor_splitting_cfg=dict( + type='ViTInputSplitter2D', + ), + embedding_cfg=dict( + type='ViTPatchEmbedding2D', + img_size=IMG_SIZE, + patch_size=PATCH_SIZE, + embed_dim=DIM, + ), + token_fusion_cfg=dict( + type='ViTTokenFuser2D', + img_size=IMG_SIZE, + patch_size=PATCH_SIZE, + embed_dim=DIM, + drop_rate=0.1 + ), + norm_cfg=dict( + type='LayerNorm2D', + normalized_shape=DIM, + eps=1e-6, + ), + block_cfg=dict( + # ViTBlock is a submodule + type='ViTBlock', + attention_cfg=dict( + type='ViTSelfAttention2D', + hidden_size=DIM, + num_attention_heads=NUM_ATTENTION_HEADS, + attention_dropout_prob=0., + hidden_dropout_prob=0.1, + checkpoint=True + ), + droppath_cfg=dict( + type='VanillaViTDropPath', + ), + mlp_cfg=dict( + type='ViTMLP2D', + in_features=DIM, + dropout_prob=0.1, + mlp_ratio=4, + checkpoint=True + ), + norm_cfg=dict( + type='LayerNorm2D', + normalized_shape=DIM, + eps=1e-6, + ), + ), + head_cfg=dict( + type='ViTHead2D', + hidden_size=DIM, + num_classes=NUM_CLASSES, + ), + embed_dim=DIM, + depth=DEPTH, + drop_path_rate=0., +) + +# 
hooks are built when initializing trainer +# possible hooks: 'BaseHook', 'MetricHook','LoadCheckpointHook' +# 'SaveCheckpointHook','LossHook', 'AccuracyHook', 'Accuracy2DHook' +# 'LogMetricByEpochHook', 'TensorboardHook','LogTimingByEpochHook', 'LogMemoryByEpochHook' +hooks = [ + dict(type='LogMetricByEpochHook'), + dict(type='LogTimingByEpochHook'), + dict(type='LogMemoryByEpochHook'), + dict(type='Accuracy2DHook'), + dict(type='LossHook'), + # dict(type='TensorboardHook', log_dir='./tfb_logs'), + # dict(type='SaveCheckpointHook', interval=5, checkpoint_dir='./ckpt'), + # dict(type='LoadCheckpointHook', epoch=20, checkpoint_dir='./ckpt') +] + +# three keys: pipeline, tensor, data +# if data=dict(size=1), which means no data parallelization, then there is no need to define it +parallel = dict( + pipeline=dict(size=1), + tensor=dict(size=4, mode='2d'), +) + +# not compulsory +# pipeline or no pipeline schedule +fp16 = dict( + mode=AMP_TYPE.PARALLEL, + initial_scale=2 ** 8 +) + +# not compulsory +# build learning rate scheduler +lr_scheduler = dict( + type='LinearWarmupLR', + warmup_epochs=5 +) + +schedule = dict( + num_microbatches=8 +) + +# training stopping criterion +# you can give num_steps or num_epochs +num_epochs = 60 + +# config logging path +logging = dict( + root_path='./logs' +) +``` \ No newline at end of file diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 000000000..07c309637 --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,40 @@ +.. ColossalAI documentation master file, created by + sphinx-quickstart on Mon Oct 11 17:05:05 2021. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +ColossalAI documentation +====================================== +.. toctree:: + :maxdepth: 1 + :caption: GETTING STARTED + + installation.md + run_demo.md + + +.. toctree:: + :maxdepth: 1 + :caption: CUSTOMIZE YOUR TRAINING + + parallelization.md + model.md + trainer_engine.md + amp.md + zero.md + add_your_parallel.md + config.md + + + +.. toctree:: + :maxdepth: 2 + :caption: API REFERENCE + + colossalai/colossalai + + +Indices and tables +================== + +* :ref:`genindex` \ No newline at end of file diff --git a/docs/installation.md b/docs/installation.md new file mode 100644 index 000000000..5b379411a --- /dev/null +++ b/docs/installation.md @@ -0,0 +1,25 @@ +# Setup + +## Install with pip + +```bash +pip install colossalai +``` + +## Install from source + +```shell +git clone git@github.com:hpcaitech/ColossalAI.git +cd ColossalAI +# install dependency +pip install -r requirements/requirements.txt + +# install colossalai +pip install . +``` + +Install and enable CUDA kernel fusion (compulsory installation when using fused optimizer) + +``` +pip install -v --no-cache-dir --global-option="--cuda_ext" . +``` diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 000000000..cf7321411 --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=.build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. 
+ echo.If you don't have Sphinx installed, grab it from + echo.https://www.sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/model.md b/docs/model.md new file mode 100644 index 000000000..fe1c3631b --- /dev/null +++ b/docs/model.md @@ -0,0 +1,28 @@ +# Define your own parallel model + +## Write a Simple 2D Parallel Model + +Let's say we have a huge MLP model whose very large hidden size makes it difficult to fit into a single GPU. We can +then distribute the model weights across GPUs in a 2D mesh while still writing the model in a familiar way. + +```python +from colossalai.nn import Linear2D +import torch.nn as nn + + +class MLP_2D(nn.Module): + + def __init__(self): + super().__init__() + self.linear_1 = Linear2D(in_features=1024, out_features=16384) + self.linear_2 = Linear2D(in_features=16384, out_features=1024) + + def forward(self, x): + x = self.linear_1(x) + x = self.linear_2(x) + return x + +``` + +## Use pre-defined model +Our Model Zoo supports *BERT*, *ViT* and *MLP-Mixer* models of different sizes. \ No newline at end of file diff --git a/docs/parallelization.md b/docs/parallelization.md new file mode 100644 index 000000000..84911d76e --- /dev/null +++ b/docs/parallelization.md @@ -0,0 +1,216 @@ +# Parallelization + +## Configure the Combination of Parallelization + +We support multiple parallelization strategies in our library. + +Our codebase supports hybrid parallelism, namely data parallelism, pipeline parallelism and tensor parallelism ( +1D, 2D, 2.5D, 3D). You can initialize the corresponding process group by setting `parallel` in our config. The parallel +configuration can be easily specified by a dictionary in the configuration file. The configuration dictionary must obey the +following format. The data parallel size will be inferred automatically based on your inputs to pipeline parallelism and +tensor parallelism. + +```python +parallel = dict( + pipeline=dict(size=int), + tensor=dict(size=int, mode='1d' or '2d' or '2.5d' or '3d', kwargs=Any) +) +``` + +The name of the dictionary variable should be **parallel**. All the arguments, even **parallel** itself, are optional, and the data, +pipeline and tensor parallel sizes will be set to the default value 1. The value of data, pipeline and tensor can be an int +representing the size of the specific parallel dimension, or a dictionary with a key called "size". The key "mode" +represents the way of model parallelism. + +## Data Parallel +Data parallelism is the most common way to distribute your training task: the data is split into several shards and each device trains +on a single shard. The configuration for data parallelism is detected automatically and set for you. You do +not have to set it explicitly in your configuration. When the data parallel size is larger than 1, Colossal-AI automatically +adds the distributed data sampler to the dataloader to shard the dataset. + + +## Pipeline Parallel (experimental) + +Pipeline parallelism splits the model into several partitions by layer. For example, let's assume we have a simple +model which consists of two linear layers. We have two GPUs, and we can allocate the first linear layer to the first GPU +and the second layer to the second GPU. This example of course wastes computing resources and is only meant to demonstrate +the idea of pipeline parallelism. + +As PyTorch is based on a dynamic computation graph, the computation flow is not known until execution. 
To support pipeline +parallelism in PyTorch, you may need to add one more attribute to your model class which tells Colossal-AI the sequence +of execution. One example you can refer to is `colossalai.nn.VanillaResNet`. + +```python +from typing import List, Optional + +from colossalai.nn import BaseModel +import torch + +class VanillaResNet(BaseModel): + + def __init__( + self, + num_cls: int, + block_type: str, + layers: List[int], + norm_layer_type: str = 'BatchNorm2d', + in_channels: int = 3, + groups: int = 1, + width_per_group: int = 64, + zero_init_residual: bool = False, + replace_stride_with_dilation: Optional[List[bool]] = None, + dilations=(1, 1, 1, 1) + ) -> None: + super().__init__() + + ... # some model params + + self.layers_cfg = [ + # conv1 + dict(type='Conv2d', + in_channels=in_channels, + out_channels=self.inplanes, + kernel_size=7, + stride=2, + padding=3, + bias=False), + # bn1 + dict( + type=norm_layer_type, + num_features=self.inplanes + ), + # relu + dict( + type='ReLU', + inplace=True + ), + # maxpool + dict( + type='MaxPool2d', + kernel_size=3, + stride=2, + padding=1 + ), + # layer 1 + dict( + inplanes=self.inplanes, + planes=64, + blocks=self.blocks[0], + dilation=self.dilations[0], + **self.reslayer_common_cfg + ), + # layer 2 + dict( + inplanes=64 * self.block_expansion, + planes=128, + blocks=self.blocks[1], + stride=2, + dilate=replace_stride_with_dilation[0], + dilation=self.dilations[1], + **self.reslayer_common_cfg + ), + # layer 3 + dict( + inplanes=128 * self.block_expansion, + planes=256, + blocks=layers[2], + stride=2, + dilate=replace_stride_with_dilation[1], + dilation=self.dilations[2], + **self.reslayer_common_cfg + ), + # layer 4 + dict( + inplanes=256 * self.block_expansion, + planes=512, + blocks=layers[3], stride=2, + dilate=replace_stride_with_dilation[2], + dilation=self.dilations[3], + **self.reslayer_common_cfg + ), + # avg pool + dict( + type='AdaptiveAvgPool2d', + output_size=(1, 1) + ), + # flatten + dict( + type='LambdaWrapper', + func=lambda mod, x: torch.flatten(x, 1) + ), + # linear + dict( + type='Linear', + in_features=512 * self.block_expansion, + out_features=num_cls + ) + ] +``` + +You can set the number of pipeline stages in your configuration file. When the pipeline size is larger than 1, Colossal-AI +will automatically create the pipeline schedule, which defines the forward and backward steps. You can specify how many microbatches +to run in each step in the `schedule` configuration. + +```python +parallel = dict( + pipeline=dict(size=1), # number of pipeline stages + tensor=dict(size=1, mode=None) +) + +schedule = dict( + num_microbatches = 4 # set the number of microbatches per step +) +``` + +## 1D, 2D, 2.5D and 3D Parallel +To enable hybrid parallelism, we provide an array of tensor parallel methods. The papers that correspond to each +tensor parallel method are listed below. These parallel modes need to work with the distributed layers provided by Colossal-AI. +- 1D: [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) + +- 2D: [An Efficient 2D Method for Training Super-Large Deep Learning Models](https://arxiv.org/abs/2104.05343) +2D parallelism relies on the SUMMA matrix multiplication algorithm and splits the input data, +model weights and layer outputs along two different dimensions. The tensor chunks are distributed over a 2D mesh of $P = N^2$ +devices, where N is the number of tensor chunks in a single dimension. 
+ +- 2.5D: [2.5-dimensional distributed model training](https://arxiv.org/abs/2105.14500) +Inspired by the 2.5D matrix multiplication algorithm, 2.5D parallelism introduces a novel tensor parallelism which further +parallelizes 2D tensor parallelism. A total of $P = N^2 \times d$ processors are arranged into $d$ layers, +where each layer performs matrix multiplication operations independently with dimension $N$. + +- 3D: [Maximizing Parallelism in Distributed Training for Huge Neural Networks](https://arxiv.org/abs/2105.14450) +We also introduce a 3D tensor parallelism that parallelizes neural networks on a 3D processor cube. This method achieves +the optimal $O(P^{1/3})$ communication overhead on $P$ processors, while both computation and memory usage are evenly distributed +through optimized load balancing of parameters as well as activations. + + + +```python +# 1D parallel +parallel = dict( + pipeline=dict(size=1), # number of pipeline stages + tensor=dict(size=4, mode='1d') +) + +# 2D parallel +parallel = dict( + pipeline=dict(size=1), # number of pipeline stages + tensor=dict(size=4, mode='2d') +) + +# 2.5D parallel +parallel = dict( + pipeline=dict(size=1), # number of pipeline stages + tensor=dict(size=8, mode='2.5d', depth=2) +) + +# 3D parallel +parallel = dict( + pipeline=dict(size=1), # number of pipeline stages + tensor=dict(size=8, mode='3d') +) +``` + + +## Sequence Parallel (experimental) + +Sequence parallelism is designed to support long-sequence modelling such as document-level text understanding and medical imaging. +This method is proposed in [Sequence Parallelism: Making 4D Parallelism Possible](https://arxiv.org/abs/2105.13120). +This feature is still in development and is only experimental for now. \ No newline at end of file diff --git a/docs/run_demo.md b/docs/run_demo.md new file mode 100644 index 000000000..91b0871b3 --- /dev/null +++ b/docs/run_demo.md @@ -0,0 +1,74 @@ +# Quick demo + +ColossalAI is an integrated large-scale deep learning framework with efficient parallelization techniques. The framework +can accelerate model training on distributed systems with multiple GPUs by applying these techniques. The +framework can also run on systems with only one GPU. Quick demos showing how to use ColossalAI are given below. + +## Single GPU + +ColossalAI can be used to train deep learning models on systems with only one GPU and achieve baseline +performance. [Here](https://colab.research.google.com/drive/1fJnqqFzPuzZ_kn1lwCpG2nh3l2ths0KE?usp=sharing#scrollTo=cQ_y7lBG09LS) +is an example showing how to train a LeNet model on the CIFAR10 dataset using ColossalAI. + +## Multiple GPUs + +ColossalAI can be used to train deep learning models on distributed systems with multiple GPUs and accelerate the +training process drastically by applying efficient parallelization techniques, which will be elaborated in +the [Parallelization](parallelization.md) section below. Run the code below on your distributed system with 4 GPUs, +where `HOST` is the IP address of your system. Note that we use +the [Slurm](https://slurm.schedmd.com/documentation.html) job scheduling system here. + +```bash +HOST=xxx.xxx.xxx.xxx srun ./scripts/slurm_dist_train.sh ./example/train_vit_2d.py ./configs/vit/vit_2d.py +``` + +`./configs/vit/vit_2d.py` is a config file, which is introduced in the [Config file](config.md) section below. These +config files are used by ColossalAI to define all kinds of training arguments, such as the model, dataset and training +method (optimizer, lr_scheduler, epoch, etc.). 
Config files are highly customizable and can be modified to train +different models. +`./example/run_trainer.py` contains a standard training script and is presented below; it reads the config file and +carries out the training process. + +```python +import colossalai +from colossalai.engine import Engine +from colossalai.trainer import Trainer +from colossalai.core import global_context as gpc + +model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = colossalai.initialize() +engine = Engine( + model=model, + criterion=criterion, + optimizer=optimizer, + lr_scheduler=lr_scheduler, + schedule=schedule +) + +trainer = Trainer(engine=engine, + hooks_cfg=gpc.config.hooks, + verbose=True) +trainer.fit( + train_dataloader=train_dataloader, + test_dataloader=test_dataloader, + max_epochs=gpc.config.num_epochs, + display_progress=True, + test_interval=5 +) +``` + +Alternatively, the `model` variable can be substituted with a self-defined model or a pre-defined model in our Model +Zoo. The detailed substitution process is elaborated [here](model.md). + +## Features + +ColossalAI provides a collection of parallel training components for you. We aim to support you in developing +distributed deep learning models just like how you write single-GPU deep learning models. We provide friendly tools to +kickstart distributed training in a few lines. + +- [Data Parallelism](parallelization.md) +- [Pipeline Parallelism](parallelization.md) +- [1D, 2D, 2.5D, 3D and sequence parallelism](parallelization.md) +- [Friendly trainer and engine](trainer_engine.md) +- [Extensible for new parallelism](add_your_parallel.md) +- [Mixed Precision Training](amp.md) +- [Zero Redundancy Optimizer (ZeRO)](zero.md) diff --git a/docs/trainer_engine.md b/docs/trainer_engine.md new file mode 100644 index 000000000..bf0ee2f13 --- /dev/null +++ b/docs/trainer_engine.md @@ -0,0 +1,90 @@ +# Build your engine & Customize your trainer + +## Build your engine + +To better understand the function of the `Engine` class, you should know the concept of the process function in common engines. The process function usually controls the behavior over a batch of a dataset, and the `Engine` class simply controls the process function. For example, a common process function looks like this: + +```python +def process_function(dataloader, model, criterion, optim): + optim.zero_grad() + data, label = next(dataloader) + output = model(data) + loss = criterion(output, label) + loss.backward() + optim.step() +``` + +In `ignite.engine` or `keras.engine`, the process function is always provided by users. However, it is hard for users to write their own functions for pipeline parallelism. Aiming at accessible hybrid parallelism for users, we provide the powerful `Engine` class. It enables pipeline parallelism and offers the 1F1B non-interleaving strategy. Also, you can use a pre-defined learning rate scheduler in your `Engine` to adjust the learning rate during training. + +In order to build your engine, just set the model, criterion, optimizer, learning rate scheduler and schedule. Consider the following code as an example. 
+
+```python
+import torch
+import torch.nn as nn
+import torchvision.models as models
+import colossalai
+from colossalai.engine import Engine, NoPipelineSchedule
+from colossalai.nn.lr_scheduler import CosineAnnealingLR
+
+
+model = models.resnet18()
+criterion = nn.CrossEntropyLoss()
+optimizer = torch.optim.Adam(model.parameters())
+lr_scheduler = CosineAnnealingLR(optimizer, 1000)
+schedule = NoPipelineSchedule()
+
+MyEngine = Engine(
+    model=model,
+    criterion=criterion,
+    optimizer=optimizer,
+    lr_scheduler=lr_scheduler,
+    schedule=schedule
+)
+```
+
+More information can be found in the API reference.
+
+## Customize your trainer
+
+### Overview
+
+Before you start customizing a trainer to meet your needs, you should have a basic understanding of the `Trainer` class. We recommend you read the *Get Started* section and *Build your engine* first.
+
+The `Trainer` class enables researchers and engineers to use our framework more conveniently. Instead of writing your own scripts, you can simply construct a `Trainer` with your own `Engine` by calling `MyTrainer = Trainer(MyEngine)`, and then use the `fit` method to train or evaluate your model. In order to make our `Trainer` class more powerful, we add some useful features to it, such as monitoring and recording running states and metrics which indicate the model's performance, or saving a checkpoint after a training epoch.
+
+To accomplish that, specific actions must be added to the training or evaluation loop. The `BaseHook` class allows you to add the desired actions at specific time points. We have already created practical hooks for those useful features. What you need to do is just pick the hooks you want.
+
+More detailed class descriptions can be found in the API reference.
+
+### Example
+
+```python
+hooks = [
+    dict(type='LogMetricByEpochHook'),
+    dict(type='LogTimingByEpochHook'),
+    dict(type='LogMemoryByEpochHook'),
+    dict(type='AccuracyHook'),
+    dict(type='LossHook'),
+    # dict(type='TensorboardHook', log_dir='./tfb_logs'),
+    # dict(type='SaveCheckpointHook', interval=5, checkpoint_dir='./ckpt'),
+    # dict(type='LoadCheckpointHook', epoch=20, checkpoint_dir='./ckpt')
+]
+```
+
+The hooks above will record metrics, elapsed time and memory usage to a log every epoch. They also print the loss and accuracy so that users can monitor the performance of the model.
+
+### Hook
+
+You can extend our `BaseHook` class. Hooks can be called at twelve time points. More detailed information can be found in the API reference.
+
+Alternatively, you can extend `MetricHook` to write a metric collector. You should also use the decorator `@HOOKS.register_module` for your own hook class, and import it in your main python script.
+
+`after_train_iter()` receives the output of the engine for each iteration, which is a list including the output, the label and the loss.
+
+Note that you can define the priority to arrange the execution order of all hooks.
+
+### Metric
+
+You can write your own metric by extending the `Metric` class. It is always used with `MetricHook`. If you write your own metric hooks, please set the priority carefully and make sure the hook is called before other hooks which may use the results of the metrics.
+
+We've already provided some metric hooks. We store metric objects in `runner.states['metrics']`. It is a dictionary and you can use the name of the metric to access it.
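+
+As an illustration only, a custom hook might be sketched as below. The class name `SimpleLossLoggerHook` is made up for this example, and the import paths, base-class constructor and callback signatures are assumptions that should be checked against the API reference.
+
+```python
+from colossalai.logging import get_global_dist_logger
+from colossalai.registry import HOOKS           # assumed location of the HOOKS registry
+from colossalai.trainer.hooks import BaseHook   # assumed location of BaseHook
+
+
+@HOOKS.register_module
+class SimpleLossLoggerHook(BaseHook):
+    """Hypothetical hook that logs the loss of every training iteration."""
+
+    def __init__(self, priority: int = 10):
+        # priority controls where this hook runs relative to other hooks
+        super().__init__(priority=priority)
+
+    def after_train_iter(self, output, label, loss):
+        # the engine returns (output, label, loss) for each training iteration
+        get_global_dist_logger().info(f'train loss: {loss.item():.4f}')
+```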
\ No newline at end of file diff --git a/docs/zero.md b/docs/zero.md new file mode 100644 index 000000000..f81745ac5 --- /dev/null +++ b/docs/zero.md @@ -0,0 +1,81 @@ +# Zero Redundancy Optimizer and Zero Offload + +The Zero Redundancy Optimizer (ZeRO) removes the memory redundancies across data-parallel processes by partitioning the three model states (optimizer states, gradients, and parameters) across data-parallel processes instead of replicating them. By doing this, it boosts memory efficiency compared to classic data-parallelism while retaining its computational granularity and communication efficiency. + +1. **ZeRO Level 1**: The optimizer states (e.g., for [Adam optimizer](https://arxiv.org/abs/1412.6980), 32-bit weights, and the first, and second moment estimates) are partitioned across the processes, so that each process updates only its partition. +2. **ZeRO Level 2**: The reduced 32-bit gradients for updating the model weights are also partitioned such that each process retains only the gradients corresponding to its portion of the optimizer states. +3. **ZeRO Level 3**: The 16-bit model parameters are partitioned across the processes. ZeRO-3 will automatically collect and partition them during the forward and backward passes. + +## Getting Started + +Once you are training with ColossalAI, enabling ZeRO-3 offload is as simple as enabling it in your ColossalAI configuration! Below are a few examples of ZeRO-3 configurations. + +### Example ZeRO-3 Configurations + +Here we use ``Adam`` as the initial optimizer. + +1. Use ZeRO to partition the optimizer states (level 1), gradients (level 2), and parameters (level 3). + ```python + optimizer = dict( + type='Adam', + lr=0.001, + weight_decay=0 + ) + + zero = dict( + type='ZeroRedundancyOptimizer_Level_3', + dynamic_loss_scale=True, + clip_grad=1.0 + ) + ``` +2. Additionally offload the optimizer states and computations to the CPU. + ```python + zero = dict( + offload_optimizer_config=dict( + device='cpu', + pin_memory=True, + fast_init=True + ), + ... + ) + ``` +3. Save even more memory by offloading parameters to the CPU memory. + ```python + zero = dict( + offload_optimizer_config=dict( + device='cpu', + pin_memory=True, + fast_init=True + ), + offload_param_config=dict( + device='cpu', + pin_memory=True, + fast_init=OFFLOAD_PARAM_MAX_IN_CPU + ), + ... + ) + ``` +4. Save even MORE memory by offloading to NVMe (if available on your system): + ```python + zero = dict( + offload_optimizer_config=dict( + device='nvme', + pin_memory=True, + fast_init=True, + nvme_path='/nvme_data' + ), + offload_param_config=dict( + device='nvme', + pin_memory=True, + max_in_cpu=OFFLOAD_PARAM_MAX_IN_CPU, + nvme_path='/nvme_data' + ), + ... + ) + ``` + +Note that ``fp16`` is automatically enabled when using ZeRO. + +### Training + +Once you complete your configuration, just use `colossalai.initialize()` to initialize your training. All you need to do is to write your configuration. 
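+
+As a rough end-to-end sketch, the relevant part of a config file could combine the optimizer and ZeRO sections shown above as follows. Only these two sections are shown; the rest of the config (model, dataset, etc.) is omitted, and the field names simply mirror the examples above.
+
+```python
+# config.py (sketch): ZeRO-3 with optimizer-state offload to CPU
+optimizer = dict(
+    type='Adam',
+    lr=0.001,
+    weight_decay=0
+)
+
+zero = dict(
+    type='ZeroRedundancyOptimizer_Level_3',
+    dynamic_loss_scale=True,
+    clip_grad=1.0,
+    offload_optimizer_config=dict(
+        device='cpu',
+        pin_memory=True,
+        fast_init=True
+    )
+)
+```
+
+With such a config in place, `colossalai.initialize()` is expected to build the ZeRO optimizer for you, and `fp16` is enabled automatically as noted above.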
\ No newline at end of file diff --git a/examples/colossal_cifar_demo.ipynb b/examples/colossal_cifar_demo.ipynb new file mode 100644 index 000000000..2ad9022c9 --- /dev/null +++ b/examples/colossal_cifar_demo.ipynb @@ -0,0 +1,370 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "colossal_cifar_demo.ipynb", + "provenance": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "uhrbvVEh2iJd" + }, + "source": [ + "# Train an image classifier\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "vP7LvCpG23a2", + "outputId": "b37f7203-8a02-4736-c527-603f2bb34d7d" + }, + "source": [ + "!pip install ColossalAI deepspeed" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Requirement already satisfied: ColossalAI in /usr/local/lib/python3.7/dist-packages (0.1)\n", + "Requirement already satisfied: deepspeed in /usr/local/lib/python3.7/dist-packages (0.5.4)\n", + "Requirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from deepspeed) (21.0)\n", + "Requirement already satisfied: triton in /usr/local/lib/python3.7/dist-packages (from deepspeed) (1.1.1)\n", + "Requirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from deepspeed) (4.62.3)\n", + "Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from deepspeed) (1.19.5)\n", + "Requirement already satisfied: tensorboardX==1.8 in /usr/local/lib/python3.7/dist-packages (from deepspeed) (1.8)\n", + "Requirement already satisfied: ninja in /usr/local/lib/python3.7/dist-packages (from deepspeed) (1.10.2.2)\n", + "Requirement already satisfied: torch in /usr/local/lib/python3.7/dist-packages (from deepspeed) (1.9.0+cu111)\n", + "Requirement already satisfied: psutil in /usr/local/lib/python3.7/dist-packages (from deepspeed) (5.4.8)\n", + "Requirement already satisfied: protobuf>=3.2.0 in /usr/local/lib/python3.7/dist-packages (from tensorboardX==1.8->deepspeed) (3.17.3)\n", + "Requirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from tensorboardX==1.8->deepspeed) (1.15.0)\n", + "Requirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging->deepspeed) (2.4.7)\n", + "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from torch->deepspeed) (3.7.4.3)\n", + "Requirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from triton->deepspeed) (3.3.0)\n" + ] + } + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "UVKEurtS4SFS", + "outputId": "99fb6050-5da7-4f27-b4eb-9b3ccf830efb" + }, + "source": [ + "import colossalai\n", + "from colossalai.engine import Engine, NoPipelineSchedule\n", + "from colossalai.trainer import Trainer\n", + "from colossalai.context import Config\n", + "import torch" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Please install apex to use FP16 Optimizer\n", + "Apex should be installed to use the FP16 optimizer\n", + "apex is required for mixed precision training\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "PpFfhNBD7NSn" + }, + "source": [ 
+ "First, we should initialize distributed environment. Though we just use single GPU in this example, we still need initialize distributed environment for compatibility. We just consider the simplest case here, so we just set the number of parallel processes to 1." + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "8yF7Lc-K7NAS", + "outputId": "01312349-a8b0-4de4-9103-7d1b48e6cc36" + }, + "source": [ + "parallel_cfg = Config(dict(parallel=dict(\n", + " data=dict(size=1),\n", + " pipeline=dict(size=1),\n", + " tensor=dict(size=1, mode=None),\n", + ")))\n", + "colossalai.init_dist(config=parallel_cfg,\n", + " local_rank=0,\n", + " world_size=1,\n", + " host='127.0.0.1',\n", + " port=8888,\n", + " backend='nccl')" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "colossalai - torch.distributed.distributed_c10d - 2021-10-15 03:27:51,596 INFO: Added key: store_based_barrier_key:1 to store for rank: 0\n", + "colossalai - torch.distributed.distributed_c10d - 2021-10-15 03:27:51,598 INFO: Rank 0: Completed store-based barrier for 1 nodes.\n", + "colossalai - torch.distributed.distributed_c10d - 2021-10-15 03:27:51,602 INFO: Added key: store_based_barrier_key:2 to store for rank: 0\n", + "colossalai - torch.distributed.distributed_c10d - 2021-10-15 03:27:51,605 INFO: Rank 0: Completed store-based barrier for 1 nodes.\n", + "colossalai - torch.distributed.distributed_c10d - 2021-10-15 03:27:51,608 INFO: Added key: store_based_barrier_key:3 to store for rank: 0\n", + "colossalai - torch.distributed.distributed_c10d - 2021-10-15 03:27:51,610 INFO: Rank 0: Completed store-based barrier for 1 nodes.\n" + ] + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "process rank 0 is bound to device 0\n", + "initialized seed on rank 0, numpy: 1024, python random: 1024, ParallelMode.DATA: 1024, ParallelMode.TENSOR: 1124,the default parallel seed is ParallelMode.DATA.\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ppjmMxc_81TK" + }, + "source": [ + "Load and normalize the CIFAR10 training and test datasets using `colossalai.nn.data`. Note that we have wrapped `torchvision.transforms`, so that we can simply use the config dict to use them." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "ZyGhyD47-dUY", + "colab": { + "base_uri": "https://localhost:8080/" + }, + "outputId": "98bbf2d1-a1c4-4bb4-b6df-600777b1e8f5" + }, + "source": [ + "transform_cfg = [\n", + " dict(type='ToTensor'),\n", + " dict(type='Normalize',\n", + " mean=[0.4914, 0.4822, 0.4465],\n", + " std=[0.2023, 0.1994, 0.2010]),\n", + "]\n", + "\n", + "batch_size = 128\n", + "\n", + "trainset = colossalai.nn.data.CIFAR10Dataset(transform_cfg, root='./data', train=True)\n", + "trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)\n", + "\n", + "testset = colossalai.nn.data.CIFAR10Dataset(transform_cfg, root='./data', train=False)\n", + "testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2)" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Files already downloaded and verified\n", + "Files already downloaded and verified\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "NvPbfLLR9NzC" + }, + "source": [ + "We just define a simple Convolutional Neural Network here." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "cQ_y7lBG09LS" + }, + "source": [ + "import torch.nn as nn\n", + "import torch.nn.functional as F\n", + "\n", + "\n", + "class Net(nn.Module):\n", + " def __init__(self):\n", + " super().__init__()\n", + " self.conv1 = nn.Conv2d(3, 6, 5)\n", + " self.pool = nn.MaxPool2d(2, 2)\n", + " self.conv2 = nn.Conv2d(6, 16, 5)\n", + " self.fc1 = nn.Linear(16 * 5 * 5, 120)\n", + " self.fc2 = nn.Linear(120, 84)\n", + " self.fc3 = nn.Linear(84, 10)\n", + "\n", + " def forward(self, x):\n", + " x = self.pool(F.relu(self.conv1(x)))\n", + " x = self.pool(F.relu(self.conv2(x)))\n", + " x = torch.flatten(x, 1) # flatten all dimensions except batch\n", + " x = F.relu(self.fc1(x))\n", + " x = F.relu(self.fc2(x))\n", + " x = self.fc3(x)\n", + " return x\n", + "\n", + "\n", + "model = Net().cuda()" + ], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tgsszAmM9dYZ" + }, + "source": [ + "Define a Loss function and optimizer. And then we use them to initialize `Engine` and `Trainer`. We provide various training / evaluating hooks. In this case, we just use the simplest hooks which can compute and print loss and accuracy." + ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "YtaDoCax1BCf", + "outputId": "b33b1641-03d8-4597-c8c2-1a4c1d61e9b0" + }, + "source": [ + "import torch.optim as optim\n", + "\n", + "criterion = nn.CrossEntropyLoss()\n", + "optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\n", + "schedule = NoPipelineSchedule()\n", + "engine = Engine(\n", + " model=model,\n", + " criterion=criterion,\n", + " optimizer=optimizer,\n", + " lr_scheduler=None,\n", + " schedule=schedule\n", + " )\n", + "trainer = Trainer(engine=engine,\n", + " hooks_cfg=[dict(type='LossHook'), dict(type='LogMetricByEpochHook'), dict(type='AccuracyHook')],\n", + " verbose=True)" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "colossalai - rank_0 - 2021-10-15 03:27:56,018 WARNING: No gradient handler is set up, please make sure you do not need to all-reduce the gradients after a training step.\n", + "colossalai - rank_0 - 2021-10-15 03:27:56,024 INFO: build LogMetricByEpochHook for train, priority = 1\n", + "colossalai - rank_0 - 2021-10-15 03:27:56,026 INFO: build LossHook for train, priority = 10\n", + "colossalai - rank_0 - 2021-10-15 03:27:56,029 INFO: build AccuracyHook for train, priority = 10\n" + ] + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "_JR2TuvH99Ik" + }, + "source": [ + "Then we set training configs. We train our model for 10 epochs and it will be evaluated every 1 epoch. Set `display_progress` to `True` to display the training / evaluating progress bar." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "w-J3IP-J1sfx", + "outputId": "bdb76939-04f1-4124-ce5e-3af44c0d902c" + }, + "source": [ + "num_epochs = 10\n", + "test_interval = 1\n", + "trainer.fit(\n", + " train_dataloader=trainloader,\n", + " test_dataloader=testloader,\n", + " max_epochs=num_epochs,\n", + " display_progress=True,\n", + " test_interval=test_interval\n", + " )" + ], + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "[Epoch 0 train]: 0%| | 0/391 [00:00 b (h w) (p1 p2 c)', + p1=patch_size, p2=patch_size), + linear((patch_size ** 2) * channels, dim), + *[nn.Sequential( + PreNormResidual(dim, FeedForward( + num_patches, expansion_factor, dropout, chan_first)), + PreNormResidual(dim, FeedForward( + dim, expansion_factor, dropout, chan_last)) + ) for _ in range(depth)], + norm_layer(dim), + Reduce('b n c -> b c', 'mean'), + linear(dim, num_classes) + ) diff --git a/model_zoo/vit/__init__.py b/model_zoo/vit/__init__.py new file mode 100644 index 000000000..6e009854d --- /dev/null +++ b/model_zoo/vit/__init__.py @@ -0,0 +1,2 @@ +from .parallel_2d import * +from .parallel_3d import * diff --git a/model_zoo/vit/parallel_1d/.init b/model_zoo/vit/parallel_1d/.init new file mode 100644 index 000000000..e69de29bb diff --git a/model_zoo/vit/parallel_2d/__init__.py b/model_zoo/vit/parallel_2d/__init__.py new file mode 100644 index 000000000..5e5f1941d --- /dev/null +++ b/model_zoo/vit/parallel_2d/__init__.py @@ -0,0 +1 @@ +from .vit import * \ No newline at end of file diff --git a/model_zoo/vit/parallel_2d/vit.py b/model_zoo/vit/parallel_2d/vit.py new file mode 100644 index 000000000..18a1dfb0f --- /dev/null +++ b/model_zoo/vit/parallel_2d/vit.py @@ -0,0 +1,219 @@ +from colossalai.context import ParallelMode, seed +from colossalai import nn as clsl_nn +from colossalai.registry import MODELS +from torch import nn +import torch + + +__all__ = [ + 'VisionTransformer2D', + 'vit_tiny_2d_patch4_32', + 'vit_tiny_2d_patch16_224', + 'vit_tiny_2d_patch16_384', + 'vit_small_2d_patch16_224', + 'vit_small_2d_patch16_384', + 'vit_small_2d_patch32_224', + 'vit_small_2d_patch32_384', + 'vit_base_2d_patch16_224', + 'vit_base_2d_patch16_384', + 'vit_base_2d_patch32_224', + 'vit_base_2d_patch32_384', + 'vit_large_2d_patch16_224', + 'vit_large_2d_patch16_384', + 'vit_large_2d_patch32_224', + 'vit_large_2d_patch32_384', +] + + +class ViTBlock2D(nn.Module): + + def __init__(self, + dim: int, + num_heads: int, + mlp_ratio: int = 4, + drop: float = 0., + attn_drop: float = 0., + drop_path: float = 0., + act_layer: str = 'gelu'): + super().__init__() + self.norm1 = clsl_nn.LayerNorm2D(dim, eps=1e-6) + self.attn = clsl_nn.ViTSelfAttention2D(dim, num_heads, attn_drop, drop) + self.drop_path = clsl_nn.VanillaViTDropPath(drop_path) if drop_path > 0. 
\ + else nn.Identity() + self.norm2 = clsl_nn.LayerNorm2D(dim, eps=1e-6) + self.mlp = clsl_nn.ViTMLP2D(dim, mlp_ratio, act_layer, drop) + + def forward(self, x): + y = self.attn(self.norm1(x)) + with seed(ParallelMode.TENSOR): + x = x + self.drop_path(y) + y = self.mlp(self.norm2(x)) + with seed(ParallelMode.TENSOR): + x = x + self.drop_path(y) + return x + + +@MODELS.register_module +class VisionTransformer2D(nn.Module): + + def __init__(self, + img_size: int = 224, + patch_size: int = 16, + in_chans: int = 3, + num_classes: int = 1000, + embed_dim: int = 768, + depth: int = 12, + num_heads: int = 12, + mlp_ratio: int = 4, + drop_rate: float = 0., + attn_drop_rate: float = 0., + drop_path_rate: float = 0., + act_layer: str = 'gelu'): + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim + + self.patch_embed = clsl_nn.ViTPatchEmbedding2D( + img_size, patch_size, embed_dim, in_chans + ) + + self.splitter = clsl_nn.ViTInputSplitter2D() + + self.token_fuser = clsl_nn.ViTTokenFuser2D( + img_size, patch_size, embed_dim, drop_rate + ) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] + self.blocks = nn.Sequential(*[ + ViTBlock2D(embed_dim, num_heads, mlp_ratio, drop_rate, + attn_drop_rate, dpr[i], act_layer) + for i in range(depth) + ]) + + self.norm = clsl_nn.LayerNorm2D(embed_dim, eps=1e-6) + self.head = clsl_nn.ViTHead2D(self.num_features, num_classes) if num_classes > 0 \ + else nn.Identity() + + self.init_weights() + + def init_weights(self): + pass + + def forward(self, x): + x = self.patch_embed(x) + x = self.splitter(x) + x = self.token_fuser(x) + x = self.blocks(x) + x = self.norm(x) + x = self.head(x) + return x + + +def _create_vit_model(**model_kwargs): + model = VisionTransformer2D(**model_kwargs) + return model + + +@MODELS.register_module +def vit_tiny_2d_patch4_32(**kwargs): + model_kwargs = dict(img_size=32, patch_size=4, embed_dim=512, + depth=6, num_heads=8, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_tiny_2d_patch16_224(**kwargs): + model_kwargs = dict(patch_size=16, embed_dim=192, + depth=12, num_heads=3, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_tiny_2d_patch16_384(**kwargs): + model_kwargs = dict(img_size=384, patch_size=16, embed_dim=192, + depth=12, num_heads=3, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_small_2d_patch16_224(**kwargs): + model_kwargs = dict(patch_size=16, embed_dim=384, + depth=12, num_heads=6, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_small_2d_patch16_384(**kwargs): + model_kwargs = dict(img_size=384, patch_size=16, embed_dim=384, + depth=12, num_heads=6, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_small_2d_patch32_224(**kwargs): + model_kwargs = dict(patch_size=32, embed_dim=384, + depth=12, num_heads=6, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_small_2d_patch32_384(**kwargs): + model_kwargs = dict(img_size=384, patch_size=32, embed_dim=384, + depth=12, num_heads=6, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_base_2d_patch16_224(**kwargs): + model_kwargs = dict(patch_size=16, embed_dim=768, + depth=12, num_heads=12, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_base_2d_patch16_384(**kwargs): + model_kwargs = 
dict(img_size=384, patch_size=16, embed_dim=768, + depth=12, num_heads=12, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_base_2d_patch32_224(**kwargs): + model_kwargs = dict(patch_size=32, embed_dim=768, + depth=12, num_heads=12, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_base_2d_patch32_384(**kwargs): + model_kwargs = dict(img_size=384, patch_size=32, embed_dim=768, + depth=12, num_heads=12, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_large_2d_patch16_224(**kwargs): + model_kwargs = dict(patch_size=16, embed_dim=1024, + depth=24, num_heads=16, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_large_2d_patch16_384(**kwargs): + model_kwargs = dict(img_size=384, patch_size=16, embed_dim=1024, + depth=24, num_heads=16, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_large_2d_patch32_224(**kwargs): + model_kwargs = dict(patch_size=32, embed_dim=1024, + depth=24, num_heads=16, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_large_2d_patch32_384(**kwargs): + model_kwargs = dict(img_size=384, patch_size=32, embed_dim=1024, + depth=24, num_heads=16, **kwargs) + return _create_vit_model(**model_kwargs) \ No newline at end of file diff --git a/model_zoo/vit/parallel_2p5d/.init b/model_zoo/vit/parallel_2p5d/.init new file mode 100644 index 000000000..e69de29bb diff --git a/model_zoo/vit/parallel_3d/__init__.py b/model_zoo/vit/parallel_3d/__init__.py new file mode 100644 index 000000000..a547126b2 --- /dev/null +++ b/model_zoo/vit/parallel_3d/__init__.py @@ -0,0 +1 @@ +from .vit import * diff --git a/model_zoo/vit/parallel_3d/vit.py b/model_zoo/vit/parallel_3d/vit.py new file mode 100644 index 000000000..242409444 --- /dev/null +++ b/model_zoo/vit/parallel_3d/vit.py @@ -0,0 +1,209 @@ +import torch +from torch import nn + +from colossalai import nn as col_nn +from colossalai.context import ParallelMode +from colossalai.registry import MODELS + +__all__ = [ + 'VisionTransformer3D', + 'vit_tiny_3d_patch4_32', + 'vit_tiny_3d_patch16_224', + 'vit_tiny_3d_patch16_384', + 'vit_small_3d_patch16_224', + 'vit_small_3d_patch16_384', + 'vit_small_3d_patch32_224', + 'vit_small_3d_patch32_384', + 'vit_base_3d_patch16_224', + 'vit_base_3d_patch16_384', + 'vit_base_3d_patch32_224', + 'vit_base_3d_patch32_384', + 'vit_large_3d_patch16_224', + 'vit_large_3d_patch16_384', + 'vit_large_3d_patch32_224', + 'vit_large_3d_patch32_384', +] + + +class ViTBlock3D(nn.Module): + def __init__(self, + dim: int, + num_heads: int, + hidden_dim: int, + drop: float = 0., + attn_drop: float = 0., + drop_path: float = 0.): + super().__init__() + self.norm1 = col_nn.LayerNorm3D( + dim, ParallelMode.PARALLEL_3D_INPUT, ParallelMode.PARALLEL_3D_WEIGHT, eps=1e-6) + self.attn = col_nn.ViTSelfAttention3D(dim, num_heads, attn_drop, drop) + self.drop_path = col_nn.VanillaViTDropPath( + drop_path) if drop_path > 0. 
else nn.Identity() + self.norm2 = col_nn.LayerNorm3D(dim, ParallelMode.PARALLEL_3D_INPUT, ParallelMode.PARALLEL_3D_WEIGHT, eps=1e-6) + self.mlp = col_nn.ViTMLP3D(hidden_dim, 1, drop, 'gelu') + + def forward(self, x): + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +@MODELS.register_module +class VisionTransformer3D(nn.Module): + def __init__(self, + img_size: int = 224, + patch_size: int = 16, + in_chans: int = 3, + num_classes: int = 1000, + depth: int = 12, + num_heads: int = 12, + embed_dim: int = 768, + hidden_dim: int = 3072, + drop_rate: float = 0., + attn_drop_rate: float = 0., + drop_path_rate: float = 0.): + super().__init__() + self.num_classes = num_classes + self.num_features = self.embed_dim = embed_dim + + self.patch_embed = col_nn.ViTPatchEmbedding3D( + img_size, + patch_size, + in_chans, + embed_dim, + drop_rate, + ) + + # stochastic depth decay rule + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] + self.blocks = nn.Sequential(*[ + ViTBlock3D(embed_dim, num_heads, hidden_dim, + drop_rate, attn_drop_rate, dpr[i]) + for i in range(depth) + ]) + + self.norm = col_nn.LayerNorm3D(embed_dim, ParallelMode.PARALLEL_3D_INPUT, + ParallelMode.PARALLEL_3D_WEIGHT) + + self.head = col_nn.ViTHead3D(hidden_dim, num_classes) + self.init_weights() + + def init_weights(self): + pass + + def forward(self, x): + x = self.patch_embed(x) + x = self.blocks(x) + x = self.norm(x) + x = self.head(x) + return x + + +def _create_vit_model(**model_kwargs): + model = VisionTransformer3D(**model_kwargs) + return model + + +@MODELS.register_module +def vit_tiny_3d_patch4_32(**kwargs): + model_kwargs = dict(img_size=32, patch_size=4, embed_dim=512, + depth=6, num_heads=8, hidden_dim=512, num_classes=10, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_tiny_3d_patch16_224(**kwargs): + model_kwargs = dict(patch_size=16, embed_dim=192, + depth=12, num_heads=3, hidden_dim=768, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_tiny_3d_patch16_384(**kwargs): + model_kwargs = dict(img_size=384, patch_size=16, + embed_dim=192, depth=12, num_heads=3, hidden_dim=768, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_small_3d_patch16_224(**kwargs): + model_kwargs = dict(patch_size=16, embed_dim=384, + depth=12, num_heads=6, hidden_dim=1536, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_small_3d_patch16_384(**kwargs): + model_kwargs = dict(img_size=384, patch_size=16, + embed_dim=384, depth=12, num_heads=6, hidden_dim=1536, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_small_3d_patch32_224(**kwargs): + model_kwargs = dict(patch_size=32, embed_dim=384, + depth=12, num_heads=6, hidden_dim=1536, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_small_3d_patch32_384(**kwargs): + model_kwargs = dict(img_size=384, patch_size=32, + embed_dim=384, depth=12, num_heads=6, hidden_dim=1536, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_base_3d_patch16_224(**kwargs): + model_kwargs = dict(patch_size=16, embed_dim=768, + depth=12, num_heads=12, hidden_dim=3072, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_base_3d_patch16_384(**kwargs): + model_kwargs = dict(img_size=384, patch_size=16, + embed_dim=768, 
depth=12, num_heads=12, hidden_dim=3072, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_base_3d_patch32_224(**kwargs): + model_kwargs = dict(patch_size=32, embed_dim=768, + depth=12, num_heads=12, hidden_dim=3072, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_base_3d_patch32_384(**kwargs): + model_kwargs = dict(img_size=384, patch_size=32, + embed_dim=768, depth=12, num_heads=12, hidden_dim=3072, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_large_3d_patch16_224(**kwargs): + model_kwargs = dict(patch_size=16, embed_dim=1024, + depth=24, num_heads=16, hidden_dim=4096, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_large_3d_patch16_384(**kwargs): + model_kwargs = dict(img_size=384, patch_size=16, + embed_dim=1024, depth=24, num_heads=16, hidden_dim=4096, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_large_3d_patch32_224(**kwargs): + model_kwargs = dict(patch_size=32, embed_dim=1024, + depth=24, num_heads=16, hidden_dim=4096, **kwargs) + return _create_vit_model(**model_kwargs) + + +@MODELS.register_module +def vit_large_3d_patch32_384(**kwargs): + model_kwargs = dict(img_size=384, patch_size=32, + embed_dim=1024, depth=24, num_heads=16, hidden_dim=4096, **kwargs) + return _create_vit_model(**model_kwargs) diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 000000000..ac31ace4b --- /dev/null +++ b/pytest.ini @@ -0,0 +1,6 @@ +[pytest] +markers = + cpu: tests which can run on CPU + gpu: tests which requires a single GPU + dist: tests which are run in a multi-GPU or multi-machine environment + experiment: tests for experimental features \ No newline at end of file diff --git a/requirements/requirements-test.txt b/requirements/requirements-test.txt new file mode 100644 index 000000000..69b82ff84 --- /dev/null +++ b/requirements/requirements-test.txt @@ -0,0 +1,3 @@ +pytest +rpyc +matplotlib \ No newline at end of file diff --git a/requirements/requirements.txt b/requirements/requirements.txt new file mode 100644 index 000000000..3d8f76698 --- /dev/null +++ b/requirements/requirements.txt @@ -0,0 +1,7 @@ +torch>=1.8 +torchvision>=0.9 +numpy +tqdm +psutil +tensorboardX +packaging \ No newline at end of file diff --git a/scripts/slurm_dist_train.sh b/scripts/slurm_dist_train.sh new file mode 100644 index 000000000..1d3d505f3 --- /dev/null +++ b/scripts/slurm_dist_train.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env sh + + +main_file=$1 +config_file=$2 + +python $main_file --local_rank $SLURM_PROCID --world_size $SLURM_NPROCS --host $HOST --port 29500 --config $config_file + +# how to run this script +# exmaple: +# HOST=IP_ADDR srun ./scripts/slurm_dist_train.sh ./examples/train_vit_2d.py ./configs/vit/vit_2d.py \ No newline at end of file diff --git a/setup.py b/setup.py new file mode 100644 index 000000000..8c6e1ef64 --- /dev/null +++ b/setup.py @@ -0,0 +1,175 @@ +import os +import subprocess +import sys +import warnings + +import torch +from setuptools import setup, find_packages +from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME + +# ninja build does not work unless include_dirs are abs path +this_dir = os.path.dirname(os.path.abspath(__file__)) + + +def get_cuda_bare_metal_version(cuda_dir): + raw_output = subprocess.check_output( + [cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True) + output = raw_output.split() + release_idx = 
output.index("release") + 1 + release = output[release_idx].split(".") + bare_metal_major = release[0] + bare_metal_minor = release[1][0] + + return raw_output, bare_metal_major, bare_metal_minor + + +if not torch.cuda.is_available(): + # https://github.com/NVIDIA/apex/issues/486 + # Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(), + # which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command). + print('\nWarning: Torch did not find available GPUs on this system.\n', + 'If your intention is to cross-compile, this is not an error.\n' + 'By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n' + 'Volta (compute capability 7.0), Turing (compute capability 7.5),\n' + 'and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n' + 'If you wish to cross-compile for a single specific architecture,\n' + 'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n') + if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None: + _, bare_metal_major, _ = get_cuda_bare_metal_version(CUDA_HOME) + if int(bare_metal_major) == 11: + os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0" + else: + os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5" + +print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__)) +TORCH_MAJOR = int(torch.__version__.split('.')[0]) +TORCH_MINOR = int(torch.__version__.split('.')[1]) + +if TORCH_MAJOR == 0 and TORCH_MINOR < 4: + raise RuntimeError("Apex requires Pytorch 0.4 or newer.\n" + + "The latest stable release can be obtained from https://pytorch.org/") + +cmdclass = {} +ext_modules = [] + +extras = {} +if "--pyprof" in sys.argv: + string = "\n\nPyprof has been moved to its own dedicated repository and will " + \ + "soon be removed from Apex. Please visit\n" + \ + "https://github.com/NVIDIA/PyProf\n" + \ + "for the latest version." + warnings.warn(string, DeprecationWarning) + with open('requirements.txt') as f: + required_packages = f.read().splitlines() + extras['pyprof'] = required_packages + try: + sys.argv.remove("--pyprof") + except: + pass +else: + warnings.warn( + "Option --pyprof not specified. Not installing PyProf dependencies!") + +if "--cuda_ext" in sys.argv: + if TORCH_MAJOR == 0: + raise RuntimeError("--cuda_ext requires Pytorch 1.0 or later, " + "found torch.__version__ = {}".format(torch.__version__)) + + +def get_cuda_bare_metal_version(cuda_dir): + raw_output = subprocess.check_output( + [cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True) + output = raw_output.split() + release_idx = output.index("release") + 1 + release = output[release_idx].split(".") + bare_metal_major = release[0] + bare_metal_minor = release[1][0] + + return raw_output, bare_metal_major, bare_metal_minor + + +def check_cuda_torch_binary_vs_bare_metal(cuda_dir): + raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version( + cuda_dir) + torch_binary_major = torch.version.cuda.split(".")[0] + torch_binary_minor = torch.version.cuda.split(".")[1] + + print("\nCompiling cuda extensions with") + print(raw_output + "from " + cuda_dir + "/bin\n") + + if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor): + raise RuntimeError("Cuda extensions are being compiled with a version of Cuda that does " + + "not match the version used to compile Pytorch binaries. 
" + + "Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda) + + "In some cases, a minor-version mismatch will not cause later errors: " + + "https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. " + "You can try commenting out this check (at your own risk).") + + +# Set up macros for forward/backward compatibility hack around +# https://github.com/pytorch/pytorch/commit/4404762d7dd955383acee92e6f06b48144a0742e +# and +# https://github.com/NVIDIA/apex/issues/456 +# https://github.com/pytorch/pytorch/commit/eb7b39e02f7d75c26d8a795ea8c7fd911334da7e#diff-4632522f237f1e4e728cb824300403ac +version_ge_1_1 = [] +if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0): + version_ge_1_1 = ['-DVERSION_GE_1_1'] +version_ge_1_3 = [] +if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2): + version_ge_1_3 = ['-DVERSION_GE_1_3'] +version_ge_1_5 = [] +if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4): + version_ge_1_5 = ['-DVERSION_GE_1_5'] +version_dependent_macros = version_ge_1_1 + version_ge_1_3 + version_ge_1_5 + +if "--cuda_ext" in sys.argv: + sys.argv.remove("--cuda_ext") + + if CUDA_HOME is None: + raise RuntimeError( + "--cuda_ext was requested, but nvcc was not found. Are you sure your environment has nvcc available? If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.") + else: + check_cuda_torch_binary_vs_bare_metal(CUDA_HOME) + + ext_modules.append( + CUDAExtension(name='colossal_C', + sources=['csrc/colossal_C_frontend.cpp', + 'csrc/multi_tensor_sgd_kernel.cu', + 'csrc/multi_tensor_scale_kernel.cu', + 'csrc/multi_tensor_adam.cu', + 'csrc/multi_tensor_l2norm_kernel.cu', + 'csrc/multi_tensor_lamb.cu'], + extra_compile_args={'cxx': ['-O3'] + version_dependent_macros, + 'nvcc': ['-lineinfo', + '-O3', + # '--resource-usage', + '--use_fast_math'] + version_dependent_macros})) + +# Check, if ATen/CUDAGenerator.h is found, otherwise use the new ATen/CUDAGeneratorImpl.h, due to breaking change in https://github.com/pytorch/pytorch/pull/36026 +generator_flag = [] +torch_dir = torch.__path__[0] +if os.path.exists(os.path.join(torch_dir, 'include', 'ATen', 'CUDAGenerator.h')): + generator_flag = ['-DOLD_GENERATOR'] + + +def fetch_requirements(path): + with open(path, 'r') as fd: + return [r.strip() for r in fd.readlines()] + + +install_requires = fetch_requirements('requirements/requirements.txt') + +setup( + name='colossal-ai', + version='0.0.1-beta', + packages=find_packages(exclude=('csrc', + 'tests', + 'docs', + 'tests', + '*.egg-info',)), + description='An integrated large-scale model training framework with efficient parallelization techniques', + ext_modules=ext_modules, + cmdclass={'build_ext': BuildExtension} if ext_modules else {}, + extras_require=extras, + install_requires=install_requires, +) diff --git a/tests/test_config/sample_config.py b/tests/test_config/sample_config.py new file mode 100644 index 000000000..e48c70e14 --- /dev/null +++ b/tests/test_config/sample_config.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- +import os +from pathlib import Path + +train_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + download=True, + transform_pipeline=[ + dict(type='RandomResizedCrop', size=224), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) + ] + ), + dataloader=dict( + batch_size=64, + 
pin_memory=True, + num_workers=4, + sampler=dict( + type='DataParallelSampler', + shuffle=True, + ) + ) +) diff --git a/tests/test_config/test_load_config.py b/tests/test_config/test_load_config.py new file mode 100644 index 000000000..550af2a4a --- /dev/null +++ b/tests/test_config/test_load_config.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from pathlib import Path + +import pytest + +from colossalai.context.config import Config + + +@pytest.mark.cpu +def test_load_config(): + filename = Path(__file__).parent.joinpath('sample_config.py') + config = Config.from_file(filename) + + assert config.train_data, 'cannot access train data as attribute' + assert config.train_data.dataset, 'cannot access grandchild attribute' + assert isinstance(config.train_data.dataset.transform_pipeline[0], dict), \ + f'expected attribute transform_pipeline elements to be a dict, but found {type(config.train_data.dataset.transform_pipeline)}' diff --git a/tests/test_context/configs/parallel_2d_init.py b/tests/test_context/configs/parallel_2d_init.py new file mode 100644 index 000000000..6af884450 --- /dev/null +++ b/tests/test_context/configs/parallel_2d_init.py @@ -0,0 +1,10 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +parallel = dict( + pipeline=dict(size=2), + tensor=dict( + size=4, + mode='2d' + ) +) diff --git a/tests/test_context/configs/parallel_2p5d_init.py b/tests/test_context/configs/parallel_2p5d_init.py new file mode 100644 index 000000000..c2d896d38 --- /dev/null +++ b/tests/test_context/configs/parallel_2p5d_init.py @@ -0,0 +1,11 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +parallel = dict( + pipeline=dict(size=2), + tensor=dict( + size=8, + depth=2, + mode='2.5d' + ) +) diff --git a/tests/test_context/configs/parallel_3d_init.py b/tests/test_context/configs/parallel_3d_init.py new file mode 100644 index 000000000..0ec724f8b --- /dev/null +++ b/tests/test_context/configs/parallel_3d_init.py @@ -0,0 +1,10 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +parallel = dict( + pipeline=dict(size=2), + tensor=dict( + size=8, + mode='3d' + ) +) diff --git a/tests/test_context/test_2d_init.py b/tests/test_context/test_2d_init.py new file mode 100644 index 000000000..24e0749ae --- /dev/null +++ b/tests/test_context/test_2d_init.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from functools import partial +from pathlib import Path + +import pytest +import torch.multiprocessing as mp + +from colossalai import init_dist +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc + +CONFIG_PATH = Path(__file__).parent.joinpath('configs/parallel_2d_init.py').absolute() + + +def check_data_parallel_rank(rank): + if rank in [0, 1, 2, 3, 4, 5, 6, 7]: + assert gpc.get_local_rank(ParallelMode.DATA) == 0 + elif rank in [8, 9, 10, 11, 12, 13, 14, 15]: + assert gpc.get_local_rank(ParallelMode.DATA) == 1 + + +def check_pipeline_parallel_rank(rank): + if rank in [0, 1, 2, 3]: + assert gpc.get_local_rank(ParallelMode.PIPELINE) == 0 + elif rank in [4, 5, 6, 7]: + assert gpc.get_local_rank(ParallelMode.PIPELINE) == 1 + elif rank in [8, 9, 10, 11]: + assert gpc.get_local_rank(ParallelMode.PIPELINE) == 0 + elif rank in [12, 13, 14, 15]: + assert gpc.get_local_rank(ParallelMode.PIPELINE) == 1 + + +def check_tensor_parallel_rank(rank): + if rank in [0, 4, 8, 12]: + assert gpc.get_local_rank(ParallelMode.TENSOR) == 0 + elif rank in [1, 5, 9, 13]: + assert gpc.get_local_rank(ParallelMode.TENSOR) == 1 
+ elif rank in [2, 6, 10, 14]: + assert gpc.get_local_rank(ParallelMode.TENSOR) == 2 + elif rank in [3, 7, 11, 15]: + assert gpc.get_local_rank(ParallelMode.TENSOR) == 3 + + +def check_2d_parallel_rank(rank): + if rank in [0, 4, 8, 12]: + assert gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) == 0 + assert gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) == 0 + elif rank in [1, 5, 9, 13]: + assert gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) == 0 + assert gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) == 1 + elif rank in [2, 6, 10, 14]: + assert gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) == 1 + assert gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) == 0 + elif rank in [3, 7, 11, 15]: + assert gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) == 1 + assert gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) == 1 + + +def init_2d(local_rank, world_size, backend, port, host): + dist_args = dict( + config=CONFIG_PATH, + local_rank=local_rank, + world_size=world_size, + backend=backend, + port=port, + host=host + ) + init_dist(**dist_args) + + check_tensor_parallel_rank(local_rank) + check_data_parallel_rank(local_rank) + check_2d_parallel_rank(local_rank) + check_pipeline_parallel_rank(local_rank) + + gpc.destroy() + + +@pytest.mark.cpu +def test_2d_init(): + """ + As no computation or communication is done, we can run this test on CPU. + """ + world_size = 16 + test_fn = partial(init_2d, + world_size=world_size, + backend='gloo', + port='29500', + host='localhost' + ) + mp.spawn(test_fn, nprocs=world_size) + + +if __name__ == '__main__': + test_2d_init() diff --git a/tests/test_context/test_2p5d_init.py b/tests/test_context/test_2p5d_init.py new file mode 100644 index 000000000..26de7f7ff --- /dev/null +++ b/tests/test_context/test_2p5d_init.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from functools import partial +from pathlib import Path + +import pytest +import torch.multiprocessing as mp + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.initialize import init_dist + +CONFIG_PATH = Path(__file__).parent.joinpath('configs/parallel_2p5d_init.py').absolute() + + +def check_data_parallel_rank(rank): + dp_rank = gpc.get_local_rank(ParallelMode.DATA) + + if rank in list(range(16)): + assert dp_rank == 0 + elif rank in list(range(16, 32)): + assert dp_rank == 1 + + +def check_pipeline_parallel_rank(rank): + ppr = gpc.get_local_rank(ParallelMode.PIPELINE) + + if rank in list(range(8)): + assert ppr == 0 + elif rank in list(range(8, 16)): + assert ppr == 1 + elif rank in list(range(16, 24)): + assert ppr == 0 + elif rank in list(range(24, 32)): + assert ppr == 1 + + +def check_tensor_parallel_rank(rank): + tp_rank = gpc.get_local_rank(ParallelMode.TENSOR) + + for i in range(8): + ranks = list(range(i, 32, 8)) + if rank in ranks: + assert tp_rank == i, f'{rank}:{tp_rank}' + + +def check_2p5d_parallel_rank(rank): + rp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW) + cp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL) + dp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP) + xp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_XZ) + + # check for row parallel group + for i in range(2): + ranks = list(range(i, 32, 2)) + if rank in ranks: + assert rp_rank == i + + # check for col parallel group + for i in range(2): + ranks = list(range(i * 2, 32, 4)) + ranks_plus_ones = [val + 1 for val in ranks] + ranks.extend(ranks_plus_ones) + if rank in ranks: + assert 
cp_rank == i + + # check for depth parallel group + for i in range(2): + ranks = [] + for j in range(i * 4, 32, 8): + ranks.extend([j + k for k in range(4)]) + if rank in ranks: + assert dp_rank == i + + # check for xz parallel group + for i in range(2): + ranks = list(range(i * 2, 32, 8)) + ranks_plus_one = [val + 1 for val in ranks] + ranks.extend(ranks_plus_one) + if rank in ranks: + assert xp_rank == i + + +def init_2halfd(local_rank, world_size, backend, port, host): + dist_args = dict( + config=CONFIG_PATH, + local_rank=local_rank, + world_size=world_size, + backend=backend, + port=port, + host=host + ) + init_dist(**dist_args) + check_data_parallel_rank(local_rank) + check_pipeline_parallel_rank(local_rank) + check_tensor_parallel_rank(local_rank) + check_2p5d_parallel_rank(local_rank) + gpc.destroy() + + +@pytest.mark.cpu +def test_2halfd_init(): + """ + As no computation or communication is done, we can run this test on CPU. + """ + world_size = 32 + test_fn = partial(init_2halfd, + world_size=world_size, + backend='gloo', + port='29501', + host='localhost' + ) + mp.spawn(test_fn, nprocs=world_size) + + +if __name__ == '__main__': + test_2halfd_init() diff --git a/tests/test_context/test_3d_init.py b/tests/test_context/test_3d_init.py new file mode 100644 index 000000000..0fba98bff --- /dev/null +++ b/tests/test_context/test_3d_init.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from functools import partial +from pathlib import Path + +import pytest +import torch.multiprocessing as mp + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.initialize import init_dist + +CONFIG_PATH = Path(__file__).parent.joinpath('configs/parallel_3d_init.py').absolute() + + +def check_data_parallel_rank(rank): + dp_rank = gpc.get_local_rank(ParallelMode.DATA) + + if rank in list(range(16)): + assert dp_rank == 0 + elif rank in list(range(16, 32)): + assert dp_rank == 1 + + +def check_pipeline_parallel_rank(rank): + ppr = gpc.get_local_rank(ParallelMode.PIPELINE) + + if rank in list(range(8)): + assert ppr == 0 + elif rank in list(range(8, 16)): + assert ppr == 1 + elif rank in list(range(16, 24)): + assert ppr == 0 + elif rank in list(range(24, 32)): + assert ppr == 1 + + +def check_tensor_parallel_rank(rank): + tp_rank = gpc.get_local_rank(ParallelMode.TENSOR) + + for i in range(8): + ranks = list(range(i, 32, 8)) + if rank in ranks: + assert tp_rank == i + + +def check_3d_parallel_rank(rank): + ip_rank = gpc.get_local_rank(ParallelMode.PARALLEL_3D_INPUT) + wp_rank = gpc.get_local_rank(ParallelMode.PARALLEL_3D_WEIGHT) + op_rank = gpc.get_local_rank(ParallelMode.PARALLEL_3D_OUTPUT) + + # check for input parallel group + for i in range(2): + _ranks = list(range(i * 2, 32, 4)) + _ranks_plus_one = [val + 1 for val in _ranks] + input_ranks = _ranks + _ranks_plus_one + if rank in input_ranks: + assert ip_rank == i + + # check for weight parallel group + for i in range(2): + ranks = list(range(i, 32, 2)) + + if rank in ranks: + assert wp_rank == i + + # check for output parallel group + for i in range(2): + ranks = [] + for j in range(i * 4, 32, 8): + ranks.extend([j + k for k in range(4)]) + if rank in ranks: + assert op_rank == i + + +def init_3d(local_rank, world_size, backend, port, host): + dist_args = dict( + config=CONFIG_PATH, + local_rank=local_rank, + world_size=world_size, + backend=backend, + port=port, + host=host + ) + init_dist(**dist_args) + check_tensor_parallel_rank(local_rank) + 
check_3d_parallel_rank(local_rank) + check_data_parallel_rank(local_rank) + check_pipeline_parallel_rank(local_rank) + print('pass') + gpc.destroy() + + +@pytest.mark.cpu +def test_3d_init(): + """ + As no computation or communication is done, we can run this test on CPU. + """ + world_size = 32 + test_fn = partial(init_3d, + world_size=world_size, + backend='gloo', + port='29502', + host='localhost' + ) + mp.spawn(test_fn, nprocs=world_size) + + +if __name__ == '__main__': + test_3d_init() diff --git a/tests/test_data/test_cifar10_dataset.py b/tests/test_data/test_cifar10_dataset.py new file mode 100644 index 000000000..10b79dd03 --- /dev/null +++ b/tests/test_data/test_cifar10_dataset.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import os +from pathlib import Path + +import pytest +from torch.utils.data import DataLoader + +from colossalai.builder import build_dataset +from colossalai.context import Config + +train_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + train=True, + download=True, + transform_pipeline=[ + dict(type='ToTensor'), + dict(type='Normalize', + mean=(0.5, 0.5, 0.5), + std=(0.5, 0.5, 0.5)) + ]), + dataloader=dict(batch_size=4, shuffle=True, num_workers=2) +) + + +@pytest.mark.cpu +def test_cifar10_dataset(): + global train_data + config = Config(train_data) + dataset = build_dataset(config.dataset) + dataloader = DataLoader(dataset=dataset, **config.dataloader) + data_iter = iter(dataloader) + img, label = data_iter.next() + + assert isinstance(img, list) and isinstance(label, list), \ + f'expected the img and label to be list but got {type(img)} and {type(label)}' + + +if __name__ == '__main__': + test_cifar10_dataset() diff --git a/tests/test_data/test_data_parallel_sampler.py b/tests/test_data/test_data_parallel_sampler.py new file mode 100644 index 000000000..056f0441a --- /dev/null +++ b/tests/test_data/test_data_parallel_sampler.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import os +from functools import partial +from pathlib import Path + +import pytest +import torch.cuda +import torch.distributed as dist +import torch.multiprocessing as mp +from torch.utils.data import DataLoader + +import colossalai +from colossalai.builder import build_dataset, build_data_sampler +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc + +CONFIG = dict( + train_data=dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + train=True, + download=True, + transform_pipeline=[ + dict(type='ToTensor'), + dict(type='Normalize', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) + ] + ), + dataloader=dict( + num_workers=2, + batch_size=8, + sampler=dict( + type='DataParallelSampler', + ) + ) + ), + parallel=dict( + pipeline=dict(size=1), + tensor=dict(size=1, mode=None), + ), + seed=1024, +) + + +def run_data_sampler(local_rank, world_size): + dist_args = dict( + config=CONFIG, + local_rank=local_rank, + world_size=world_size, + backend='gloo', + port='29503', + host='localhost' + ) + colossalai.init_dist(**dist_args) + print('finished initialization') + + dataset = build_dataset(gpc.config.train_data.dataset) + sampler_cfg = gpc.config.train_data.dataloader.pop('sampler') + sampler = build_data_sampler(sampler_cfg, dataset) + dataloader = DataLoader(dataset=dataset, sampler=sampler, **gpc.config.train_data.dataloader) + data_iter = iter(dataloader) + img, label = data_iter.next() + img = img[0] + + if 
gpc.get_local_rank(ParallelMode.DATA) != 0: + img_to_compare = img.clone() + else: + img_to_compare = img + dist.broadcast(img_to_compare, src=0, group=gpc.get_group(ParallelMode.DATA)) + + if gpc.get_local_rank(ParallelMode.DATA) != 0: + assert not torch.equal(img, + img_to_compare), 'Same image was distributed across ranks but expected it to be different' + + +@pytest.mark.cpu +def test_data_sampler(): + world_size = 4 + test_func = partial(run_data_sampler, world_size=world_size) + mp.spawn(test_func, nprocs=world_size) + + +if __name__ == '__main__': + test_data_sampler() diff --git a/tests/test_data/test_deterministic_dataloader.py b/tests/test_data/test_deterministic_dataloader.py new file mode 100644 index 000000000..9cfd6c4fc --- /dev/null +++ b/tests/test_data/test_deterministic_dataloader.py @@ -0,0 +1,87 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import os +from functools import partial +from pathlib import Path + +import pytest +import torch.cuda +import torch.distributed as dist +import torch.multiprocessing as mp +from torch.utils.data import DataLoader + +import colossalai +from colossalai.builder import build_dataset +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc + +CONFIG = dict( + train_data=dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + train=True, + download=True, + transform_pipeline=[ + dict(type='ToTensor'), + dict(type='RandomCrop', size=32), + dict(type='Normalize', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) + ] + ), + dataloader=dict( + num_workers=2, + batch_size=2, + shuffle=True + ) + ), + parallel=dict( + pipeline=dict(size=1), + tensor=dict(size=1, mode=None), + ), + seed=1024, +) + + +def run_data_sampler(local_rank, world_size): + dist_args = dict( + config=CONFIG, + local_rank=local_rank, + world_size=world_size, + backend='gloo', + port='29499', + host='localhost' + ) + colossalai.init_dist(**dist_args) + gpc.set_seed() + + print('finished initialization') + + dataset = build_dataset(gpc.config.train_data.dataset) + dataloader = DataLoader(dataset=dataset, **gpc.config.train_data.dataloader) + data_iter = iter(dataloader) + img, label = data_iter.next() + img = img[0] + + if gpc.get_local_rank(ParallelMode.DATA) != 0: + img_to_compare = img.clone() + else: + img_to_compare = img + dist.broadcast(img_to_compare, src=0, group=gpc.get_group(ParallelMode.DATA)) + + if gpc.get_local_rank(ParallelMode.DATA) != 0: + # this is without sampler + # this should be false if data parallel sampler to given to the dataloader + assert torch.equal(img, + img_to_compare), 'Same image was distributed across ranks and expected it to be the same' + + +@pytest.mark.cpu +def test_data_sampler(): + world_size = 4 + test_func = partial(run_data_sampler, world_size=world_size) + mp.spawn(test_func, nprocs=world_size) + + +if __name__ == '__main__': + test_data_sampler() diff --git a/tests/test_data_pipeline_tensor_parallel/configs/vit_2d.py b/tests/test_data_pipeline_tensor_parallel/configs/vit_2d.py new file mode 100644 index 000000000..907605317 --- /dev/null +++ b/tests/test_data_pipeline_tensor_parallel/configs/vit_2d.py @@ -0,0 +1,159 @@ +import os +from pathlib import Path + +from colossalai.engine import AMP_TYPE + +BATCH_SIZE = 256 +IMG_SIZE = 32 +PATCH_SIZE = 4 +DIM = 512 +NUM_ATTENTION_HEADS = 8 +NUM_CLASSES = 10 +DEPTH = 6 + +train_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + transform_pipeline=[ + 
dict(type='RandomCrop', size=IMG_SIZE, padding=4), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010]), + ] + ), + dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, + # num_workers=1, + # shuffle=True, + ) +) + +test_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + train=False, + transform_pipeline=[ + dict(type='Resize', size=IMG_SIZE), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010] + ), + ] + ), + dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, + # num_workers=1, + ) +) + +optimizer = dict( + type='Adam', + lr=0.001, + weight_decay=0 +) + +loss = dict( + type='CrossEntropyLoss2D', +) + +# model = dict( +# type='VanillaResNet', +# block_type='ResNetBasicBlock', +# layers=[2, 2, 2, 2], +# num_cls=10 +# ) + + +model = dict( + type='VisionTransformerFromConfig', + tensor_splitting_cfg=dict( + type='ViTInputSplitter2D', + ), + embedding_cfg=dict( + type='ViTPatchEmbedding2D', + img_size=IMG_SIZE, + patch_size=PATCH_SIZE, + embed_dim=DIM, + ), + token_fusion_cfg=dict( + type='ViTTokenFuser2D', + img_size=IMG_SIZE, + patch_size=PATCH_SIZE, + embed_dim=DIM, + drop_rate=0.1 + ), + norm_cfg=dict( + type='LayerNorm2D', + normalized_shape=DIM, + eps=1e-6, + ), + block_cfg=dict( + type='ViTBlock', + attention_cfg=dict( + type='ViTSelfAttention2D', + hidden_size=DIM, + num_attention_heads=NUM_ATTENTION_HEADS, + attention_dropout_prob=0., + hidden_dropout_prob=0.1, + ), + droppath_cfg=dict( + type='VanillaViTDropPath', + ), + mlp_cfg=dict( + type='ViTMLP2D', + in_features=DIM, + dropout_prob=0.1, + mlp_ratio=1 + ), + norm_cfg=dict( + type='LayerNorm2D', + normalized_shape=DIM, + eps=1e-6, + ), + ), + head_cfg=dict( + type='ViTHead2D', + hidden_size=DIM, + num_classes=NUM_CLASSES, + ), + embed_dim=DIM, + depth=DEPTH, + drop_path_rate=0., +) + +parallel = dict( + pipeline=dict(size=2), + tensor=dict(size=4, mode='2d'), +) + +fp16 = dict( + mode=AMP_TYPE.PARALLEL, + initial_scale=2 ** 8 +) + +# fp16 = dict( +# mode=None, +# ) + +schedule = dict( + num_microbatches=2 +) +lr_scheduler = dict( + type='LinearWarmupLR', + warmup_epochs=5 +) + +num_epochs = 60 + +logging = dict( + root_path='test_vit_2d_log' +) + +seed = 100 diff --git a/tests/test_data_pipeline_tensor_parallel/configs/vit_2p5d.py b/tests/test_data_pipeline_tensor_parallel/configs/vit_2p5d.py new file mode 100644 index 000000000..d41ecea89 --- /dev/null +++ b/tests/test_data_pipeline_tensor_parallel/configs/vit_2p5d.py @@ -0,0 +1,137 @@ +import os +from pathlib import Path + +BATCH_SIZE = 250 +IMG_SIZE = 32 +PATCH_SIZE = 4 +DIM = 512 +NUM_ATTENTION_HEADS = 8 +NUM_CLASSES = 10 +DEPTH = 6 + +train_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + transform_pipeline=[ + dict(type='RandomCrop', size=IMG_SIZE, padding=4), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010]), + ] + ), + dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, + num_workers=0, + shuffle=True + ) +) + +test_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + train=False, + transform_pipeline=[ + dict(type='Resize', size=IMG_SIZE), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010] + ), + ] + ), + 
dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, + num_workers=0, + shuffle=True + ) +) + +optimizer = dict( + type='Adam', + lr=0.001, + weight_decay=0 +) + +loss = dict( + type='CrossEntropyLoss2p5D', +) + +model = dict( + type='VisionTransformerFromConfig', + tensor_splitting_cfg=dict( + type='ViTInputSplitter2p5D', + ), + embedding_cfg=dict( + type='ViTPatchEmbedding2p5D', + img_size=IMG_SIZE, + patch_size=PATCH_SIZE, + embed_dim=DIM, + ), + token_fusion_cfg=dict( + type='ViTTokenFuser2p5D', + img_size=IMG_SIZE, + patch_size=PATCH_SIZE, + embed_dim=DIM, + drop_rate=0.1 + ), + norm_cfg=dict( + type='LayerNorm2p5D', + normalized_shape=DIM, + eps=1e-6, + ), + block_cfg=dict( + type='ViTBlock', + attention_cfg=dict( + type='ViTSelfAttention2p5D', + hidden_size=DIM, + num_attention_heads=NUM_ATTENTION_HEADS, + attention_dropout_prob=0., + hidden_dropout_prob=0.1, + ), + droppath_cfg=dict( + type='VanillaViTDropPath', + ), + mlp_cfg=dict( + type='ViTMLP2p5D', + in_features=DIM, + dropout_prob=0.1, + mlp_ratio=1 + ), + norm_cfg=dict( + type='LayerNorm2p5D', + normalized_shape=DIM, + eps=1e-6, + ), + ), + head_cfg=dict( + type='ViTHead2p5D', + hidden_size=DIM, + num_classes=NUM_CLASSES, + ), + embed_dim=DIM, + depth=DEPTH, + drop_path_rate=0., +) + +parallel = dict( + pipeline=dict(size=2), + tensor=dict(size=4, depth=1, mode='2.5d'), +) + +lr_scheduler = dict( + type='LinearWarmupLR', + warmup_epochs=5 +) + +schedule = dict( + num_microbatches=2 +) + +num_epochs = 60 +num_microbatches = 1 diff --git a/tests/test_data_pipeline_tensor_parallel/test.sh b/tests/test_data_pipeline_tensor_parallel/test.sh new file mode 100644 index 000000000..1c6012a52 --- /dev/null +++ b/tests/test_data_pipeline_tensor_parallel/test.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env sh +test_file=$1 + +python $test_file --local_rank $SLURM_PROCID --world_size $SLURM_NPROCS --host $HOST --port 29500 diff --git a/tests/test_data_pipeline_tensor_parallel/test_vit_2d/test_vit_2d.py b/tests/test_data_pipeline_tensor_parallel/test_vit_2d/test_vit_2d.py new file mode 100644 index 000000000..9ffd0a1ec --- /dev/null +++ b/tests/test_data_pipeline_tensor_parallel/test_vit_2d/test_vit_2d.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from pathlib import Path + +import pytest +import torch.autograd + +import colossalai +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.engine import Engine +from colossalai.logging import get_global_dist_logger +from colossalai.nn.layer._parallel_utilities import _gather + +CONFIG_PATH = Path(__file__).parent.parent.joinpath('configs/vit_2d.py') + + +def eval(engine): + engine.eval() + accumulated_loss = 0 + correct_sum = 0 + total_sum = 0 + + for i in range(engine.schedule.num_steps): + output, label, loss = engine.step() + + if gpc.is_last_rank(ParallelMode.PIPELINE): + # loss = sum(loss) + accumulated_loss += loss.detach().cpu().numpy() + + output = _gather( + output, + ParallelMode.PARALLEL_2D_ROW, + 1 + ) + output = _gather( + output, + ParallelMode.PARALLEL_2D_COL, + 0, + ) + output = torch.argmax(output, dim=-1) + correct = torch.sum(label == output) + correct_sum += correct + total_sum += label.size(0) + avg_loss = accumulated_loss / engine.schedule.num_steps + return correct_sum, total_sum, avg_loss + + +def train(engine): + engine.train() + accumulated_loss = 0 + + for i in range(engine.schedule.num_steps): + output, label, loss = engine.step() + + if 
gpc.is_last_rank(ParallelMode.PIPELINE): + accumulated_loss += loss.detach().cpu().numpy() + avg_loss = accumulated_loss / engine.schedule.num_steps + return avg_loss + + +@pytest.mark.dist +@pytest.mark.skip("This test should be invoked by test.sh in the same folder as it runs on multiple gpus") +def test_2d_parallel_vision_transformer(): + # init dist + model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = colossalai.initialize( + CONFIG_PATH) + logger = get_global_dist_logger() + + engine = Engine(model=model, + train_dataloader=train_dataloader, + test_dataloader=test_dataloader, + criterion=criterion, + optimizer=optimizer, + lr_scheduler=lr_scheduler, + schedule=schedule) + + for epoch in range(gpc.config.num_epochs): + train_loss = train(engine) + if gpc.is_last_rank(ParallelMode.PIPELINE): + logger.info(f'epoch {epoch} - train loss: {train_loss}') + + if epoch % 2 == 0: + correct_sum, total_sum, eval_loss = eval(engine) + if gpc.is_last_rank(ParallelMode.PIPELINE): + logger.info( + f'epoch {epoch} - eval loss: {eval_loss}, total: {total_sum}, ' + f'correct: {correct_sum}, acc: {correct_sum / total_sum}') + + +if __name__ == '__main__': + test_2d_parallel_vision_transformer() diff --git a/tests/test_data_pipeline_tensor_parallel/test_vit_2p5d/test_vit_2p5d.py b/tests/test_data_pipeline_tensor_parallel/test_vit_2p5d/test_vit_2p5d.py new file mode 100644 index 000000000..33d56360a --- /dev/null +++ b/tests/test_data_pipeline_tensor_parallel/test_vit_2p5d/test_vit_2p5d.py @@ -0,0 +1,94 @@ +from pathlib import Path + +import pytest +import torch.autograd + +import colossalai +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.engine import Engine +from colossalai.logging import get_global_dist_logger +from colossalai.nn.layer._parallel_utilities import _gather + +CONFIG_PATH = Path(__file__).parent.parent.joinpath('configs/vit_2p5d.py') + +def eval(engine): + engine.eval() + accumulated_loss = 0 + correct_sum = 0 + total_sum = 0 + + for i in range(engine.schedule.num_steps): + output, label, loss = engine.step() + + if gpc.is_last_rank(ParallelMode.PIPELINE): + accumulated_loss += loss.detach().cpu().numpy() + + output = _gather( + output, + ParallelMode.PARALLEL_2P5D_ROW, + 1 + ) + output = _gather( + output, + ParallelMode.PARALLEL_2P5D_COL, + 0, + ) + output = _gather( + output, + ParallelMode.PARALLEL_2P5D_DEP, + 0, + ) + output = torch.argmax(output, dim=-1) + correct = torch.sum(label == output) + correct_sum += correct + total_sum += label.size(0) + avg_loss = accumulated_loss / engine.schedule.num_steps + return correct_sum, total_sum, avg_loss + + +def train(engine): + engine.train() + accumulated_loss = 0 + + for i in range(engine.schedule.num_steps): + output, label, loss = engine.step() + + if gpc.is_last_rank(ParallelMode.PIPELINE): + accumulated_loss += loss.detach().cpu().numpy() + + avg_loss = accumulated_loss / engine.schedule.num_steps + return avg_loss + + +@pytest.mark.dist +@pytest.mark.skip("This test should be invoked by test.sh in the same folder as it runs on multiple gpus") +def test_2p5d_parallel_vision_transformer(): + # init dist + model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = colossalai.initialize( + CONFIG_PATH) + logger = get_global_dist_logger() + + engine = Engine(model=model, + train_dataloader=train_dataloader, + test_dataloader=test_dataloader, + criterion=criterion, + optimizer=optimizer, + 
lr_scheduler=lr_scheduler, + schedule=schedule) + + for epoch in range(gpc.config.num_epochs): + train_loss = train(engine) + if gpc.is_last_rank(ParallelMode.PIPELINE): + logger.info(f'epoch {epoch} - train loss: {train_loss}') + + if epoch % 2 == 0: + correct_sum, total_sum, eval_loss = eval(engine) + if gpc.is_last_rank(ParallelMode.PIPELINE): + logger.info( + f'epoch {epoch} - eval loss: {eval_loss}, total: {total_sum}, ' + f'correct: {correct_sum}, acc: {correct_sum / total_sum}') + + +if __name__ == '__main__': + test_2p5d_parallel_vision_transformer() \ No newline at end of file diff --git a/tests/test_engine/configs/non_pipeline_resnet.py b/tests/test_engine/configs/non_pipeline_resnet.py new file mode 100644 index 000000000..de78154ec --- /dev/null +++ b/tests/test_engine/configs/non_pipeline_resnet.py @@ -0,0 +1,42 @@ +import os +from pathlib import Path + +BATCH_SIZE = 128 +IMG_SIZE = 224 +DIM = 768 +NUM_CLASSES = 10 +NUM_ATTN_HEADS = 12 + +# resnet 18 +model = dict(type='VanillaResNet', + block_type='ResNetBasicBlock', + layers=[2, 2, 2, 2], + num_cls=10) + +parallel = dict( + pipeline=dict(size=1), + tensor=dict(size=1, mode=None) +) + +train_data = dict(dataset=dict(type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + download=True, + transform_pipeline=[ + dict(type='Resize', + size=(IMG_SIZE, IMG_SIZE)), + dict(type='ToTensor'), + dict(type='Normalize', + mean=(0.5, 0.5, 0.5), + std=(0.5, 0.5, 0.5)) + ]), + dataloader=dict(batch_size=BATCH_SIZE, + pin_memory=True, + num_workers=4, + drop_last=True)) + +optimizer = dict(type='Adam', lr=0.001) + +loss = dict(type='CrossEntropyLoss') + +# set_device_func = lambda global_rank, world_size: global_rank % 4 +seed = 1024 diff --git a/tests/test_engine/configs/non_pipeline_resnet_apex_amp.py b/tests/test_engine/configs/non_pipeline_resnet_apex_amp.py new file mode 100644 index 000000000..b6300b8c4 --- /dev/null +++ b/tests/test_engine/configs/non_pipeline_resnet_apex_amp.py @@ -0,0 +1,45 @@ +import os +from pathlib import Path + +from colossalai.engine import AMP_TYPE + +BATCH_SIZE = 128 +IMG_SIZE = 224 +DIM = 768 +NUM_CLASSES = 10 +NUM_ATTN_HEADS = 12 + +# resnet 18 +model = dict(type='VanillaResNet', + block_type='ResNetBasicBlock', + layers=[2, 2, 2, 2], + num_cls=10) + +parallel = dict( + pipeline=dict(size=1), + tensor=dict(size=1, mode=None) +) + +train_data = dict(dataset=dict(type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + download=True, + transform_pipeline=[ + dict(type='Resize', + size=(IMG_SIZE, IMG_SIZE)), + dict(type='ToTensor'), + dict(type='Normalize', + mean=(0.5, 0.5, 0.5), + std=(0.5, 0.5, 0.5)) + ]), + dataloader=dict(batch_size=BATCH_SIZE, + pin_memory=True, + num_workers=4, + drop_last=True)) + +optimizer = dict(type='Adam', lr=0.001) + +loss = dict(type='CrossEntropyLoss') +fp16 = dict(mode=AMP_TYPE.APEX) + +# set_device_func = lambda global_rank, world_size: global_rank % 4 +seed = 1024 diff --git a/tests/test_engine/configs/non_pipeline_resnet_torch_amp.py b/tests/test_engine/configs/non_pipeline_resnet_torch_amp.py new file mode 100644 index 000000000..87fd68554 --- /dev/null +++ b/tests/test_engine/configs/non_pipeline_resnet_torch_amp.py @@ -0,0 +1,45 @@ +import os +from pathlib import Path + +from colossalai.engine import AMP_TYPE + +BATCH_SIZE = 128 +IMG_SIZE = 224 +DIM = 768 +NUM_CLASSES = 10 +NUM_ATTN_HEADS = 12 + +# resnet 18 +model = dict(type='VanillaResNet', + block_type='ResNetBasicBlock', + layers=[2, 2, 2, 2], + num_cls=10) + +parallel = dict( + pipeline=dict(size=1), + 
tensor=dict(size=1, mode=None) +) + +train_data = dict(dataset=dict(type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + download=True, + transform_pipeline=[ + dict(type='Resize', + size=(IMG_SIZE, IMG_SIZE)), + dict(type='ToTensor'), + dict(type='Normalize', + mean=(0.5, 0.5, 0.5), + std=(0.5, 0.5, 0.5)) + ]), + dataloader=dict(batch_size=BATCH_SIZE, + pin_memory=True, + num_workers=4, + drop_last=True)) + +optimizer = dict(type='Adam', lr=0.001) + +loss = dict(type='CrossEntropyLoss') +fp16 = dict(mode=AMP_TYPE.TORCH) + +# set_device_func = lambda global_rank, world_size: global_rank % 4 +seed = 1024 diff --git a/tests/test_engine/configs/pipeline_vanilla_resnet.py b/tests/test_engine/configs/pipeline_vanilla_resnet.py new file mode 100644 index 000000000..9820d3b82 --- /dev/null +++ b/tests/test_engine/configs/pipeline_vanilla_resnet.py @@ -0,0 +1,48 @@ +import os +from pathlib import Path + +BATCH_SIZE = 128 +IMG_SIZE = 224 +DIM = 768 +NUM_CLASSES = 10 +NUM_ATTN_HEADS = 12 + +# resnet 18 +model = dict(type='VanillaResNet', + block_type='ResNetBasicBlock', + layers=[2, 2, 2, 2], + num_cls=10) + +train_data = dict(dataset=dict(type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + download=True, + transform_pipeline=[ + dict(type='Resize', + size=(IMG_SIZE, IMG_SIZE)), + dict(type='ToTensor'), + dict(type='Normalize', + mean=(0.5, 0.5, 0.5), + std=(0.5, 0.5, 0.5)) + ]), + dataloader=dict(batch_size=BATCH_SIZE, + pin_memory=True, + num_workers=4, + drop_last=True)) + +optimizer = dict(type='Adam', lr=0.001) + +loss = dict(type='CrossEntropyLoss') + +parallel = dict( + pipeline=dict(size=4), + tensor=dict(size=1, mode=None) +) + +schedule = dict( + num_microbatches=4 +) +num_pipeling_batches = 2 +seed = 1024 +lr_scheduler = dict(type='LinearWarmupLR', warmup_steps=5) + +num_epochs = 10 diff --git a/tests/test_engine/test.sh b/tests/test_engine/test.sh new file mode 100644 index 000000000..24d0c5423 --- /dev/null +++ b/tests/test_engine/test.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env sh +test_file=$1 + +python $test_file --local_rank $SLURM_PROCID --world_size $SLURM_NPROCS --host $HOST --port 29500 \ No newline at end of file diff --git a/tests/test_engine/test_non_pipeline_engine/test_engine_apex_amp.py b/tests/test_engine/test_non_pipeline_engine/test_engine_apex_amp.py new file mode 100644 index 000000000..fe6b4010b --- /dev/null +++ b/tests/test_engine/test_non_pipeline_engine/test_engine_apex_amp.py @@ -0,0 +1,54 @@ +# !/usr/bin/env python +# -*- encoding: utf-8 -*- + +import os.path as osp + +import pytest +import torch + +from colossalai import initialize +from colossalai.core import global_context as gpc +from colossalai.engine import Engine +from colossalai.logging import get_global_dist_logger +from colossalai.utils import report_memory_usage + +NUM_BATCH = 128 +NUM_MICRO = 6 + +BATCH_SIZE = 32 +SEQ_LENGTH = 128 +HIDDEN_SIZE = 512 + +DIR_PATH = osp.dirname(osp.realpath(__file__)) +NO_PIPE_CONFIG_PATH = osp.join(DIR_PATH, '../configs/non_pipeline_resnet_apex_amp.py') + + +def run_no_pipeline(config): + model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = initialize(config) + logger = get_global_dist_logger() + rank = torch.distributed.get_rank() + + engine = Engine(model=model, + train_dataloader=train_dataloader, + criterion=criterion, + optimizer=optimizer, + schedule=schedule) + engine.train() + logger.info('lr = %g' % engine.get_lr()) + output, label, loss = engine.step() + logger.info('Rank {} returns: {}'.format(rank, loss.item())) + 
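+    # Note: engine.step() executes one step through the configured schedule and
+    # returns (output, label, loss). A longer smoke test could simply repeat the
+    # call; a minimal sketch (not part of the original test, loop length arbitrary):
+    #
+    #     for _ in range(10):
+    #         _, _, loss = engine.step()
+    #         logger.info('step loss = {}'.format(loss.item()))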
logger.info('lr = %g' % engine.get_lr()) + + gpc.destroy() + logger.info('Test engine finished') + report_memory_usage("After testing") + + +@pytest.mark.skip("This test should be invoked using the test.sh provided") +@pytest.mark.dist +def test_engine(): + run_no_pipeline(NO_PIPE_CONFIG_PATH) + + +if __name__ == '__main__': + test_engine() diff --git a/tests/test_engine/test_non_pipeline_engine/test_engine_no_amp.py b/tests/test_engine/test_non_pipeline_engine/test_engine_no_amp.py new file mode 100644 index 000000000..865f2b04e --- /dev/null +++ b/tests/test_engine/test_non_pipeline_engine/test_engine_no_amp.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import os.path as osp + +import pytest +import torch + +from colossalai import initialize +from colossalai.core import global_context as gpc +from colossalai.engine import Engine +from colossalai.logging import get_global_dist_logger +from colossalai.utils import report_memory_usage + +NUM_BATCH = 128 +NUM_MICRO = 6 + +BATCH_SIZE = 32 +SEQ_LENGTH = 128 +HIDDEN_SIZE = 512 + +DIR_PATH = osp.dirname(osp.realpath(__file__)) +NO_PIPE_CONFIG_PATH = osp.join(DIR_PATH, '../configs/non_pipeline_resnet.py') + + +def test_no_pipeline(config): + print('Test no pipeline engine start') + + model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = initialize(config) + logger = get_global_dist_logger() + + rank = torch.distributed.get_rank() + engine = Engine(model=model, + train_dataloader=train_dataloader, + criterion=criterion, + optimizer=optimizer, + schedule=schedule) + + engine.train() + logger.info('lr = %g' % engine.get_lr()) + output, label, loss = engine.step() + logger.info('Rank {} returns: {}'.format(rank, loss.item())) + logger.info('lr = %g' % engine.get_lr()) + + gpc.destroy() + logger.info('Test engine finished') + report_memory_usage("After testing") + + +@pytest.mark.skip("This test should be invoked using the test.sh provided") +@pytest.mark.dist +def test_engine(): + test_no_pipeline(NO_PIPE_CONFIG_PATH) + + +if __name__ == '__main__': + test_engine() diff --git a/tests/test_engine/test_non_pipeline_engine/test_engine_torch_amp.py b/tests/test_engine/test_non_pipeline_engine/test_engine_torch_amp.py new file mode 100644 index 000000000..83c6927f3 --- /dev/null +++ b/tests/test_engine/test_non_pipeline_engine/test_engine_torch_amp.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import os.path as osp + +import pytest +import torch + +from colossalai import initialize +from colossalai.core import global_context as gpc +from colossalai.engine import Engine +from colossalai.logging import get_global_dist_logger +from colossalai.utils import report_memory_usage + +NUM_BATCH = 128 +NUM_MICRO = 6 + +BATCH_SIZE = 32 +SEQ_LENGTH = 128 +HIDDEN_SIZE = 512 + +DIR_PATH = osp.dirname(osp.realpath(__file__)) +NO_PIPE_CONFIG_PATH = osp.join(DIR_PATH, '../configs/non_pipeline_resnet_torch_amp.py') + + +def test_no_pipeline(config): + print('Test no pipeline engine start') + + model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = initialize(config) + logger = get_global_dist_logger() + + rank = torch.distributed.get_rank() + engine = Engine(model=model, + train_dataloader=train_dataloader, + criterion=criterion, + optimizer=optimizer, + schedule=schedule) + + engine.train() + logger.info('lr = %g' % engine.get_lr()) + output, label, loss = engine.step() + logger.info('Rank {} returns: {}'.format(rank, loss.item())) + logger.info('lr = 
%g' % engine.get_lr()) + + gpc.destroy() + logger.info('Test engine finished') + report_memory_usage("After testing") + + +@pytest.mark.skip("This test should be invoked using the test.sh provided") +@pytest.mark.dist +def test_engine(): + test_no_pipeline(NO_PIPE_CONFIG_PATH) + + +if __name__ == '__main__': + test_engine() diff --git a/tests/test_engine/test_pipeline/debug_schedule.py b/tests/test_engine/test_pipeline/debug_schedule.py new file mode 100644 index 000000000..ea3799dfd --- /dev/null +++ b/tests/test_engine/test_pipeline/debug_schedule.py @@ -0,0 +1,232 @@ +# referenced from Megatron and used to testify communication +import os.path as osp + +import pytest +import torch +from torch.utils.data import DataLoader + +from colossalai.builder import ModelInitializer, build_dataset, build_optimizer, build_loss +from colossalai.communication import p2p as p2p_communication +from colossalai.communication.utils import send_tensor_meta, recv_tensor_meta +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.initialize import initialize +from colossalai.utils import print_rank_0, get_current_device + +NUM_BATCH = 128 +NUM_MICRO = 6 + + +def get_num_microbatches(): + return NUM_MICRO + + +def to_cuda(data): + if isinstance(data, (tuple, list)): + data = data[0].to(get_current_device()) + else: + data = data.to(get_current_device()) + return data + + +def step_func(loss): + def _step_func(input_tensor, model): + output = model(input_tensor) + if isinstance(output, (tuple, list)): + if len(output) > 1: + raise NotImplementedError("Multiple output!!!") + else: + output = output[0] + return output, loss + + return _step_func + + +def forward_step(forward_step_func, data_iterator, model, input_tensor, losses_reduced): + """Forward step for passed-in model. + If first stage, input tensor is obtained from data_iterator, otherwise + passed-in input_tensor is used. + Returns output tensor.""" + + if input_tensor is None: + data, label = data_iterator.next() + input_tensor = to_cuda(data) + + output_tensor, loss_func = forward_step_func(input_tensor, model) + if gpc.is_last_rank(ParallelMode.PIPELINE): + data, label = data_iterator.next() + label = to_cuda(label) + output_tensor = loss_func(output_tensor, label) / get_num_microbatches() + losses_reduced.append(output_tensor) + + return output_tensor + + +def backward_step(optimizer, input_tensor, output_tensor, output_tensor_grad): + """Backward step through passed-in output tensor. + If last stage, output_tensor_grad is None, otherwise gradient of loss + with respect to stage's output tensor. + Returns gradient of loss with respect to input tensor (None if first + stage).""" + + # Retain the grad on the input_tensor. + if input_tensor is not None: + input_tensor.retain_grad() + + # Backward pass. + torch.autograd.backward(output_tensor, grad_tensors=output_tensor_grad) + + # Collect the grad of the input_tensor. + input_tensor_grad = None + if input_tensor is not None: + input_tensor_grad = input_tensor.grad + + return input_tensor_grad + + +def forward_backward_pipelining_without_interleaving(forward_step_func, data_iterator, + model, optimizer, forward_only): + """Run non-interleaved 1F1B schedule, with communication between pipeline + stages. + Returns dictionary with losses if the last stage, empty dict otherwise.""" + + # Compute number of warmup microbatches. 
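+    # In the 1F1B schedule each pipeline stage first runs a few "warmup" forward
+    # passes so that downstream stages have work in flight before the steady
+    # one-forward-one-backward phase begins. The warmup count is
+    # (pipeline_size - pipeline_rank - 1), capped at the number of microbatches:
+    # e.g. with 4 stages and NUM_MICRO = 6, stage 0 warms up with 3 forwards and
+    # then runs 3 steady-state iterations, while the last stage warms up with 0
+    # and runs all 6 microbatches in the steady state.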
+ num_microbatches = get_num_microbatches() + num_warmup_microbatches = \ + (gpc.get_world_size(ParallelMode.PIPELINE) - + gpc.get_local_rank(ParallelMode.PIPELINE) - 1) + num_warmup_microbatches = min( + num_warmup_microbatches, + num_microbatches) + num_microbatches_remaining = \ + num_microbatches - num_warmup_microbatches + + # Input, output tensors only need to be saved when doing backward passes + input_tensors = None + output_tensors = None + if not forward_only: + input_tensors = [] + output_tensors = [] + losses_reduced = [] + + # Used for tensor meta information communication + ft_shape = None + bt_shape = None + fs_checker = True + + # Run warmup forward passes. + for i in range(num_warmup_microbatches): + if not gpc.is_first_rank(ParallelMode.PIPELINE): + ft_shape = recv_tensor_meta(ft_shape) + input_tensor = p2p_communication.recv_forward(ft_shape) + output_tensor = forward_step(forward_step_func, data_iterator, model, + input_tensor, losses_reduced) + if not gpc.is_last_rank(ParallelMode.PIPELINE): + bt_shape = output_tensor.shape + fs_checker = send_tensor_meta(output_tensor, fs_checker) + p2p_communication.send_forward(output_tensor) + + if not forward_only: + input_tensors.append(input_tensor) + output_tensors.append(output_tensor) + + # Before running 1F1B, need to receive first forward tensor. + # If all microbatches are run in warmup / cooldown phase, then no need to + # receive this tensor here. + if num_microbatches_remaining > 0: + if not gpc.is_first_rank(ParallelMode.PIPELINE): + ft_shape = recv_tensor_meta(ft_shape) + input_tensor = p2p_communication.recv_forward(ft_shape) + + # Run 1F1B in steady state. + for i in range(num_microbatches_remaining): + last_iteration = (i == (num_microbatches_remaining - 1)) + + output_tensor = forward_step(forward_step_func, data_iterator, model, + input_tensor, losses_reduced) + if forward_only: + p2p_communication.send_forward(output_tensor) + + if not last_iteration: + input_tensor = p2p_communication.recv_forward(ft_shape) + + else: + output_tensor_grad = \ + p2p_communication.send_forward_recv_backward(output_tensor, bt_shape) + + # Add input_tensor and output_tensor to end of list. + input_tensors.append(input_tensor) + output_tensors.append(output_tensor) + + # Pop input_tensor and output_tensor from the start of the list for + # the backward pass. + input_tensor = input_tensors.pop(0) + output_tensor = output_tensors.pop(0) + + input_tensor_grad = \ + backward_step(optimizer, input_tensor, output_tensor, + output_tensor_grad) + + if last_iteration: + input_tensor = None + p2p_communication.send_backward(input_tensor_grad) + else: + input_tensor = \ + p2p_communication.send_backward_recv_forward(input_tensor_grad, ft_shape) + + # Run cooldown backward passes. 
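+    # Cooldown phase: drain the pipeline by running the backward passes that
+    # match the warmup forwards still held in input_tensors/output_tensors.
+    # Each stage therefore performs exactly num_warmup_microbatches cooldown
+    # backwards, receiving the output gradient from the next stage and sending
+    # its input gradient back to the previous stage.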
+ if not forward_only: + for i in range(num_warmup_microbatches): + input_tensor = input_tensors.pop(0) + output_tensor = output_tensors.pop(0) + + output_tensor_grad = p2p_communication.recv_backward(bt_shape) + + input_tensor_grad = \ + backward_step(optimizer, input_tensor, output_tensor, + output_tensor_grad) + + p2p_communication.send_backward(input_tensor_grad) + + return losses_reduced + + +DIR_PATH = osp.dirname(osp.realpath(__file__)) +CONFIG_PATH = osp.join(DIR_PATH, '../configs/pipeline_vanilla_vit.py') + + +@pytest.mark.skip(reason="This is only for debugging purpose, please ignore this test") +@pytest.mark.dist +def test_schedule(): + initialize(CONFIG_PATH) + + # build model + model = ModelInitializer(gpc.config.model, 1).model_initialize() + print_rank_0('model is created') + + # keep the same sampler for all process + torch.manual_seed(1331) + + dataset = build_dataset(gpc.config.data.dataset) + dataloader = DataLoader(dataset=dataset, **gpc.config.data.dataloader) + print_rank_0('train data is created') + + # build optimizer and loss + optim = build_optimizer(gpc.config.optimizer, model) + loss = build_loss(gpc.config.loss) + print_rank_0('optim and loss is created') + + forward_backward_pipelining_without_interleaving( + step_func(loss), + iter(dataloader), + model, + optim, + False + ) + + gpc.destroy() + print_rank_0('training finished') + + +if __name__ == '__main__': + test_schedule() diff --git a/tests/test_engine/test_pipeline/test_p2p.py b/tests/test_engine/test_pipeline/test_p2p.py new file mode 100644 index 000000000..aa1a0f5e1 --- /dev/null +++ b/tests/test_engine/test_pipeline/test_p2p.py @@ -0,0 +1,149 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import pytest +import torch +import torch.distributed as dist + +from colossalai.communication import (recv_backward, recv_forward, + recv_tensor_meta, send_backward, + send_backward_recv_forward, send_forward, + send_forward_recv_backward, + send_tensor_meta) +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.initialize import init_dist, parse_args +from colossalai.logging import get_global_dist_logger +from colossalai.utils import get_current_device + +BATCH_SIZE = 32 +SEQ_LENGTH = 128 +HIDDEN_SIZE = 512 + +CONFIG = dict( + parallel=dict( + pipeline=dict(size=4), + tensor=dict(size=1, mode=None) + ), + seed=1024 +) + + +def check_equal(A, B): + return torch.allclose(A, B, rtol=1e-5, atol=1e-3) + + +def check_forward(output_tensor, rank, logger): + dist.barrier() + if gpc.is_first_rank(ParallelMode.PIPELINE): + tensor = output_tensor.clone() + else: + tensor = recv_forward(output_tensor.shape) + logger.info('Rank {} received forward. Correct tensor: {}'.format( + rank, check_equal(tensor, output_tensor))) + if not gpc.is_last_rank(ParallelMode.PIPELINE): + send_forward(tensor) + logger.info('Rank {} sent forward.'.format(rank)) + + +def check_backward(output_grad, rank, logger): + dist.barrier() + if gpc.is_last_rank(ParallelMode.PIPELINE): + grad = output_grad.clone() + else: + grad = recv_backward(output_grad.shape) + logger.info('Rank {} received backward. 
Correct grad: {}'.format( + rank, check_equal(grad, output_grad))) + if not gpc.is_first_rank(ParallelMode.PIPELINE): + send_backward(grad) + logger.info('Rank {} sent backward.'.format(rank)) + + +def check_forward_backward(output_tensor, output_grad, rank, logger): + dist.barrier() + if not gpc.is_first_rank(ParallelMode.PIPELINE): + tensor = send_backward_recv_forward(output_grad, output_tensor.shape) + logger.info( + 'Rank {} sent backward received forward. Correct tensor: {}'. + format(rank, check_equal(tensor, output_tensor))) + if not gpc.is_last_rank(ParallelMode.PIPELINE): + grad = send_forward_recv_backward(output_tensor, output_grad.shape) + logger.info( + 'Rank {} sent forward received backward. Correct grad: {}'.format( + rank, check_equal(grad, output_grad))) + + +def check_op(size, rank, prev_rank, next_rank, up_group, down_group, logger): + dtype = torch.float32 + device = get_current_device() + tensor_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE) + # recv_tensor_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE) + grad_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE) + tensor = torch.randn(tensor_shape, dtype=dtype, device=device) + dist.all_reduce(tensor) + grad = torch.randn(grad_shape, dtype=dtype, device=device) + dist.all_reduce(grad) + if rank % 2 == 0: + need_meta = True + need_meta = send_tensor_meta(tensor, need_meta) + logger.info('Rank {} shape sent (need meta: {}).'.format( + rank, need_meta)) + req = dist.broadcast(tensor, src=rank, group=down_group, async_op=True) + req.wait() + out = tensor.clone() + logger.info('Rank {} test op: tensor sent.'.format(rank)) + else: + recv_tensor_shape = recv_tensor_meta(None) + logger.info('Rank {} shape received. Correct shape: {}'.format( + rank, tensor_shape == recv_tensor_shape)) + out = torch.empty(recv_tensor_shape, dtype=dtype, device=device) + req = dist.broadcast(out, src=prev_rank, group=up_group, async_op=True) + req.wait() + logger.info('Rank {} test op: received tensor ({})'.format( + rank, out.shape)) + + logger.info('Rank {} test op. 
Correct tensor: {}'.format( + rank, check_equal(tensor, out))) + + +def test_comm(size, rank, prev_rank, next_rank, up_group, down_group, logger): + dtype = torch.float32 + device = get_current_device() + tensor_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE) + grad_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE) + tensor = torch.randn(tensor_shape, dtype=dtype, device=device) + dist.all_reduce(tensor) + grad = torch.randn(grad_shape, dtype=dtype, device=device) + dist.all_reduce(grad) + check_op(size, rank, prev_rank, next_rank, up_group, down_group, logger) + check_forward(tensor, rank, logger) + check_backward(grad, rank, logger) + check_forward_backward(tensor, grad, rank, logger) + + +@pytest.mark.skip("This test should be invoked using the test.sh provided") +@pytest.mark.dist +def test_main(): + args = parse_args() + world_size = args.world_size + + init_dist(CONFIG) + logger = get_global_dist_logger() + rank = gpc.get_global_rank() + prev_rank = gpc.get_prev_global_rank(ParallelMode.PIPELINE) + up_ranks = gpc.get_ranks_in_group(ParallelMode.PIPELINE_PREV) + up_group = gpc.get_group(ParallelMode.PIPELINE_PREV) + next_rank = gpc.get_next_global_rank(ParallelMode.PIPELINE) + down_ranks = gpc.get_ranks_in_group(ParallelMode.PIPELINE_NEXT) + down_group = gpc.get_group(ParallelMode.PIPELINE_NEXT) + logger.info( + 'Rank {0}: prev rank {1} (up: {2}), next rank {3} (down: {4})'.format( + rank, prev_rank, up_ranks, next_rank, down_ranks)) + logger.info('Distributed environment is initialzied.') + + test_comm(world_size, rank, prev_rank, next_rank, up_group, down_group, + logger) + + +if __name__ == '__main__': + test_main() diff --git a/tests/test_engine/test_pipeline/test_partition.py b/tests/test_engine/test_pipeline/test_partition.py new file mode 100644 index 000000000..65c108162 --- /dev/null +++ b/tests/test_engine/test_pipeline/test_partition.py @@ -0,0 +1,37 @@ +import os.path as osp + +import pytest +import torch +from torch.utils.data import DataLoader + +from colossalai.builder import build_dataset, ModelInitializer +from colossalai.core import global_context +from colossalai.initialize import init_dist +from colossalai.logging import get_global_dist_logger + +DIR_PATH = osp.dirname(osp.realpath(__file__)) +CONFIG_PATH = osp.join(DIR_PATH, '../configs/pipeline_vanilla_resnet.py') + + +@pytest.mark.skip("This test should be invoked using the test.sh provided") +@pytest.mark.dist +def test_partition(): + init_dist(CONFIG_PATH) + logger = get_global_dist_logger() + logger.info('finished initialization') + + # build model + model = ModelInitializer(global_context.config.model, 1, verbose=True).model_initialize() + logger.info('model is created') + + dataset = build_dataset(global_context.config.train_data.dataset) + dataloader = DataLoader(dataset=dataset, **global_context.config.train_data.dataloader) + logger.info('train data is created') + + global_context.destroy() + torch.cuda.synchronize() + logger.info('training finished') + + +if __name__ == '__main__': + test_partition() diff --git a/tests/test_engine/test_pipeline/test_schedule.py b/tests/test_engine/test_pipeline/test_schedule.py new file mode 100644 index 000000000..32fcaafc1 --- /dev/null +++ b/tests/test_engine/test_pipeline/test_schedule.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import os.path as osp + +import pytest + +from colossalai.core import global_context as gpc +from colossalai.initialize import initialize +from colossalai.logging import get_global_dist_logger + +NUM_BATCH = 128 + 
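+# The schedule under test is configured by pipeline_vanilla_resnet.py: a
+# 4-stage pipeline (parallel.pipeline.size = 4) with each batch split into
+# 4 micro-batches (schedule.num_microbatches = 4).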
+BATCH_SIZE = 32 +SEQ_LENGTH = 128 +HIDDEN_SIZE = 512 + +DIR_PATH = osp.dirname(osp.realpath(__file__)) +CONFIG_PATH = osp.join(DIR_PATH, '../configs/pipeline_vanilla_resnet.py') + + +@pytest.mark.skip("This test should be invoked using the test.sh provided") +@pytest.mark.dist +def test_schedule(): + model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = initialize(CONFIG_PATH) + logger = get_global_dist_logger() + + schedule.zero_grad() + output, label, losses = schedule.forward_backward_step(forward_only=False) + schedule.step() + logger.info('losses: {}'.format([loss.item() for loss in losses])) + + gpc.destroy() + logger.info('training finished') + + +if __name__ == '__main__': + test_schedule() diff --git a/tests/test_engine/test_pipeline_engine/test_engine.py b/tests/test_engine/test_pipeline_engine/test_engine.py new file mode 100644 index 000000000..7ed0b0a3d --- /dev/null +++ b/tests/test_engine/test_pipeline_engine/test_engine.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import os.path as osp + +import pytest +import torch + +from colossalai import initialize +from colossalai.context import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.engine import Engine +from colossalai.logging import get_global_dist_logger + +NUM_BATCH = 128 + +BATCH_SIZE = 32 +SEQ_LENGTH = 128 +HIDDEN_SIZE = 512 + +DIR_PATH = osp.dirname(osp.realpath(__file__)) +PIPE_CONFIG_PATH = osp.join(DIR_PATH, '../configs/pipeline_vanilla_resnet.py') + + +def run_pipeline(config): + model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = initialize(config) + logger = get_global_dist_logger() + rank = torch.distributed.get_rank() + engine = Engine(model=model, + train_dataloader=train_dataloader, + criterion=criterion, + optimizer=optimizer, + lr_scheduler=lr_scheduler, + schedule=schedule) + + engine.train() + logger.info('lr = %g' % engine.get_lr()) + outputs, labels, loss = engine.step() + if gpc.is_last_rank(ParallelMode.PIPELINE): + logger.info('losses: {}'.format(rank, loss.item())) + logger.info('lr = %g' % engine.get_lr()) + + gpc.destroy() + logger.info('Test engine pipeline finished') + + +@pytest.mark.skip("This test should be invoked using the test.sh provided") +@pytest.mark.dist +def test_engine(): + run_pipeline(PIPE_CONFIG_PATH) + + +if __name__ == '__main__': + test_engine() diff --git a/tests/test_fp16_optimizer/configs/vit_2d.py b/tests/test_fp16_optimizer/configs/vit_2d.py new file mode 100644 index 000000000..bcef5e2d4 --- /dev/null +++ b/tests/test_fp16_optimizer/configs/vit_2d.py @@ -0,0 +1,140 @@ +import os +from pathlib import Path + +from colossalai.engine import AMP_TYPE + +BATCH_SIZE = 512 +IMG_SIZE = 32 +PATCH_SIZE = 4 +DIM = 512 +NUM_ATTENTION_HEADS = 8 +SUMMA_DIM = 2 +NUM_CLASSES = 10 +DEPTH = 6 + +train_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + transform_pipeline=[ + dict(type='RandomCrop', size=IMG_SIZE, padding=4), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010]), + ] + ), + dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, + num_workers=4, + shuffle=True + ) +) + +test_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + train=False, + transform_pipeline=[ + dict(type='Resize', size=IMG_SIZE), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 
0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010] + ), + ] + ), + dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, + num_workers=4, + shuffle=True + ) +) + +optimizer = dict( + type='Adam', + lr=0.001, + weight_decay=0 +) + +loss = dict( + type='CrossEntropyLoss2D', +) + +model = dict( + type='VisionTransformerFromConfig', + tensor_splitting_cfg=dict( + type='ViTInputSplitter2D', + ), + embedding_cfg=dict( + type='ViTPatchEmbedding2D', + img_size=IMG_SIZE, + patch_size=PATCH_SIZE, + embed_dim=DIM, + ), + token_fusion_cfg=dict( + type='ViTTokenFuser2D', + img_size=IMG_SIZE, + patch_size=PATCH_SIZE, + embed_dim=DIM, + drop_rate=0.1 + ), + norm_cfg=dict( + type='LayerNorm2D', + normalized_shape=DIM, + eps=1e-6, + ), + block_cfg=dict( + type='ViTBlock', + attention_cfg=dict( + type='ViTSelfAttention2D', + hidden_size=DIM, + num_attention_heads=NUM_ATTENTION_HEADS, + attention_dropout_prob=0., + hidden_dropout_prob=0.1, + ), + droppath_cfg=dict( + type='VanillaViTDropPath', + ), + mlp_cfg=dict( + type='ViTMLP2D', + in_features=DIM, + dropout_prob=0.1, + mlp_ratio=1 + ), + norm_cfg=dict( + type='LayerNorm2D', + normalized_shape=DIM, + eps=1e-6, + ), + ), + head_cfg=dict( + type='ViTHead2D', + hidden_size=DIM, + num_classes=NUM_CLASSES, + ), + embed_dim=DIM, + depth=DEPTH, + drop_path_rate=0., +) + +parallel = dict( + pipeline=dict(size=1), + tensor=dict(size=4, mode='2d'), +) + +fp16 = dict( + mode=AMP_TYPE.PARALLEL, + initial_scale=2 ** 4 +) + +lr_scheduler = dict( + type='LinearWarmupLR', + warmup_epochs=5 +) + +num_epochs = 60 diff --git a/tests/test_fp16_optimizer/test.sh b/tests/test_fp16_optimizer/test.sh new file mode 100644 index 000000000..24d0c5423 --- /dev/null +++ b/tests/test_fp16_optimizer/test.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env sh +test_file=$1 + +python $test_file --local_rank $SLURM_PROCID --world_size $SLURM_NPROCS --host $HOST --port 29500 \ No newline at end of file diff --git a/tests/test_fp16_optimizer/test_vit_2d/test_vit_2d.py b/tests/test_fp16_optimizer/test_vit_2d/test_vit_2d.py new file mode 100644 index 000000000..a02ede90c --- /dev/null +++ b/tests/test_fp16_optimizer/test_vit_2d/test_vit_2d.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from pathlib import Path + +import pytest +import torch.autograd + +import colossalai +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.engine import Engine +from colossalai.logging import get_global_dist_logger +from colossalai.nn.layer._parallel_utilities import _gather + +CONFIG_PATH = Path(__file__).parent.parent.joinpath('configs/vit_2d.py') + + +def eval(engine): + engine.eval() + accumulated_loss = 0 + correct_sum = 0 + total_sum = 0 + + for i in range(engine.schedule.num_steps): + output, label, loss = engine.step() + accumulated_loss += loss.detach().cpu().numpy() + + output = _gather( + output[0], + ParallelMode.PARALLEL_2D_ROW, + 1 + ) + output = _gather( + output, + ParallelMode.PARALLEL_2D_COL, + 0, + ) + output = torch.argmax(output, dim=-1) + correct = torch.sum(label[0] == output) + correct_sum += correct + total_sum += label[0].size(0) + avg_loss = accumulated_loss / engine.schedule.num_steps + return correct_sum, total_sum, avg_loss + + +def train(engine): + engine.train() + accumulated_loss = 0 + + for i in range(engine.schedule.num_steps): + output, label, loss = engine.step() + accumulated_loss += loss.squeeze(0).detach().cpu().numpy() + avg_loss = accumulated_loss / engine.schedule.num_steps + return 
avg_loss + + +@pytest.mark.dist +@pytest.mark.skip("This test should be invoked by test.sh in the same folder as it runs on multiple gpus") +def test_2d_parallel_vision_transformer(): + # init dist + model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = colossalai.initialize( + CONFIG_PATH) + logger = get_global_dist_logger() + + engine = Engine(model=model, + train_dataloader=train_dataloader, + test_dataloader=test_dataloader, + criterion=criterion, + optimizer=optimizer, + lr_scheduler=lr_scheduler, + schedule=schedule) + + logger.info('start training') + for epoch in range(gpc.config.num_epochs): + train_loss = train(engine) + + logger.info(f'epoch {epoch} - train loss: {train_loss}') + + if epoch % 2 == 0: + correct_sum, total_sum, eval_loss = eval(engine) + logger.info( + f'epoch {epoch} - eval loss: {eval_loss}, total: {total_sum}, ' + f'correct: {correct_sum}, acc: {correct_sum / total_sum}') + + +if __name__ == '__main__': + test_2d_parallel_vision_transformer() diff --git a/tests/test_layers/test.sh b/tests/test_layers/test.sh new file mode 100644 index 000000000..24d0c5423 --- /dev/null +++ b/tests/test_layers/test.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env sh +test_file=$1 + +python $test_file --local_rank $SLURM_PROCID --world_size $SLURM_NPROCS --host $HOST --port 29500 \ No newline at end of file diff --git a/tests/test_layers/test_1d/common.py b/tests/test_layers/test_1d/common.py new file mode 100644 index 000000000..64d4601cb --- /dev/null +++ b/tests/test_layers/test_1d/common.py @@ -0,0 +1,13 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch + +DEPTH = 2 +BATCH_SIZE = 8 +SEQ_LENGTH = 8 +HIDDEN_SIZE = 8 + + +def check_equal(A, B): + assert torch.allclose(A, B, rtol=1e-5, atol=1e-2) == True diff --git a/tests/test_layers/test_1d/test_1d.py b/tests/test_layers/test_1d/test_1d.py new file mode 100644 index 000000000..e89cfe972 --- /dev/null +++ b/tests/test_layers/test_1d/test_1d.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import pytest + +from colossalai.core import global_context as gpc +from colossalai.initialize import init_dist +from test_layer import check_linear_col, check_linear_row + +CONFIG = dict( + parallel=dict( + pipeline=dict(size=1), + tensor=dict( + size=2, + mode='1d' + ) + ), +) + + +def check_layer(): + check_linear_col() + check_linear_row() + # check_attention() + # check_mlp() + + +@pytest.mark.dist +@pytest.mark.skip("This test should be invoked by test.sh in the same folder as it runs on multiple gpus") +def test_2d(): + init_dist(config=CONFIG) + gpc.set_seed() + check_layer() + gpc.destroy() + + +if __name__ == '__main__': + test_2d() diff --git a/tests/test_layers/test_1d/test_layer.py b/tests/test_layers/test_1d/test_layer.py new file mode 100644 index 000000000..59551a5ca --- /dev/null +++ b/tests/test_layers/test_1d/test_layer.py @@ -0,0 +1,211 @@ +import torch +import torch.distributed as dist +from torch.nn import Parameter + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.nn import Linear1D_Col, Linear1D_Row +# TransformerMLP1D, \ +# TransformerSelfAttention1D, TransformerEncoderLayer1D +from colossalai.utils import get_current_device, print_rank_0 +from common import HIDDEN_SIZE, DEPTH, BATCH_SIZE, SEQ_LENGTH, check_equal + + +def check_linear_col(): + device = get_current_device() + dtype = torch.float32 + INPUT_SIZE = HIDDEN_SIZE + OUTPUT_SIZE = 2 * HIDDEN_SIZE + + i = 
gpc.get_local_rank(ParallelMode.PARALLEL_1D) + + layer = Linear1D_Col(INPUT_SIZE, OUTPUT_SIZE, gather_output=True) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=device) + dist.broadcast(A_master, src=0) + A = A_master.clone() + A.requires_grad = True + + W_shape = (OUTPUT_SIZE, INPUT_SIZE) + W_master = torch.randn(W_shape, dtype=dtype, device=device) + dist.broadcast(W_master, src=0) + W = torch.chunk(W_master, DEPTH, dim=0)[i] + W = W.clone() + W.requires_grad = True + + B_shape = (OUTPUT_SIZE) + B_master = torch.randn(B_shape, dtype=dtype, device=device) + dist.broadcast(B_master, src=0) + B = torch.chunk(B_master, DEPTH, dim=0)[i] + B = B.clone() + B.requires_grad = True + + layer.weight = Parameter(W) + layer.bias = Parameter(B) + out = layer(A) + + A_master = A_master.clone() + A_master.requires_grad = True + W_master = W_master.clone() + W_master.requires_grad = True + B_master = B_master.clone() + B_master.requires_grad = True + C_master = torch.matmul(A_master, W_master.transpose(0, 1)) + B_master + C = C_master.clone() + + check_equal(out, C) + print_rank_0('linear_col gather_output forward: pass') + + grad_shape = C_master.shape + grad_master = torch.randn(grad_shape, dtype=dtype, device=get_current_device()) + dist.broadcast(grad_master, src=0) + grad = grad_master.detach() + out.backward(grad) + + C_master.backward(grad) + A_grad = A_master.grad + check_equal(A_grad, A.grad) + + W_grad = W_master.grad + W_grad = torch.chunk(W_grad, DEPTH, dim=0)[i] + check_equal(W_grad, layer.weight.grad) + + B_grad = B_master.grad + B_grad = torch.chunk(B_grad, DEPTH, dim=0)[i] + check_equal(B_grad, layer.bias.grad) + + print_rank_0('linear_col gather_output backward: pass') + + +def check_linear_row(): + device = get_current_device() + dtype = torch.float32 + INPUT_SIZE = HIDDEN_SIZE + OUTPUT_SIZE = 2 * HIDDEN_SIZE + + i = gpc.get_local_rank(ParallelMode.PARALLEL_1D) + + layer = Linear1D_Row(OUTPUT_SIZE, INPUT_SIZE, parallel_input=False) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, OUTPUT_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=device) + dist.broadcast(A_master, src=0) + A = A_master.clone() + A.requires_grad = True + + W_shape = (INPUT_SIZE, OUTPUT_SIZE) + W_master = torch.randn(W_shape, dtype=dtype, device=device) + dist.broadcast(W_master, src=0) + W = torch.chunk(W_master, DEPTH, dim=-1)[i] + W = W.clone() + W.requires_grad = True + + B_shape = (INPUT_SIZE) + B_master = torch.randn(B_shape, dtype=dtype, device=device) + dist.broadcast(B_master, src=0) + B = B_master.clone() + B.requires_grad = True + + layer.weight = Parameter(W) + layer.bias = Parameter(B) + out = layer(A) + + A_master = A_master.clone() + A_master.requires_grad = True + W_master = W_master.clone() + W_master.requires_grad = True + B_master = B_master.clone() + B_master.requires_grad = True + C_master = torch.matmul(A_master, W_master.transpose(0, 1)) + B_master + C = C_master.clone() + + check_equal(out, C) + print_rank_0('linear_row no parallel_input forward: pass') + + grad_shape = C_master.shape + grad_master = torch.randn(grad_shape, dtype=dtype, device=get_current_device()) + dist.broadcast(grad_master, src=0) + grad = grad_master.detach() + out.backward(grad) + + C_master.backward(grad) + A_grad = A_master.grad + check_equal(A_grad, A.grad) + + W_grad = W_master.grad + W_grad = torch.chunk(W_grad, DEPTH, dim=-1)[i] + check_equal(W_grad, layer.weight.grad) + + B_grad = B_master.grad + check_equal(B_grad, layer.bias.grad) + + 
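+    # Unlike the column-parallel case above, the bias of Linear1D_Row is kept
+    # replicated on every rank, so its gradient is compared against the full
+    # (un-chunked) reference gradient.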
print_rank_0('linear_row no parallel_input backward: pass') + +# +# def check_attention(): +# device = get_current_device() +# dtype = torch.float32 +# INPUT_SIZE = HIDDEN_SIZE +# NUM_ATTENTION_HEADS = 2 +# +# i = gpc.get_local_rank(ParallelMode.PARALLEL_1D) +# +# layer = TransformerSelfAttention1D( +# 1, +# HIDDEN_SIZE // NUM_ATTENTION_HEADS, +# HIDDEN_SIZE, +# NUM_ATTENTION_HEADS, +# 0.5 +# ) +# +# A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE) +# A_master = torch.randn(A_shape, dtype=dtype, device=device) +# torch.distributed.broadcast(A_master, src=0) +# A = A_master.clone() +# A.requires_grad = True +# +# mask_shape = (BATCH_SIZE, NUM_ATTENTION_HEADS // DEPTH, SEQ_LENGTH, SEQ_LENGTH) +# attention_mask = torch.zeros(mask_shape, dtype=dtype, device=device) +# +# out = layer(A, attention_mask) +# assert out.shape == (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE) +# print_rank_0('self attention forward: pass') +# +# grad_shape = out.shape +# grad = torch.randn(grad_shape, dtype=dtype, device=device) +# +# out.backward(grad) +# assert A.grad.shape == A.shape +# print_rank_0('self attention backward: pass') +# +# +# def check_mlp(): +# device = get_current_device() +# dtype = torch.float32 +# INPUT_SIZE = HIDDEN_SIZE +# +# i = gpc.get_local_rank(ParallelMode.PARALLEL_1D) +# +# layer = TransformerMLP1D( +# HIDDEN_SIZE, +# HIDDEN_SIZE, +# 4.0 +# ) +# +# A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE) +# A_master = torch.randn(A_shape, dtype=dtype, device=device) +# torch.distributed.broadcast(A_master, src=0) +# A = A_master.clone() +# A.requires_grad = True +# +# out = layer(A) +# assert out.shape == (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE) +# print_rank_0('mlp forward: pass') +# +# grad_shape = out.shape +# grad = torch.randn(grad_shape, dtype=dtype, device=device) +# +# out.backward(grad) +# assert A.grad.shape == A.shape +# print_rank_0('mlp backward: pass') diff --git a/tests/test_layers/test_2d/common.py b/tests/test_layers/test_2d/common.py new file mode 100644 index 000000000..00011e9a9 --- /dev/null +++ b/tests/test_layers/test_2d/common.py @@ -0,0 +1,13 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch + +DEPTH = 2 +BATCH_SIZE = 8 +SEQ_LENGTH = 8 +HIDDEN_SIZE = 8 + + +def check_equal(A, B): + assert torch.allclose(A, B, rtol=1e-5, atol=1e-2) == True diff --git a/tests/test_layers/test_2d/test_2d.py b/tests/test_layers/test_2d/test_2d.py new file mode 100644 index 000000000..994b2d37a --- /dev/null +++ b/tests/test_layers/test_2d/test_2d.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import pytest + +from colossalai.core import global_context as gpc +from colossalai.initialize import init_dist +from test_layer import check_linear, check_layernorm, check_attention, check_mlp, check_transformerlayer +from test_operation import check_AB, check_ABT, check_ATB + +CONFIG = dict( + parallel=dict( + pipeline=dict(size=1), + tensor=dict( + size=4, + mode='2d' + ) + ), +) + + +def check_operations(): + check_AB() + check_ABT() + check_ATB() + + +def check_layer(): + check_linear() + check_layernorm() + check_attention() + check_mlp() + check_transformerlayer() + + +@pytest.mark.dist +@pytest.mark.skip("This test should be invoked by test.sh in the same folder as it runs on multiple gpus") +def test_2d(): + init_dist(config=CONFIG) + gpc.set_seed() + check_operations() + check_layer() + gpc.destroy() + + +if __name__ == '__main__': + test_2d() diff --git a/tests/test_layers/test_2d/test_layer.py b/tests/test_layers/test_2d/test_layer.py new file mode 100644 index 
000000000..b8404a488 --- /dev/null +++ b/tests/test_layers/test_2d/test_layer.py @@ -0,0 +1,248 @@ +import torch +from torch.nn import Parameter + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.nn import Linear2D, LayerNorm2D, TransformerSelfAttention2D, TransformerMLP2D, TransformerLayer2D +from colossalai.utils import get_current_device, print_rank_0 +from common import HIDDEN_SIZE, DEPTH, BATCH_SIZE, SEQ_LENGTH, check_equal + + +def check_linear(): + device = get_current_device() + dtype = torch.float32 + INPUT_SIZE = HIDDEN_SIZE + OUTPUT_SIZE = 2 * HIDDEN_SIZE + + j = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) + i = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) + + layer = Linear2D(INPUT_SIZE, OUTPUT_SIZE) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=device) + torch.distributed.broadcast(A_master, src=0) + A = torch.chunk(A_master, DEPTH, dim=0)[i] + A = torch.chunk(A, DEPTH, dim=-1)[j] + A = A.clone() + A.requires_grad = True + + W_shape = (INPUT_SIZE, OUTPUT_SIZE) + W_master = torch.randn(W_shape, dtype=dtype, device=device) + torch.distributed.broadcast(W_master, src=0) + W = torch.chunk(W_master, DEPTH, dim=0)[i] + W = torch.chunk(W, DEPTH, dim=-1)[j] + W = W.clone() + W.requires_grad = True + + B_shape = (OUTPUT_SIZE) + B_master = torch.randn(B_shape, dtype=dtype, device=device) + torch.distributed.broadcast(B_master, src=0) + B = torch.chunk(B_master, DEPTH, dim=0)[j] + B = B.clone() + B.requires_grad = True + + layer.weight = Parameter(W) + layer.bias = Parameter(B) + out = layer(A) + + A_master = A_master.clone() + A_master.requires_grad = True + W_master = W_master.clone() + W_master.requires_grad = True + B_master = B_master.clone() + B_master.requires_grad = True + C_master = torch.matmul(A_master, W_master) + B_master + C = torch.chunk(C_master, DEPTH, dim=0)[i] + C = torch.chunk(C, DEPTH, dim=-1)[j] + + check_equal(out, C) + print_rank_0('linear forward: pass') + + grad_shape = C_master.shape + grad_master = torch.randn(grad_shape, dtype=dtype, device=get_current_device()) + torch.distributed.broadcast(grad_master, src=0) + grad = torch.chunk(grad_master, DEPTH, dim=0)[i] + grad = torch.chunk(grad, DEPTH, dim=-1)[j] + out.backward(grad) + + C_master.backward(grad_master) + A_grad = A_master.grad + A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i] + A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[j] + check_equal(A_grad, A.grad) + + W_grad = W_master.grad + W_grad = torch.chunk(W_grad, DEPTH, dim=0)[i] + W_grad = torch.chunk(W_grad, DEPTH, dim=-1)[j] + check_equal(W_grad, layer.weight.grad) + + B_grad = B_master.grad + B_grad = torch.chunk(B_grad, DEPTH, dim=0)[j] + if i == 0: + check_equal(B_grad, layer.bias.grad) + + print_rank_0('linear backward: pass') + + +def check_layernorm(): + device = get_current_device() + dtype = torch.float32 + INPUT_SIZE = HIDDEN_SIZE + EPS = 1e-12 + + j = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) + i = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) + + layernorm = LayerNorm2D(INPUT_SIZE) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=device) + torch.distributed.broadcast(A_master, src=0) + A = torch.chunk(A_master, DEPTH, dim=0)[i] + A = torch.chunk(A, DEPTH, dim=-1)[j] + A = A.clone() + A.requires_grad = True + + out = layernorm(A) + + A_master = A_master.clone() + A_master.requires_grad = True + E_master = torch.sum(A_master, dim=-1, 
keepdim=True) + E_master /= INPUT_SIZE + V_master = torch.sum(A_master * A_master, dim=-1, keepdim=True) + V_master /= INPUT_SIZE + V_master = V_master - E_master * E_master + V_master = 1.0 / torch.sqrt(V_master + EPS) + C_master = (A_master - E_master) * V_master + C = torch.chunk(C_master, DEPTH, dim=0)[i] + C = torch.chunk(C, DEPTH, dim=-1)[j] + + check_equal(out, C) + print_rank_0('layer norm forward: pass') + + grad_shape = C_master.shape + grad_master = torch.randn(grad_shape, dtype=dtype, device=get_current_device()) + torch.distributed.broadcast(grad_master, src=0) + grad = torch.chunk(grad_master, DEPTH, dim=0)[i] + grad = torch.chunk(grad, DEPTH, dim=-1)[j] + out.backward(grad) + + C_master.backward(grad_master) + A_grad = A_master.grad + A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i] + A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[j] + check_equal(A_grad, A.grad) + print_rank_0('layer norm backward: pass') + + +def check_attention(): + device = get_current_device() + dtype = torch.float32 + INPUT_SIZE = HIDDEN_SIZE + NUM_ATTENTION_HEADS = 2 + + j = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) + i = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) + + layer = TransformerSelfAttention2D( + HIDDEN_SIZE, + NUM_ATTENTION_HEADS, + attention_dropout_prob=0.5, + hidden_dropout_prob=0.5, + ) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=device) + torch.distributed.broadcast(A_master, src=0) + A = torch.chunk(A_master, DEPTH, dim=0)[i] + A = torch.chunk(A, DEPTH, dim=-1)[j] + A = A.clone() + A.requires_grad = True + + mask_shape = (BATCH_SIZE // DEPTH, NUM_ATTENTION_HEADS // DEPTH, SEQ_LENGTH, SEQ_LENGTH) + attention_mask = torch.zeros(mask_shape, dtype=dtype, device=device) + + out = layer(A, attention_mask) + assert out.shape == (BATCH_SIZE // DEPTH, SEQ_LENGTH, INPUT_SIZE // DEPTH) + print_rank_0('self attention forward: pass') + + grad_shape = out.shape + grad = torch.randn(grad_shape, dtype=dtype, device=device) + + out.backward(grad) + assert A.grad.shape == A.shape + print_rank_0('self attention backward: pass') + + +def check_mlp(): + device = get_current_device() + dtype = torch.float32 + INPUT_SIZE = HIDDEN_SIZE + + j = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) + i = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) + + layer = TransformerMLP2D( + HIDDEN_SIZE, + dropout_prob=0.5, + act_func='gelu', + ) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=device) + torch.distributed.broadcast(A_master, src=0) + A = torch.chunk(A_master, DEPTH, dim=0)[i] + A = torch.chunk(A, DEPTH, dim=-1)[j] + A = A.clone() + A.requires_grad = True + + out = layer(A) + assert out.shape == (BATCH_SIZE // DEPTH, SEQ_LENGTH, INPUT_SIZE // DEPTH) + print_rank_0('mlp forward: pass') + + grad_shape = out.shape + grad = torch.randn(grad_shape, dtype=dtype, device=device) + + out.backward(grad) + assert A.grad.shape == A.shape + print_rank_0('mlp backward: pass') + + +def check_transformerlayer(): + device = get_current_device() + dtype = torch.float32 + INPUT_SIZE = HIDDEN_SIZE + NUM_ATTENTION_HEADS = 2 + + j = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) + i = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) + + layer = TransformerLayer2D( + HIDDEN_SIZE, + NUM_ATTENTION_HEADS, + act_func='gelu', + attention_dropout_prob=0.5, + hidden_dropout_prob=0.5) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=device) + 
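# Illustrative sketch (annotation, not part of the original test file): the
# chunk-twice pattern that every 2D check above repeats, written once as a
# helper. dim 0 (batch) is split by the PARALLEL_2D_COL rank `i` and the last
# dim (hidden) by the PARALLEL_2D_ROW rank `j`, so each of the DEPTH x DEPTH
# ranks holds one (BATCH_SIZE // DEPTH, SEQ_LENGTH, HIDDEN_SIZE // DEPTH)
# block of the master tensor. Plain single-process PyTorch; `shard_2d` is a
# hypothetical name, not an API of the library.
import torch

def shard_2d(master: torch.Tensor, i: int, j: int, depth: int) -> torch.Tensor:
    block = torch.chunk(master, depth, dim=0)[i]      # batch split by col rank i
    block = torch.chunk(block, depth, dim=-1)[j]      # hidden split by row rank j
    return block.clone()

full = torch.randn(8, 8, 8)                           # (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE)
local = shard_2d(full, i=1, j=0, depth=2)             # DEPTH = 2 in common.py
assert local.shape == (4, 8, 4)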
torch.distributed.broadcast(A_master, src=0) + A = torch.chunk(A_master, DEPTH, dim=0)[i] + A = torch.chunk(A, DEPTH, dim=-1)[j] + A = A.clone() + A.requires_grad = True + + mask_shape = (BATCH_SIZE // DEPTH, NUM_ATTENTION_HEADS // DEPTH, SEQ_LENGTH, SEQ_LENGTH) + attention_mask = torch.zeros(mask_shape, dtype=dtype, device=device) + + out = layer(A, attention_mask) + assert out.shape == (BATCH_SIZE // DEPTH, SEQ_LENGTH, INPUT_SIZE // DEPTH) + print_rank_0('transformerlayer forward: pass') + + grad_shape = out.shape + grad = torch.randn(grad_shape, dtype=dtype, device=device) + + out.backward(grad) + assert A.grad.shape == A.shape + print_rank_0('transformerlayer backward: pass') diff --git a/tests/test_layers/test_2d/test_operation.py b/tests/test_layers/test_2d/test_operation.py new file mode 100644 index 000000000..74772a837 --- /dev/null +++ b/tests/test_layers/test_2d/test_operation.py @@ -0,0 +1,240 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.nn.layer.parallel_2d import Matmul_AB_2D, Matmul_ABT_2D, Matmul_ATB_2D +from colossalai.utils import get_current_device +from colossalai.utils import print_rank_0 +from common import check_equal, BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE, DEPTH + + +def check_AB(): + data_parallel_rank = 0 if not gpc.is_initialized(ParallelMode.DATA) else gpc.get_local_rank(ParallelMode.DATA) + pipeline_parallel_rank = 0 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_local_rank( + ParallelMode.PIPELINE) + pipeline_parallel_size = 1 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_world_size( + ParallelMode.PIPELINE) + tensor_parallel_size = gpc.get_world_size(ParallelMode.TENSOR) + + dtype = torch.float + j = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) + i = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=get_current_device()) + torch.distributed.broadcast(A_master, src=0) + A = torch.chunk(A_master, DEPTH, dim=0)[i] + A = torch.chunk(A, DEPTH, dim=-1)[j] + A = A.clone() + A.requires_grad = True + + B_shape = (HIDDEN_SIZE, 4 * HIDDEN_SIZE) + B_master = torch.randn(B_shape, dtype=dtype, device=get_current_device()) + torch.distributed.broadcast(B_master, src=0) + B = torch.chunk(B_master, DEPTH, dim=0)[i] + B = torch.chunk(B, DEPTH, dim=-1)[j] + B = B.clone() + B.requires_grad = True + + out_shape = (BATCH_SIZE // DEPTH, SEQ_LENGTH, 4 * HIDDEN_SIZE // DEPTH) + + out = Matmul_AB_2D.apply( + A, B, + DEPTH, + out_shape, + i, j, + ParallelMode.PARALLEL_2D_ROW, + ParallelMode.PARALLEL_2D_COL, + data_parallel_rank, + pipeline_parallel_rank, + pipeline_parallel_size, + tensor_parallel_size + ) + + C_shape = (BATCH_SIZE, SEQ_LENGTH, 4 * HIDDEN_SIZE) + A_master = A_master.clone() + A_master.requires_grad = True + B_master = B_master.clone() + B_master.requires_grad = True + C_master = torch.matmul(A_master, B_master) + C = torch.chunk(C_master, DEPTH, dim=0)[i] + C = torch.chunk(C, DEPTH, dim=-1)[j] + # check forward correctness + check_equal(out, C) + print_rank_0('AB forward: pass') + + grad_shape = C_master.shape + grad_master = torch.randn(grad_shape, dtype=dtype, device=get_current_device()) + torch.distributed.broadcast(grad_master, src=0) + grad = torch.chunk(grad_master, DEPTH, dim=0)[i] + grad = torch.chunk(grad, DEPTH, dim=-1)[j] + + out.backward(grad) + + 
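# Illustrative sketch (annotation, not part of the original test file): the
# single-process identity that check_AB verifies in parallel. The (i, j) block
# of C = A @ B equals the sum over p of A[i, p] @ B[p, j], which is why
# Matmul_AB_2D has to exchange blocks between its row and column groups before
# accumulating partial products. Plain PyTorch, no distributed calls.
import torch

depth = 2
A = torch.randn(8, 8, 8)             # (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE)
B = torch.randn(8, 32)               # (HIDDEN_SIZE, 4 * HIDDEN_SIZE)
C = torch.matmul(A, B)

i, j = 1, 0                          # an arbitrary coordinate in the 2 x 2 mesh
A_blocks = [torch.chunk(row, depth, dim=-1) for row in torch.chunk(A, depth, dim=0)]
B_blocks = [torch.chunk(row, depth, dim=-1) for row in torch.chunk(B, depth, dim=0)]
C_block = torch.chunk(torch.chunk(C, depth, dim=0)[i], depth, dim=-1)[j]

partial = sum(torch.matmul(A_blocks[i][p], B_blocks[p][j]) for p in range(depth))
assert torch.allclose(partial, C_block, rtol=1e-5, atol=1e-5)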
C_master.backward(grad_master) + A_grad = A_master.grad + A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i] + A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[j] + # check backward correctness + check_equal(A_grad, A.grad) + + B_grad = B_master.grad + B_grad = torch.chunk(B_grad, DEPTH, dim=0)[i] + B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[j] + # check backward correctness + check_equal(B_grad, B.grad) + print_rank_0('AB backward: pass') + + +def check_ABT(): + data_parallel_rank = 0 if not gpc.is_initialized(ParallelMode.DATA) else gpc.get_local_rank(ParallelMode.DATA) + pipeline_parallel_rank = 0 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_local_rank( + ParallelMode.PIPELINE) + pipeline_parallel_size = 1 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_world_size( + ParallelMode.PIPELINE) + tensor_parallel_size = gpc.get_world_size(ParallelMode.TENSOR) + + dtype = torch.float + device = get_current_device() + + j = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) + i = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) + + C_shape = (BATCH_SIZE, SEQ_LENGTH, 4 * HIDDEN_SIZE) + C_master = torch.randn(C_shape, dtype=dtype, device=device) + torch.distributed.broadcast(C_master, src=0) + C = torch.chunk(C_master, DEPTH, dim=0)[i] + C = torch.chunk(C, DEPTH, dim=-1)[j] + C = C.clone() + C.requires_grad = True + + B_shape = (HIDDEN_SIZE, 4 * HIDDEN_SIZE) + B_master = torch.randn(B_shape, dtype=dtype, device=device) + torch.distributed.broadcast(B_master, src=0) + B = torch.chunk(B_master, DEPTH, dim=0)[i] + B = torch.chunk(B, DEPTH, dim=-1)[j] + B = B.clone() + B.requires_grad = True + + out = Matmul_ABT_2D.apply( + C, B, + DEPTH, (BATCH_SIZE // DEPTH, SEQ_LENGTH, HIDDEN_SIZE // DEPTH), + i, j, + ParallelMode.PARALLEL_2D_ROW, + ParallelMode.PARALLEL_2D_COL, + data_parallel_rank, + pipeline_parallel_rank, + pipeline_parallel_size, + tensor_parallel_size + ) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE) + C_master = C_master.clone() + C_master.requires_grad = True + B_master = B_master.clone() + B_master.requires_grad = True + A_master = torch.matmul(C_master, B_master.transpose(0, 1)) + A = torch.chunk(A_master, DEPTH, dim=0)[i] + A = torch.chunk(A, DEPTH, dim=-1)[j] + check_equal(out, A) + print_rank_0('ABT forward: pass') + + grad_shape = A_master.shape + grad_master = torch.randn(grad_shape, dtype=dtype, device=device) + torch.distributed.broadcast(grad_master, src=0) + grad = torch.chunk(grad_master, DEPTH, dim=0)[i] + grad = torch.chunk(grad, DEPTH, dim=-1)[j] + + # backward + out.backward(grad) + + A_master.backward(grad_master) + C_grad = C_master.grad + C_grad = torch.chunk(C_grad, DEPTH, dim=0)[i] + C_grad = torch.chunk(C_grad, DEPTH, dim=-1)[j] + check_equal(C_grad, C.grad) + + B_grad = B_master.grad + B_grad = torch.chunk(B_grad, DEPTH, dim=0)[i] + B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[j] + check_equal(B_grad, B.grad) + print_rank_0('ABT backward: pass') + + +def check_ATB(): + data_parallel_rank = 0 if not gpc.is_initialized(ParallelMode.DATA) else gpc.get_local_rank(ParallelMode.DATA) + pipeline_parallel_rank = 0 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_local_rank( + ParallelMode.PIPELINE) + pipeline_parallel_size = 1 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_world_size( + ParallelMode.PIPELINE) + tensor_parallel_size = gpc.get_world_size(ParallelMode.TENSOR) + + device = get_current_device() + dtype = torch.float + + j = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) + i = 
gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=device) + torch.distributed.broadcast(A_master, src=0) + A = torch.chunk(A_master, DEPTH, dim=0)[i] + A = torch.chunk(A, DEPTH, dim=-1)[j] + A = A.clone() + A.requires_grad = True + + C_shape = (BATCH_SIZE, SEQ_LENGTH, 4 * HIDDEN_SIZE) + C_master = torch.randn(C_shape, dtype=dtype, device=device) + torch.distributed.broadcast(C_master, src=0) + C = torch.chunk(C_master, DEPTH, dim=0)[i] + C = torch.chunk(C, DEPTH, dim=-1)[j] + C = C.clone() + C.requires_grad = True + + out = Matmul_ATB_2D.apply( + A, C, + DEPTH, (HIDDEN_SIZE // DEPTH, 4 * HIDDEN_SIZE // DEPTH), + i, j, + ParallelMode.PARALLEL_2D_ROW, + ParallelMode.PARALLEL_2D_COL, + data_parallel_rank, + pipeline_parallel_rank, + pipeline_parallel_size, + tensor_parallel_size + ) + + B_shape = (HIDDEN_SIZE, 4 * HIDDEN_SIZE) + A_master = A_master.clone() + A_master.requires_grad = True + C_master = C_master.clone() + C_master.requires_grad = True + B_master = torch.matmul( + A_master.view(-1, A_master.shape[-1]).transpose(0, 1), + C_master.view(-1, C_master.shape[-1])) + B = torch.chunk(B_master, DEPTH, dim=0)[i] + B = torch.chunk(B, DEPTH, dim=-1)[j] + check_equal(out, B) + print_rank_0('ATB forward: pass') + + grad_shape = B_master.shape + grad_master = torch.randn(grad_shape, dtype=dtype, device=device) + torch.distributed.broadcast(grad_master, src=0) + grad = torch.chunk(grad_master, DEPTH, dim=0)[i] + grad = torch.chunk(grad, DEPTH, dim=-1)[j] + + out.backward(grad) + + B_master.backward(grad_master) + A_grad = A_master.grad + A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i] + A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[j] + check_equal(A_grad, A.grad) + + C_grad = C_master.grad + C_grad = torch.chunk(C_grad, DEPTH, dim=0)[i] + C_grad = torch.chunk(C_grad, DEPTH, dim=-1)[j] + check_equal(C_grad, C.grad) + print_rank_0('ATB backward: pass') diff --git a/tests/test_layers/test_2p5d/common.py b/tests/test_layers/test_2p5d/common.py new file mode 100644 index 000000000..d7078b37d --- /dev/null +++ b/tests/test_layers/test_2p5d/common.py @@ -0,0 +1,11 @@ +import torch + +TESSERACT_DIM = 2 +TESSERACT_DEP = 2 +BATCH_SIZE = 8 +SEQ_LENGTH = 8 +HIDDEN_SIZE = 8 + + +def check_equal(A, B): + assert torch.allclose(A, B, rtol=1e-5, atol=1e-2) == True diff --git a/tests/test_layers/test_2p5d/test.sh b/tests/test_layers/test_2p5d/test.sh new file mode 100644 index 000000000..3eb567435 --- /dev/null +++ b/tests/test_layers/test_2p5d/test.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +python -m torch.distributed.launch test_2p5d.py --nproc_per_node 8 --host $HOST --port 29516 --world_size 8 diff --git a/tests/test_layers/test_2p5d/test_2p5d.py b/tests/test_layers/test_2p5d/test_2p5d.py new file mode 100644 index 000000000..488d38d87 --- /dev/null +++ b/tests/test_layers/test_2p5d/test_2p5d.py @@ -0,0 +1,41 @@ +import pytest + +from colossalai.core import global_context as gpc +from colossalai.initialize import init_dist +from test_layer import check_linear, check_layernorm, check_attention, check_mlp, check_transformerlayer +from test_operation import check_AB, check_ABT, check_ATB + +CONFIG = dict( + parallel=dict( + pipeline=dict(size=1), + tensor=dict(size=8, mode='2.5d', depth=2), + ), +) + + +def check_operations(): + check_AB() + check_ABT() + check_ATB() + + +def check_layer(): + check_linear() + check_layernorm() + check_attention() + check_mlp() + check_transformerlayer() + + +@pytest.mark.dist 
+@pytest.mark.skip("This test should be invoked by test.sh in the same folder as it runs on multiple gpus") +def test_2p5d(): + init_dist(config=CONFIG) + gpc.set_seed() + check_layer() + check_operations() + gpc.destroy() + + +if __name__ == '__main__': + test_2p5d() diff --git a/tests/test_layers/test_2p5d/test_layer.py b/tests/test_layers/test_2p5d/test_layer.py new file mode 100644 index 000000000..ffe4678b9 --- /dev/null +++ b/tests/test_layers/test_2p5d/test_layer.py @@ -0,0 +1,265 @@ +from torch.nn import Parameter + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.nn import (Linear2p5D, LayerNorm2p5D, TransformerSelfAttention2p5D, TransformerMLP2p5D, + TransformerLayer2p5D) +from colossalai.utils import get_current_device +from colossalai.utils import print_rank_0 +from common import * + + +def check_linear(): + device = get_current_device() + dtype = torch.float32 + INPUT_SIZE = HIDDEN_SIZE + OUTPUT_SIZE = 2 * HIDDEN_SIZE + + i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL) + j = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW) + k = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP) + + layer = Linear2p5D( + INPUT_SIZE, + OUTPUT_SIZE, + dtype=dtype, + skip_bias_add=False) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=device) + torch.distributed.broadcast(A_master, src=0) + A = torch.chunk(A_master, TESSERACT_DIM, dim=0)[i] + A = torch.chunk(A, TESSERACT_DIM, dim=-1)[j] + A = A.clone() + A.requires_grad = True + + W_shape = (INPUT_SIZE, OUTPUT_SIZE) + W_master = torch.randn(W_shape, dtype=dtype, device=device) + torch.distributed.broadcast(W_master, src=0) + W = torch.chunk(W_master, TESSERACT_DIM, dim=0)[i] + W = torch.chunk(W, TESSERACT_DIM, dim=-1)[j] + W = W.clone() + W.requires_grad = True + + B_shape = (OUTPUT_SIZE) + B_master = torch.randn(B_shape, dtype=dtype, device=device) + torch.distributed.broadcast(B_master, src=0) + B = torch.chunk(B_master, TESSERACT_DIM, dim=0)[j] + B = B.clone() + B.requires_grad = True + + layer.weight = Parameter(W) + layer.bias = Parameter(B) + out = layer(A) + bias = layer.bias + + A_master = A_master.clone() + A_master.requires_grad = True + W_master = W_master.clone() + W_master.requires_grad = True + B_master = B_master.clone() + B_master.requires_grad = True + C_master = torch.matmul(A_master, W_master) + B_master + C = torch.chunk(C_master, TESSERACT_DIM, dim=0)[i] + C = torch.chunk(C, TESSERACT_DIM, dim=-1)[j] + + check_equal(out, C) + print_rank_0('linear forward: pass') + + grad_shape = C_master.shape + grad_master = torch.randn(grad_shape, dtype=dtype, device=get_current_device()) + torch.distributed.broadcast(grad_master, src=0) + grad = torch.chunk(grad_master, TESSERACT_DIM, dim=0)[i] + grad = torch.chunk(grad, TESSERACT_DIM, dim=-1)[j] + out.backward(grad) + + C_master.backward(grad_master) + A_grad = A_master.grad + A_grad = torch.chunk(A_grad, TESSERACT_DIM, dim=0)[i] + A_grad = torch.chunk(A_grad, TESSERACT_DIM, dim=-1)[j] + check_equal(A_grad, A.grad) + + W_grad = W_master.grad + W_grad = torch.chunk(W_grad, TESSERACT_DIM, dim=0)[i] + W_grad = torch.chunk(W_grad, TESSERACT_DIM, dim=-1)[j] + check_equal(W_grad, layer.weight.grad) + + B_grad = B_master.grad + B_grad = torch.chunk(B_grad, TESSERACT_DIM, dim=0)[j] + if i == 0: + check_equal(B_grad, layer.bias.grad) + + print_rank_0('linear backward: pass') + + +def check_layernorm(): + device = get_current_device() + dtype = 
torch.float32 + INPUT_SIZE = HIDDEN_SIZE + EPS = 1e-12 + + i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL) + j = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW) + k = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP) + + layernorm = LayerNorm2p5D( + INPUT_SIZE, + dtype=dtype) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=device) + torch.distributed.broadcast(A_master, src=0) + A = torch.chunk(A_master, TESSERACT_DIM, dim=0)[i] + A = torch.chunk(A, TESSERACT_DIM, dim=-1)[j] + A = A.clone() + A.requires_grad = True + + out = layernorm(A) + + A_master = A_master.clone() + A_master.requires_grad = True + E_master = torch.sum(A_master, dim=-1, keepdim=True) + E_master /= INPUT_SIZE + V_master = torch.sum(A_master * A_master, dim=-1, keepdim=True) + V_master /= INPUT_SIZE + V_master = V_master - E_master * E_master + V_master = 1.0 / torch.sqrt(V_master + EPS) + C_master = (A_master - E_master) * V_master + C = torch.chunk(C_master, TESSERACT_DIM, dim=0)[i] + C = torch.chunk(C, TESSERACT_DIM, dim=-1)[j] + + check_equal(out, C) + print_rank_0('layer norm forward: pass') + + grad_shape = C_master.shape + grad_master = torch.randn(grad_shape, dtype=dtype, device=get_current_device()) + torch.distributed.broadcast(grad_master, src=0) + grad = torch.chunk(grad_master, TESSERACT_DIM, dim=0)[i] + grad = torch.chunk(grad, TESSERACT_DIM, dim=-1)[j] + out.backward(grad) + + C_master.backward(grad_master) + A_grad = A_master.grad + A_grad = torch.chunk(A_grad, TESSERACT_DIM, dim=0)[i] + A_grad = torch.chunk(A_grad, TESSERACT_DIM, dim=-1)[j] + check_equal(A_grad, A.grad) + print_rank_0('layer norm backward: pass') + + +def check_attention(): + device = get_current_device() + dtype = torch.float32 + INPUT_SIZE = HIDDEN_SIZE + NUM_ATTENTION_HEADS = 2 + + i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL) + j = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW) + k = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP) + + layer = TransformerSelfAttention2p5D( + HIDDEN_SIZE, NUM_ATTENTION_HEADS, + attention_dropout_prob=0.5, + hidden_dropout_prob=0.5, + dtype=dtype, + ) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=device) + torch.distributed.broadcast(A_master, src=0) + A = torch.chunk(A_master, TESSERACT_DIM, dim=0)[i] + A = torch.chunk(A, TESSERACT_DIM, dim=-1)[j] + A = A.clone() + A.requires_grad = True + + mask_shape = (BATCH_SIZE // TESSERACT_DIM, NUM_ATTENTION_HEADS // TESSERACT_DIM, SEQ_LENGTH, SEQ_LENGTH) + attention_mask = torch.zeros(mask_shape, dtype=dtype, device=device) + + out = layer(A, attention_mask) + assert out.shape == (BATCH_SIZE // TESSERACT_DIM, SEQ_LENGTH, INPUT_SIZE // TESSERACT_DIM) + print_rank_0('self attention forward: pass') + + grad_shape = out.shape + grad = torch.randn(grad_shape, dtype=dtype, device=device) + + out.backward(grad) + assert A.grad.shape == A.shape + print_rank_0('self attention backward: pass') + + +def check_mlp(): + device = get_current_device() + dtype = torch.float32 + INPUT_SIZE = HIDDEN_SIZE + + i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL) + j = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW) + k = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP) + + layer = TransformerMLP2p5D( + HIDDEN_SIZE, + mlp_ratio=1, + dropout_prob=0.5, + act_func='gelu', + dtype=dtype, + ) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=device) + 
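# Illustrative sketch (annotation, not part of the original test file): the
# hand-rolled mean/variance reference used by check_layernorm above is the
# same computation as torch.nn.functional.layer_norm without an affine
# transform, up to floating-point rounding. Single process, plain PyTorch;
# EPS mirrors the epsilon used in the test.
import torch
import torch.nn.functional as F

EPS = 1e-12
x = torch.randn(8, 8, 8)

mean = x.mean(dim=-1, keepdim=True)
var = (x * x).mean(dim=-1, keepdim=True) - mean * mean   # E[x^2] - E[x]^2
ref = (x - mean) / torch.sqrt(var + EPS)

assert torch.allclose(ref, F.layer_norm(x, (x.shape[-1],), eps=EPS), rtol=1e-4, atol=1e-4)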
torch.distributed.broadcast(A_master, src=0) + A = torch.chunk(A_master, TESSERACT_DIM, dim=0)[i] + A = torch.chunk(A, TESSERACT_DIM, dim=-1)[j] + A = A.clone() + A.requires_grad = True + + out = layer(A) + assert out.shape == (BATCH_SIZE // TESSERACT_DIM, SEQ_LENGTH, INPUT_SIZE // TESSERACT_DIM) + print_rank_0('mlp forward: pass') + + grad_shape = out.shape + grad = torch.randn(grad_shape, dtype=dtype, device=device) + + out.backward(grad) + assert A.grad.shape == A.shape + print_rank_0('mlp backward: pass') + + +def check_transformerlayer(): + device = get_current_device() + dtype = torch.float32 + INPUT_SIZE = HIDDEN_SIZE + NUM_ATTENTION_HEADS = 2 + + i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL) + j = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW) + k = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP) + + layer = TransformerLayer2p5D( + HIDDEN_SIZE, + NUM_ATTENTION_HEADS, + act_func='gelu', + attention_dropout_prob=0.5, + hidden_dropout_prob=0.5, + dtype=dtype, + ) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=device) + torch.distributed.broadcast(A_master, src=0) + A = torch.chunk(A_master, TESSERACT_DIM, dim=0)[i] + A = torch.chunk(A, TESSERACT_DIM, dim=-1)[j] + A = A.clone() + A.requires_grad = True + + mask_shape = (BATCH_SIZE // TESSERACT_DIM, NUM_ATTENTION_HEADS // TESSERACT_DIM, SEQ_LENGTH, SEQ_LENGTH) + attention_mask = torch.zeros(mask_shape, dtype=dtype, device=device) + + out = layer(A, attention_mask) + assert out.shape == (BATCH_SIZE // TESSERACT_DIM, SEQ_LENGTH, INPUT_SIZE // TESSERACT_DIM) + print_rank_0('transformerlayer forward: pass') + + grad_shape = out.shape + grad = torch.randn(grad_shape, dtype=dtype, device=device) + + out.backward(grad) + assert A.grad.shape == A.shape + print_rank_0('transformerlayer backward: pass') diff --git a/tests/test_layers/test_2p5d/test_operation.py b/tests/test_layers/test_2p5d/test_operation.py new file mode 100644 index 000000000..5ffaafe2c --- /dev/null +++ b/tests/test_layers/test_2p5d/test_operation.py @@ -0,0 +1,239 @@ +import torch + +from colossalai.context import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.nn.layer.parallel_2p5d._operation import Matmul_AB_2p5D, Matmul_ABT_2p5D, \ + Matmul_ATB_2p5D +from colossalai.utils import get_current_device +from colossalai.utils import print_rank_0 +from common import * + + +def check_AB(): + data_parallel_rank = 0 if not gpc.is_initialized(ParallelMode.DATA) else gpc.get_local_rank(ParallelMode.DATA) + pipeline_parallel_rank = 0 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_local_rank( + ParallelMode.PIPELINE) + pipeline_parallel_size = 1 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_world_size( + ParallelMode.PIPELINE) + tensor_parallel_size = gpc.get_world_size(ParallelMode.TENSOR) + + dtype = torch.float + i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL) + j = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW) + k = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=get_current_device()) + torch.distributed.broadcast(A_master, src=0) + A = torch.chunk(A_master, TESSERACT_DIM, dim=0)[i] + A = torch.chunk(A, TESSERACT_DIM, dim=-1)[j] + A = A.clone() + A.requires_grad = True + + B_shape = (HIDDEN_SIZE, 4 * HIDDEN_SIZE) + B_master = torch.randn(B_shape, dtype=dtype, device=get_current_device()) + torch.distributed.broadcast(B_master, 
src=0) + B = torch.chunk(B_master, TESSERACT_DIM, dim=0)[i] + B = torch.chunk(B, TESSERACT_DIM, dim=-1)[j] + B = B.clone() + B.requires_grad = True + + out_shape = (BATCH_SIZE // TESSERACT_DIM, SEQ_LENGTH, 4 * HIDDEN_SIZE // TESSERACT_DIM) + out = Matmul_AB_2p5D.apply( + A, B, + TESSERACT_DIM, TESSERACT_DEP, out_shape, + i, j, k, + ParallelMode.PARALLEL_2P5D_ROW, + ParallelMode.PARALLEL_2P5D_COL, + ParallelMode.PARALLEL_2P5D_DEP, + data_parallel_rank, + pipeline_parallel_rank, + pipeline_parallel_size, + tensor_parallel_size) + + C_shape = (BATCH_SIZE, SEQ_LENGTH, 4 * HIDDEN_SIZE) + A_master = A_master.clone() + A_master.requires_grad = True + B_master = B_master.clone() + B_master.requires_grad = True + C_master = torch.matmul(A_master, B_master) + C = torch.chunk(C_master, TESSERACT_DIM, dim=0)[i] + C = torch.chunk(C, TESSERACT_DIM, dim=-1)[j] + # check forward correctness + check_equal(out, C) + print_rank_0('AB forward: pass') + + grad_shape = C_master.shape + grad_master = torch.randn(grad_shape, dtype=dtype, device=get_current_device()) + torch.distributed.broadcast(grad_master, src=0) + grad = torch.chunk(grad_master, TESSERACT_DIM, dim=0)[i] + grad = torch.chunk(grad, TESSERACT_DIM, dim=-1)[j] + + out.backward(grad) + + C_master.backward(grad_master) + A_grad = A_master.grad + A_grad = torch.chunk(A_grad, TESSERACT_DIM, dim=0)[i] + A_grad = torch.chunk(A_grad, TESSERACT_DIM, dim=-1)[j] + # check backward correctness + check_equal(A_grad, A.grad) + + B_grad = B_master.grad + B_grad = torch.chunk(B_grad, TESSERACT_DIM, dim=0)[i] + B_grad = torch.chunk(B_grad, TESSERACT_DIM, dim=-1)[j] + # check backward correctness + check_equal(B_grad, B.grad) + print_rank_0('AB backward: pass') + + +def check_ABT(): + data_parallel_rank = 0 if not gpc.is_initialized(ParallelMode.DATA) else gpc.get_local_rank(ParallelMode.DATA) + pipeline_parallel_rank = 0 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_local_rank( + ParallelMode.PIPELINE) + pipeline_parallel_size = 1 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_world_size( + ParallelMode.PIPELINE) + tensor_parallel_size = gpc.get_world_size(ParallelMode.TENSOR) + + dtype = torch.float + device = get_current_device() + + i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL) + j = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW) + k = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP) + + C_shape = (BATCH_SIZE, SEQ_LENGTH, 4 * HIDDEN_SIZE) + C_master = torch.randn(C_shape, dtype=dtype, device=device) + torch.distributed.broadcast(C_master, src=0) + C = torch.chunk(C_master, TESSERACT_DIM, dim=0)[i] + C = torch.chunk(C, TESSERACT_DIM, dim=-1)[j] + C = C.clone() + C.requires_grad = True + + B_shape = (HIDDEN_SIZE, 4 * HIDDEN_SIZE) + B_master = torch.randn(B_shape, dtype=dtype, device=device) + torch.distributed.broadcast(B_master, src=0) + B = torch.chunk(B_master, TESSERACT_DIM, dim=0)[i] + B = torch.chunk(B, TESSERACT_DIM, dim=-1)[j] + B = B.clone() + B.requires_grad = True + + out = Matmul_ABT_2p5D.apply( + C, B, + TESSERACT_DIM, TESSERACT_DEP, (BATCH_SIZE // TESSERACT_DIM, SEQ_LENGTH, HIDDEN_SIZE // TESSERACT_DIM), + i, j, k, + ParallelMode.PARALLEL_2P5D_ROW, + ParallelMode.PARALLEL_2P5D_COL, + ParallelMode.PARALLEL_2P5D_DEP, + data_parallel_rank, + pipeline_parallel_rank, + pipeline_parallel_size, + tensor_parallel_size) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE) + C_master = C_master.clone() + C_master.requires_grad = True + B_master = B_master.clone() + B_master.requires_grad = True + A_master = 
torch.matmul(C_master, B_master.transpose(0, 1)) + A = torch.chunk(A_master, TESSERACT_DIM, dim=0)[i] + A = torch.chunk(A, TESSERACT_DIM, dim=-1)[j] + check_equal(out, A) + print_rank_0('ABT forward: pass') + + grad_shape = A_master.shape + grad_master = torch.randn(grad_shape, dtype=dtype, device=device) + torch.distributed.broadcast(grad_master, src=0) + grad = torch.chunk(grad_master, TESSERACT_DIM, dim=0)[i] + grad = torch.chunk(grad, TESSERACT_DIM, dim=-1)[j] + + # backward + out.backward(grad) + + A_master.backward(grad_master) + C_grad = C_master.grad + C_grad = torch.chunk(C_grad, TESSERACT_DIM, dim=0)[i] + C_grad = torch.chunk(C_grad, TESSERACT_DIM, dim=-1)[j] + check_equal(C_grad, C.grad) + + B_grad = B_master.grad + B_grad = torch.chunk(B_grad, TESSERACT_DIM, dim=0)[i] + B_grad = torch.chunk(B_grad, TESSERACT_DIM, dim=-1)[j] + check_equal(B_grad, B.grad) + print_rank_0('ABT backward: pass') + + +def check_ATB(): + data_parallel_rank = 0 if not gpc.is_initialized(ParallelMode.DATA) else gpc.get_local_rank(ParallelMode.DATA) + pipeline_parallel_rank = 0 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_local_rank( + ParallelMode.PIPELINE) + pipeline_parallel_size = 1 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_world_size( + ParallelMode.PIPELINE) + tensor_parallel_size = gpc.get_world_size(ParallelMode.TENSOR) + + device = get_current_device() + dtype = torch.float + + i = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL) + j = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW) + k = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=device) + torch.distributed.broadcast(A_master, src=0) + A = torch.chunk(A_master, TESSERACT_DIM, dim=0)[i] + A = torch.chunk(A, TESSERACT_DIM, dim=-1)[j] + A = A.clone() + A.requires_grad = True + + C_shape = (BATCH_SIZE, SEQ_LENGTH, 4 * HIDDEN_SIZE) + C_master = torch.randn(C_shape, dtype=dtype, device=device) + torch.distributed.broadcast(C_master, src=0) + C = torch.chunk(C_master, TESSERACT_DIM, dim=0)[i] + C = torch.chunk(C, TESSERACT_DIM, dim=-1)[j] + C = C.clone() + C.requires_grad = True + + out = Matmul_ATB_2p5D.apply( + A, C, + TESSERACT_DIM, TESSERACT_DEP, (HIDDEN_SIZE // TESSERACT_DIM, 4 * HIDDEN_SIZE // TESSERACT_DIM), + i, j, k, + ParallelMode.PARALLEL_2P5D_ROW, + ParallelMode.PARALLEL_2P5D_COL, + ParallelMode.PARALLEL_2P5D_DEP, + data_parallel_rank, + pipeline_parallel_rank, + pipeline_parallel_size, + tensor_parallel_size) + + B_shape = (HIDDEN_SIZE, 4 * HIDDEN_SIZE) + A_master = A_master.clone() + A_master.requires_grad = True + C_master = C_master.clone() + C_master.requires_grad = True + B_master = torch.matmul( + A_master.view(-1, A_master.shape[-1]).transpose(0, 1), + C_master.view(-1, C_master.shape[-1])) + B = torch.chunk(B_master, TESSERACT_DIM, dim=0)[i] + B = torch.chunk(B, TESSERACT_DIM, dim=-1)[j] + check_equal(out, B) + print_rank_0('ATB forward: pass') + + grad_shape = B_master.shape + grad_master = torch.randn(grad_shape, dtype=dtype, device=device) + torch.distributed.broadcast(grad_master, src=0) + grad = torch.chunk(grad_master, TESSERACT_DIM, dim=0)[i] + grad = torch.chunk(grad, TESSERACT_DIM, dim=-1)[j] + + out.backward(grad) + + B_master.backward(grad_master) + A_grad = A_master.grad + A_grad = torch.chunk(A_grad, TESSERACT_DIM, dim=0)[i] + A_grad = torch.chunk(A_grad, TESSERACT_DIM, dim=-1)[j] + check_equal(A_grad, A.grad) + + C_grad = C_master.grad + C_grad = 
torch.chunk(C_grad, TESSERACT_DIM, dim=0)[i] + C_grad = torch.chunk(C_grad, TESSERACT_DIM, dim=-1)[j] + check_equal(C_grad, C.grad) + print_rank_0('ATB backward: pass') diff --git a/tests/test_layers/test_3d/common.py b/tests/test_layers/test_3d/common.py new file mode 100644 index 000000000..c85046855 --- /dev/null +++ b/tests/test_layers/test_3d/common.py @@ -0,0 +1,15 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import torch + +DEPTH = 2 +BATCH_SIZE = 512 +SEQ_LENGTH = 128 +HIDDEN_SIZE = 512 +NUM_CLASSES = 10 +NUM_BLOCKS = 6 +IMG_SIZE = 32 + +def check_equal(A, B): + return torch.allclose(A, B, rtol=1e-5, atol=1e-2) diff --git a/tests/test_layers/test_3d/test.sh b/tests/test_layers/test_3d/test.sh new file mode 100644 index 000000000..80edf05bb --- /dev/null +++ b/tests/test_layers/test_3d/test.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +python -m torch.distributed.launch --nproc_per_node 8 test_3d.py --host $HOST --port 29516 --world_size 8 + +# expected test output +# distributed environment initialized +# AB forward: pass +# AB backward: pass +# ABT forward: pass +# ABT backward: pass +# ATB forward: pass +# ATB backward: pass +# linear forward: pass +# linear backward: pass +# layer norm forward: pass +# layer norm backward: pass +# self attention forward: pass +# self attention backward: pass +# mlp forward: pass +# mlp backward: pass +# transformerlayer forward: pass +# transformerlayer backward: pass \ No newline at end of file diff --git a/tests/test_layers/test_3d/test_3d.py b/tests/test_layers/test_3d/test_3d.py new file mode 100644 index 000000000..21c560820 --- /dev/null +++ b/tests/test_layers/test_3d/test_3d.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from colossalai.initialize import init_dist + +from test_layer import * +from test_operation import * + +CONFIG = dict(parallel=dict(pipeline=1, tensor=dict(mode='3d', size=8)), + seed=0) + + +def check_operations(): + check_AB() + check_ABT() + check_ATB() + check_add() + check_mul() + check_sum() + # check_pooler() + + +def check_layer(): + logger = get_global_dist_logger() + linear_fwd_time, linear_bwd_time = check_linear() + norm_fwd_time, norm_bwd_time = check_layernorm() + attn_fwd_time, attn_bwd_time = check_attention() + mlp_fwd_time, mlp_bwd_time = check_mlp() + head_fwd_time, head_bwd_time = check_head() + embed_fwd_time, embed_bwd_time = check_embed() + loss_fwd_time, loss_bwd_time = check_loss() + block_fwd_time = norm_fwd_time + attn_fwd_time + norm_fwd_time + mlp_fwd_time + block_bwd_time = norm_bwd_time + attn_bwd_time + norm_bwd_time + mlp_bwd_time + fwd_time = embed_fwd_time + NUM_BLOCKS * block_fwd_time + norm_fwd_time + head_fwd_time + loss_fwd_time + bwd_time = embed_bwd_time + NUM_BLOCKS * block_bwd_time + norm_bwd_time + head_bwd_time + loss_bwd_time + logger.info('ViT forward time: {:.3f} s | backward time: {:.3f} s'.format( + fwd_time, bwd_time), + ranks=[0]) + + +def _test_main(): + # init dist + init_dist(CONFIG) + logger = get_global_dist_logger() + logger.info('Distributed environment is initialized.', ranks=[0]) + + global_context.set_seed() + torch.backends.cudnn.benchmark = True + + # check operation + check_operations() + + # check layers + check_layer() + + +if __name__ == '__main__': + _test_main() diff --git a/tests/test_layers/test_3d/test_conn.py b/tests/test_layers/test_3d/test_conn.py new file mode 100644 index 000000000..83cb32dd5 --- /dev/null +++ b/tests/test_layers/test_3d/test_conn.py @@ -0,0 +1,19 @@ +import torch +import torch.distributed 
as dist + +from colossalai.initialize import parse_args +from colossalai.utils import get_current_device + +ARGS = parse_args() +size = ARGS.world_size +rank = ARGS.local_rank + +init_method = f'tcp://{ARGS.host}:{ARGS.port}' +dist.init_process_group(backend='nccl', rank=rank, world_size=size, init_method=init_method) +print('Rank {} / {}'.format(dist.get_rank(), dist.get_world_size())) + +SIZE = 8 +tensor = torch.randn(SIZE) +tensor = tensor.to(get_current_device()) +dist.all_reduce(tensor) +print('Rank {0}: {1}'.format(rank, tensor.detach().cpu().numpy().tolist())) diff --git a/tests/test_layers/test_3d/test_layer.py b/tests/test_layers/test_3d/test_layer.py new file mode 100644 index 000000000..db5de22a4 --- /dev/null +++ b/tests/test_layers/test_3d/test_layer.py @@ -0,0 +1,640 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import math +import time + +import numpy as np +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context +from colossalai.logging import get_global_dist_logger +from colossalai.registry import LAYERS, LOSSES +from colossalai.utils import get_current_device, print_rank_0 + +from common import * + + +def check_linear(): + rank = torch.distributed.get_rank() + logger = get_global_dist_logger() + device = get_current_device() + dtype = torch.float32 + INPUT_SIZE = HIDDEN_SIZE + OUTPUT_SIZE = 2 * HIDDEN_SIZE + + j = A_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_INPUT) + i = B_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_WEIGHT) + k = C_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_OUTPUT) + + layer = LAYERS.get_module('Linear3D')(INPUT_SIZE, + OUTPUT_SIZE, + ParallelMode.PARALLEL_3D_INPUT, + ParallelMode.PARALLEL_3D_WEIGHT, + dtype=dtype, + bias=True) + torch.nn.init.zeros_(layer.bias) + torch.nn.init.ones_(layer.weight) + layer = layer.to(device) + layer_master = torch.nn.Linear(INPUT_SIZE, OUTPUT_SIZE) + torch.nn.init.zeros_(layer_master.bias) + torch.nn.init.ones_(layer_master.weight) + layer_master = layer_master.to(device) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=device) + torch.distributed.broadcast(A_master, src=0) + A = torch.chunk(A_master, DEPTH, dim=0)[i] + A = torch.chunk(A, DEPTH, dim=-1)[k] + A = torch.chunk(A, DEPTH, dim=0)[j] + A = A.clone() + A.requires_grad = True + + fwd_start = time.time() + out = layer(A) + fwd_end = time.time() + print_rank_0( + 'linear forward: {0} --> {1} | {2:.3f} s'.format( + tuple(A.shape), tuple(out.shape), fwd_end - fwd_start), logger) + A_master = A_master.clone() + A_master.requires_grad = True + C_master = layer_master(A_master) + C = torch.chunk(C_master, DEPTH, dim=0)[i] + C = torch.chunk(C, DEPTH, dim=-1)[j] + C = torch.chunk(C, DEPTH, dim=0)[k] + logger.info('Rank {} linear forward: {}'.format(rank, check_equal(out, C))) + + grad_shape = C_master.shape + grad_master = torch.randn(grad_shape, + dtype=dtype, + device=get_current_device()) + torch.distributed.broadcast(grad_master, src=0) + grad = torch.chunk(grad_master, DEPTH, dim=0)[i] + grad = torch.chunk(grad, DEPTH, dim=-1)[j] + grad = torch.chunk(grad, DEPTH, dim=0)[k] + + bwd_start = time.time() + out.backward(grad) + bwd_end = time.time() + print_rank_0('linear backward: {:.3f} s'.format(bwd_end - bwd_start), + logger) + + C_master.backward(grad_master) + A_grad = A_master.grad + A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i] + A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[k] + A_grad = 
torch.chunk(A_grad, DEPTH, dim=0)[j] + logger.info('Rank {} linear backward (input_grad): {}'.format( + rank, check_equal(A_grad, A.grad))) + + B_grad = layer_master.weight.grad.transpose(0, 1) + B_grad = torch.chunk(B_grad, DEPTH, dim=0)[k] + B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[j] + B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[i] + logger.info('Rank {} linear backward (weight_grad): {}'.format( + rank, check_equal(B_grad, layer.weight.grad))) + + if j == k: + bias_grad = layer_master.bias.grad + bias_grad = torch.chunk(bias_grad, DEPTH)[j] + bias_grad = torch.chunk(bias_grad, DEPTH)[i] + logger.info('Rank {} linear backward (bias_grad): {}'.format( + rank, check_equal(bias_grad, layer.bias.grad))) + else: + logger.info('Rank {} linear backward (bias_grad): {}'.format( + rank, + # np.count_nonzero(layer.bias.grad.detach().cpu().numpy()) == 0)) + layer.bias.grad is None)) + + return fwd_end - fwd_start, bwd_end - bwd_start + + +def check_layernorm(): + rank = torch.distributed.get_rank() + logger = get_global_dist_logger() + device = get_current_device() + dtype = torch.float32 + INPUT_SIZE = HIDDEN_SIZE + + j = A_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_INPUT) + i = B_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_WEIGHT) + k = C_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_OUTPUT) + + norm = LAYERS.get_module('LayerNorm3D')(INPUT_SIZE, + ParallelMode.PARALLEL_3D_INPUT, + ParallelMode.PARALLEL_3D_WEIGHT, + eps=1e-6, + dtype=dtype) + norm = norm.to(device) + norm_master = torch.nn.LayerNorm(INPUT_SIZE, eps=1e-6) + norm_master = norm_master.to(device) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=device) + torch.distributed.broadcast(A_master, src=0) + A = torch.chunk(A_master, DEPTH, dim=0)[i] + A = torch.chunk(A, DEPTH, dim=-1)[k] + A = torch.chunk(A, DEPTH, dim=0)[j] + A = A.clone() + A.requires_grad = True + + fwd_start = time.time() + out = norm(A) + fwd_end = time.time() + print_rank_0( + 'layer norm forward: pass | {0} --> {1} | {2:.3f} s'.format( + tuple(A.shape), tuple(out.shape), fwd_end - fwd_start), logger) + + A_master = A_master.clone() + A_master.requires_grad = True + C_master = norm_master(A_master) + C = torch.chunk(C_master, DEPTH, dim=0)[i] + C = torch.chunk(C, DEPTH, dim=-1)[k] + C = torch.chunk(C, DEPTH, dim=0)[j] + logger.info('Rank {} layernorm forward: {}'.format(rank, + check_equal(out, C))) + # time.sleep(rank) + # logger.info('Rank {0} master:\n{1}\nRank {0} out:\n{2}\nRank {0} true:\n{3}\n'. 
+ # format(rank, + # C_master.detach().cpu().numpy().tolist(), + # out.detach().cpu().numpy().tolist(), + # C.detach().cpu().numpy().tolist())) + + grad_shape = C_master.shape + grad_master = torch.randn(grad_shape, dtype=dtype, device=device) + torch.distributed.broadcast(grad_master, src=0) + grad = torch.chunk(grad_master, DEPTH, dim=0)[i] + grad = torch.chunk(grad, DEPTH, dim=-1)[k] + grad = torch.chunk(grad, DEPTH, dim=0)[j] + + bwd_start = time.time() + out.backward(grad) + bwd_end = time.time() + print_rank_0( + 'layer norm backward: pass | {:.3f} s'.format(bwd_end - bwd_start), + logger) + + C_master.backward(grad_master) + A_grad = A_master.grad + A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i] + A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[k] + A_grad = torch.chunk(A_grad, DEPTH, dim=0)[j] + logger.info('Rank {} layernorm backward (input_grad): {}'.format( + rank, check_equal(A_grad, A.grad))) + + if j == k: + bias_grad = norm_master.weight.grad + bias_grad = torch.chunk(bias_grad, DEPTH)[j] + bias_grad = torch.chunk(bias_grad, DEPTH)[i] + logger.info('Rank {} linear backward (weight_grad): {}'.format( + rank, check_equal(bias_grad, norm.weight.grad))) + else: + logger.info('Rank {} linear backward (weight_grad): {}'.format( + rank, + # np.count_nonzero(layer.bias.grad.detach().cpu().numpy()) == 0)) + norm.weight.grad is None)) + + if j == k: + bias_grad = norm_master.bias.grad + bias_grad = torch.chunk(bias_grad, DEPTH)[j] + bias_grad = torch.chunk(bias_grad, DEPTH)[i] + logger.info('Rank {} linear backward (bias_grad): {}'.format( + rank, check_equal(bias_grad, norm.bias.grad))) + else: + logger.info('Rank {} linear backward (bias_grad): {}'.format( + rank, + # np.count_nonzero(layer.bias.grad.detach().cpu().numpy()) == 0)) + norm.bias.grad is None)) + + return fwd_end - fwd_start, bwd_end - bwd_start + + +def check_attention(): + rank = torch.distributed.get_rank() + device = get_current_device() + logger = get_global_dist_logger() + dtype = torch.float32 + INPUT_SIZE = HIDDEN_SIZE + NUM_ATTENTION_HEADS = 2 + + j = A_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_INPUT) + i = B_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_WEIGHT) + k = C_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_OUTPUT) + + layer = LAYERS.get_module('ViTSelfAttention3D')(HIDDEN_SIZE, + NUM_ATTENTION_HEADS, + 0., + 0.1, + dtype=dtype, + bias=True) + layer = layer.to(device) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=device) + torch.distributed.broadcast(A_master, src=0) + A = torch.chunk(A_master, DEPTH, dim=0)[i] + A = torch.chunk(A, DEPTH, dim=-1)[k] + A = torch.chunk(A, DEPTH, dim=0)[j] + A = A.clone() + A.requires_grad = True + + mask_shape = (BATCH_SIZE // DEPTH, NUM_ATTENTION_HEADS // DEPTH, + SEQ_LENGTH // DEPTH, SEQ_LENGTH // DEPTH) + attention_mask = torch.zeros(mask_shape, dtype=dtype, device=device) + + fwd_start = time.time() + out = layer(A) + fwd_end = time.time() + print_rank_0( + 'self attention forward: pass | {0} --> {1} | {2:.3f} s'.format( + tuple(A.shape), tuple(out.shape), fwd_end - fwd_start), logger) + + grad_shape = out.shape + grad = torch.randn(grad_shape, dtype=dtype, device=device) + + bwd_start = time.time() + out.backward(grad) + bwd_end = time.time() + print_rank_0( + 'self attention backward: pass | {:.3f} s'.format(bwd_end - bwd_start), + logger) + + return fwd_end - fwd_start, bwd_end - bwd_start + + +def check_mlp(): + rank = torch.distributed.get_rank() + device = 
get_current_device() + logger = get_global_dist_logger() + dtype = torch.float32 + INPUT_SIZE = HIDDEN_SIZE + + j = A_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_INPUT) + i = B_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_WEIGHT) + k = C_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_OUTPUT) + + layer = LAYERS.get_module('ViTMLP3D')(HIDDEN_SIZE, + 1, + 0.1, + 'gelu', + dtype=dtype, + bias=True) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=device) + torch.distributed.broadcast(A_master, src=0) + A = torch.chunk(A_master, DEPTH, dim=0)[i] + A = torch.chunk(A, DEPTH, dim=-1)[k] + A = torch.chunk(A, DEPTH, dim=0)[j] + A = A.clone() + A.requires_grad = True + + fwd_start = time.time() + out = layer(A) + fwd_end = time.time() + print_rank_0( + 'mlp forward: pass | {0} --> {1} | {2:.3f} s'.format( + tuple(A.shape), tuple(out.shape), fwd_end - fwd_start), logger) + + grad_shape = out.shape + grad = torch.randn(grad_shape, dtype=dtype, device=device) + + bwd_start = time.time() + out.backward(grad) + bwd_end = time.time() + print_rank_0('mlp backward: pass | {:.3f} s'.format(bwd_end - bwd_start), + logger) + + return fwd_end - fwd_start, bwd_end - bwd_start + + +class Testvithead(torch.nn.Module): + def __init__(self, in_features, out_features, bias=True): + super().__init__() + self.linear = torch.nn.Linear(in_features, out_features, bias=bias) + + def forward(self, x): + x = x[:, 0] + x = self.linear(x) + return x + + +def check_head(): + rank = torch.distributed.get_rank() + logger = get_global_dist_logger() + device = get_current_device() + dtype = torch.float32 + INPUT_SIZE = HIDDEN_SIZE + + j = A_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_INPUT) + i = B_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_WEIGHT) + k = C_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_OUTPUT) + + head = LAYERS.get_module('ViTHead3D')(INPUT_SIZE, + NUM_CLASSES, + dtype=dtype, + bias=True) + torch.nn.init.zeros_(head.linear.bias) + torch.nn.init.ones_(head.linear.weight) + head = head.to(device) + + layer = Testvithead(INPUT_SIZE, NUM_CLASSES, bias=True) + torch.nn.init.zeros_(layer.linear.bias) + torch.nn.init.ones_(layer.linear.weight) + layer = layer.to(device) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=device) + torch.distributed.broadcast(A_master, src=0) + A = torch.chunk(A_master, DEPTH, dim=0)[i] + A = torch.chunk(A, DEPTH, dim=-1)[k] + A = torch.chunk(A, DEPTH, dim=0)[j] + A = A.clone() + A.requires_grad = True + + fwd_start = time.time() + out = head(A) + fwd_end = time.time() + print_rank_0( + 'head forward: pass | {0} --> {1} | {2:.3f} s'.format( + tuple(A.shape), tuple(out.shape), fwd_end - fwd_start), logger) + A_master = A_master.clone() + A_master.requires_grad = True + C_master = layer(A_master) + C = torch.chunk(C_master, DEPTH, dim=0)[i] + C = torch.chunk(C, DEPTH, dim=-1)[j] + C = torch.chunk(C, DEPTH, dim=0)[k] + logger.info('Rank {} head forward: {}'.format(rank, check_equal(out, C))) + + grad_shape = C_master.shape + grad_master = torch.randn(grad_shape, + dtype=dtype, + device=get_current_device()) + torch.distributed.broadcast(grad_master, src=0) + grad = torch.chunk(grad_master, DEPTH, dim=0)[i] + grad = torch.chunk(grad, DEPTH, dim=-1)[j] + grad = torch.chunk(grad, DEPTH, dim=0)[k] + + bwd_start = time.time() + out.backward(grad) + bwd_end = time.time() + print_rank_0('head 
backward: pass | {:.3f} s'.format(bwd_end - bwd_start), + logger) + + C_master.backward(grad_master) + A_grad = A_master.grad + A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i] + A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[k] + A_grad = torch.chunk(A_grad, DEPTH, dim=0)[j] + # if j == 0: + logger.info('Rank {} head backward (input_grad): {}'.format( + rank, check_equal(A_grad, A.grad))) + # else: + # logger.info('Rank {} head backward (input_grad): {}'.format( + # # rank, check_equal(A_grad, A.grad))) + # rank, + # A.grad is None)) + + B_grad = layer.linear.weight.grad.transpose(0, 1) + B_grad = torch.chunk(B_grad, DEPTH, dim=0)[k] + B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[j] + pad_shape = (B_grad.shape[0], math.ceil(B_grad.shape[-1] / DEPTH) * DEPTH - + B_grad.shape[-1]) + B_grad = torch.cat( + [B_grad, torch.zeros(pad_shape, dtype=dtype, device=device)], dim=-1) + B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[i] + logger.info('Rank {} head backward (weight_grad): {}'.format( + rank, check_equal(B_grad, head.linear.weight.grad))) + + if j == k: + bias_grad = layer.linear.bias.grad + bias_grad = torch.chunk(bias_grad, DEPTH)[j] + pad_shape = (math.ceil(bias_grad.shape[0] / DEPTH) * DEPTH - + bias_grad.shape[0], ) + bias_grad = torch.cat( + [bias_grad, + torch.zeros(pad_shape, dtype=dtype, device=device)]) + bias_grad = torch.chunk(bias_grad, DEPTH)[i] + logger.info('Rank {} head backward (bias_grad): {}'.format( + rank, check_equal(bias_grad, head.linear.bias.grad))) + else: + logger.info('Rank {} head backward (bias_grad): {}'.format( + rank, + # np.count_nonzero( + # head.linear.bias.grad.detach().cpu().numpy()) == 0)) + head.linear.bias.grad is None)) + + return fwd_end - fwd_start, bwd_end - bwd_start + + +class Testvitembed(torch.nn.Module): + def __init__(self, img_size: int, patch_size: int, in_chans: int, + embed_size: int, drop_prob: float) -> None: + super().__init__() + self.proj = torch.nn.Conv2d(in_chans, + embed_size, + kernel_size=patch_size, + stride=patch_size) + num_patches = (img_size // patch_size)**2 + self.cls_token = torch.nn.Parameter(torch.zeros(1, 1, embed_size)) + self.pos_embed = torch.nn.Parameter( + torch.zeros(1, num_patches + 1, embed_size)) + self.pos_drop = torch.nn.Dropout(drop_prob) + + def forward(self, x): + x = self.proj(x) + x = x.flatten(2).transpose(1, 2) + cls_token = self.cls_token.expand(x.shape[0], -1, -1) + x = torch.cat((cls_token, x), dim=1) + x = self.pos_drop(x + self.pos_embed) + return x + + +def check_embed(): + rank = torch.distributed.get_rank() + device = get_current_device() + logger = get_global_dist_logger() + dtype = torch.float32 + + j = A_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_INPUT) + i = B_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_WEIGHT) + k = C_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_OUTPUT) + + layer = LAYERS.get_module('ViTPatchEmbedding3D')(IMG_SIZE, 4, 3, + HIDDEN_SIZE, 0.) + torch.nn.init.zeros_(layer.proj.bias) + torch.nn.init.ones_(layer.proj.weight) + torch.nn.init.ones_(layer.cls_token) + torch.nn.init.ones_(layer.pos_embed) + layer = layer.to(device) + + layer_master = Testvitembed(IMG_SIZE, 4, 3, HIDDEN_SIZE, 0.) 
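# Illustrative sketch (annotation, not part of the original test file): the
# chunk-three-times pattern used by the 3D checks above and by the gradient
# slicing below. The batch dim is split twice -- by the weight-group rank `i`
# and then by the input-group rank `j` -- and the hidden dim once by the
# output-group rank `k`, so with DEPTH = 2 each of the 8 ranks holds a
# (BATCH_SIZE // 4, SEQ_LENGTH, HIDDEN_SIZE // 2) block. Plain single-process
# PyTorch; `shard_3d` is a hypothetical name, not a library API.
import torch

def shard_3d(master: torch.Tensor, i: int, j: int, k: int, depth: int) -> torch.Tensor:
    block = torch.chunk(master, depth, dim=0)[i]      # first batch split (weight rank)
    block = torch.chunk(block, depth, dim=-1)[k]      # hidden split (output rank)
    block = torch.chunk(block, depth, dim=0)[j]       # second batch split (input rank)
    return block.clone()

full = torch.randn(512, 128, 512)                     # (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE)
local = shard_3d(full, i=0, j=1, k=1, depth=2)
assert local.shape == (128, 128, 256)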
+ torch.nn.init.zeros_(layer_master.proj.bias) + torch.nn.init.ones_(layer_master.proj.weight) + torch.nn.init.ones_(layer_master.cls_token) + torch.nn.init.ones_(layer_master.pos_embed) + layer_master = layer_master.to(device) + + A_shape = (BATCH_SIZE, 3, IMG_SIZE, IMG_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=device) + torch.distributed.broadcast(A_master, src=0) + A = A_master.clone() + A.requires_grad = True + + fwd_start = time.time() + out = layer(A) + fwd_end = time.time() + print_rank_0( + 'embedding forward: pass | {0} --> {1} | {2:.3f} s'.format( + tuple(A.shape), tuple(out.shape), fwd_end - fwd_start), logger) + # out_cls = out[:, 0] + # out_tensor = out[:, 1:] + + A_master = A_master.clone() + A_master.requires_grad = True + C_master = layer_master(A_master) + # if j == 0: + # C_cls = C_master[:, 0] + # C_cls = torch.chunk(C_cls, DEPTH, dim=0)[i] + # C_cls = torch.chunk(C_cls, DEPTH, dim=-1)[k] + # logger.info('Rank {} embed forward (cls): {}'.format( + # rank, check_equal(out_cls, C_cls))) + # C = C_master[:, 1:] + C = torch.chunk(C_master, DEPTH, dim=0)[i] + C = torch.chunk(C, DEPTH, dim=-1)[k] + C = torch.chunk(C, DEPTH, dim=0)[j] + logger.info('Rank {} embed forward: {}'.format(rank, check_equal(out, C))) + + grad_shape = C_master.shape + grad_master = torch.randn(grad_shape, + dtype=dtype, + device=get_current_device()) + torch.distributed.broadcast(grad_master, src=0) + # cls_grad = grad_master[:, 0] + # cls_grad = torch.chunk(cls_grad, DEPTH, dim=0)[i] + # cls_grad = torch.chunk(cls_grad, DEPTH, dim=-1)[k] + # grad = grad_master[:, 1:] + grad = torch.chunk(grad_master, DEPTH, dim=0)[i] + grad = torch.chunk(grad, DEPTH, dim=-1)[k] + grad = torch.chunk(grad, DEPTH, dim=0)[j] + # grad = torch.cat((torch.unsqueeze(cls_grad, 1), grad), dim=1) + bwd_start = time.time() + out.backward(grad) + bwd_end = time.time() + print_rank_0( + 'embedding backward: pass | {:.3f} s'.format(bwd_end - bwd_start), + logger) + + C_master.backward(grad_master) + # A_grad = A_master.grad + # logger.info('Rank {} embed backward (input_grad): {}'.format( + # rank, check_equal(A_grad, A.grad))) + # time.sleep(0.1 * rank) + # logger.info( + # 'Rank {0} master:\n{1}\nRank {0} out:\n{2}\nRank {0} true:\n{3}\n'. + # format(rank, + # A_master.grad.detach().cpu().numpy().tolist(), + # A.grad.detach().cpu().numpy().tolist(), + # A_grad.detach().cpu().numpy().tolist()), ranks=[0]) + + cls_grad_master = layer_master.cls_token.grad + cls_grad = torch.chunk(cls_grad_master, DEPTH, dim=-1)[k] + # if j == 0: + logger.info('Rank {} embed backward (cls_grad): {}'.format( + rank, check_equal(cls_grad, layer.cls_token.grad))) + # else:. 
+ # logger.info('Rank {} embed backward (cls_grad): {}'.format( + # rank, + # layer.cls_token.grad is None or np.count_nonzero( + # layer.cls_token.grad.detach().cpu().numpy()) == 0)) + + pos_grad_master = layer_master.pos_embed.grad + pos_grad = torch.chunk(pos_grad_master, DEPTH, dim=-1)[k] + logger.info('Rank {} embed backward (pos_embed_grad): {}'.format( + rank, check_equal(pos_grad, layer.pos_embed.grad))) + # if i == 0: + # pos_cls_grad = pos_grad[:, 0] + # pos_tensor_grad = pos_grad[:, 1:] + # pos_tensor_grad = torch.chunk(pos_tensor_grad, DEPTH, dim=1)[j] + # if j == 0: + # logger.info('Rank {} embed backward (pos_embed_grad): {}'.format( + # rank, + # check_equal( + # torch.cat( + # (torch.unsqueeze(pos_cls_grad, 1), pos_tensor_grad), + # dim=1), layer.pos_embed.grad))) + # else: + # logger.info('Rank {} embed backward (pos_embed_grad): {}'.format( + # rank, check_equal(pos_tensor_grad, layer.pos_embed.grad[:, + # 1:]))) + # else: + # logger.info('Rank {} embed backward (pos_embed_grad): {}'.format( + # rank, layer.pos_embed.grad is None)) + + B_grad = layer_master.proj.weight.grad + B_grad = torch.chunk(B_grad, DEPTH, dim=0)[k] + logger.info('Rank {} embed backward (proj_weight_grad): {}'.format( + rank, check_equal(B_grad, layer.proj.weight.grad))) + + bias_grad = layer_master.proj.bias.grad + bias_grad = torch.chunk(bias_grad, DEPTH)[k] + logger.info('Rank {} embed backward (proj_bias_grad): {}'.format( + rank, check_equal(bias_grad, layer.proj.bias.grad))) + + return fwd_end - fwd_start, bwd_end - bwd_start + + +def check_loss(): + rank = torch.distributed.get_rank() + logger = get_global_dist_logger() + device = get_current_device() + dtype = torch.float32 + + j = A_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_INPUT) + i = B_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_WEIGHT) + k = C_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_OUTPUT) + + criterion = LOSSES.get_module('CrossEntropyLoss3D')( + ParallelMode.PARALLEL_3D_INPUT, ParallelMode.PARALLEL_3D_WEIGHT) + criterion_master = torch.nn.CrossEntropyLoss() + + out_shape = (BATCH_SIZE, NUM_CLASSES) + out_master = torch.randn(out_shape, dtype=dtype, device=device) + target_master = torch.randint(NUM_CLASSES, (BATCH_SIZE, ), + dtype=torch.long, + device=device) + torch.distributed.broadcast(out_master, src=0) + torch.distributed.broadcast(target_master, src=0) + out = torch.chunk(out_master, DEPTH, dim=0)[i] + out = torch.chunk(out, DEPTH, dim=-1)[k] + out = torch.chunk(out, DEPTH, dim=0)[j] + out = out.clone() + out.requires_grad = True + + fwd_start = time.time() + loss = criterion(out, target_master) + fwd_end = time.time() + print_rank_0( + 'loss forward: pass | {0} --> {1} | {2:.3f} s'.format( + tuple(out.shape), tuple(loss.shape), fwd_end - fwd_start), logger) + + out_master = out_master.clone() + out_master.requires_grad = True + loss_master = criterion_master(out_master, target_master) + logger.info('Rank {} CrossEntropyLoss forward: {}'.format( + rank, check_equal(loss, loss_master))) + + bwd_start = time.time() + loss.backward() + bwd_end = time.time() + print_rank_0('loss backward: pass | {:.3f} s'.format(bwd_end - bwd_start), + logger) + + loss_master.backward() + out_grad = out_master.grad + out_grad = torch.chunk(out_grad, DEPTH, dim=0)[i] + out_grad = torch.chunk(out_grad, DEPTH, dim=-1)[k] + out_grad = torch.chunk(out_grad, DEPTH, dim=0)[j] + logger.info('Rank {} CrossEntropyLoss backward: {}'.format( + rank, check_equal(out_grad, out.grad))) + + return fwd_end 
- fwd_start, bwd_end - bwd_start diff --git a/tests/test_layers/test_3d/test_operation.py b/tests/test_layers/test_3d/test_operation.py new file mode 100644 index 000000000..05acb7f58 --- /dev/null +++ b/tests/test_layers/test_3d/test_operation.py @@ -0,0 +1,465 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from colossalai.context import ParallelMode +from colossalai.core import global_context +from colossalai.logging import get_global_dist_logger +from colossalai.nn.layer.parallel_3d._operation import * +from colossalai.utils import get_current_device + +from common import * + + +def check_AB(): + rank = torch.distributed.get_rank() + logger = get_global_dist_logger() + dtype = torch.float + j = global_context.get_local_rank(ParallelMode.PARALLEL_3D_INPUT) + i = global_context.get_local_rank(ParallelMode.PARALLEL_3D_WEIGHT) + k = global_context.get_local_rank(ParallelMode.PARALLEL_3D_OUTPUT) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=get_current_device()) + torch.distributed.broadcast(A_master, src=0) + A = torch.chunk(A_master, DEPTH, dim=0)[i] + A = torch.chunk(A, DEPTH, dim=-1)[k] + A = torch.chunk(A, DEPTH, dim=0)[j] + A = A.clone() + A.requires_grad = True + + B_shape = (HIDDEN_SIZE, 4 * HIDDEN_SIZE) + B_master = torch.randn(B_shape, dtype=dtype, device=get_current_device()) + torch.distributed.broadcast(B_master, src=0) + B = torch.chunk(B_master, DEPTH, dim=0)[k] + B = torch.chunk(B, DEPTH, dim=-1)[j] + B = torch.chunk(B, DEPTH, dim=-1)[i] + B = B.clone() + B.requires_grad = True + + out = Matmul_AB_3D.apply(A, B, DEPTH, ParallelMode.PARALLEL_3D_INPUT, + ParallelMode.PARALLEL_3D_WEIGHT, + ParallelMode.PARALLEL_3D_OUTPUT) + + C_shape = (BATCH_SIZE, SEQ_LENGTH, 4 * HIDDEN_SIZE) + A_master = A_master.clone() + A_master.requires_grad = True + B_master = B_master.clone() + B_master.requires_grad = True + C_master = torch.matmul(A_master, B_master) + C = torch.chunk(C_master, DEPTH, dim=0)[i] + C = torch.chunk(C, DEPTH, dim=-1)[j] + C = torch.chunk(C, DEPTH, dim=0)[k] + # check forward correctness + logger.info('Rank {} AB forward: {}'.format(rank, check_equal(out, C))) + + grad_shape = C_master.shape + grad_master = torch.randn(grad_shape, + dtype=dtype, + device=get_current_device()) + torch.distributed.broadcast(grad_master, src=0) + grad = torch.chunk(grad_master, DEPTH, dim=0)[i] + grad = torch.chunk(grad, DEPTH, dim=-1)[j] + grad = torch.chunk(grad, DEPTH, dim=0)[k] + + out.backward(grad) + + C_master.backward(grad_master) + A_grad = A_master.grad + A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i] + A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[k] + A_grad = torch.chunk(A_grad, DEPTH, dim=0)[j] + # check backward correctness + logger.info('Rank {} AB backward (A_grad): {}'.format( + rank, check_equal(A_grad, A.grad))) + + B_grad = B_master.grad + B_grad = torch.chunk(B_grad, DEPTH, dim=0)[k] + B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[j] + B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[i] + # check backward correctness + logger.info('Rank {} AB backward (B_grad): {}'.format( + rank, check_equal(B_grad, B.grad))) + + +def check_ABT(): + rank = torch.distributed.get_rank() + logger = get_global_dist_logger() + dtype = torch.float + + j = A_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_INPUT) + i = B_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_WEIGHT) + k = C_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_OUTPUT) + device = get_current_device() + + C_shape = (BATCH_SIZE, 
SEQ_LENGTH, 4 * HIDDEN_SIZE) + C_master = torch.randn(C_shape, dtype=dtype, device=device) + torch.distributed.broadcast(C_master, src=0) + C = torch.chunk(C_master, DEPTH, dim=0)[i] + C = torch.chunk(C, DEPTH, dim=-1)[j] + C = torch.chunk(C, DEPTH, dim=0)[k] + C = C.clone() + C.requires_grad = True + + B_shape = (HIDDEN_SIZE, 4 * HIDDEN_SIZE) + B_master = torch.randn(B_shape, dtype=dtype, device=device) + torch.distributed.broadcast(B_master, src=0) + B = torch.chunk(B_master, DEPTH, dim=0)[k] + B = torch.chunk(B, DEPTH, dim=-1)[j] + B = torch.chunk(B, DEPTH, dim=-1)[i] + B = B.clone() + B.requires_grad = True + + out = Matmul_ABT_3D.apply(C, B, DEPTH, ParallelMode.PARALLEL_3D_OUTPUT, + ParallelMode.PARALLEL_3D_WEIGHT, + ParallelMode.PARALLEL_3D_INPUT) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE) + C_master = C_master.clone() + C_master.requires_grad = True + B_master = B_master.clone() + B_master.requires_grad = True + A_master = torch.matmul(C_master, B_master.transpose(0, 1)) + A = torch.chunk(A_master, DEPTH, dim=0)[i] + A = torch.chunk(A, DEPTH, dim=-1)[k] + A = torch.chunk(A, DEPTH, dim=0)[j] + logger.info('Rank {} ABT forward: {}'.format(rank, check_equal(out, A))) + + grad_shape = A_master.shape + grad_master = torch.randn(grad_shape, dtype=dtype, device=device) + torch.distributed.broadcast(grad_master, src=0) + grad = torch.chunk(grad_master, DEPTH, dim=0)[i] + grad = torch.chunk(grad, DEPTH, dim=-1)[k] + grad = torch.chunk(grad, DEPTH, dim=0)[j] + + # backward + out.backward(grad) + + A_master.backward(grad_master) + C_grad = C_master.grad + C_grad = torch.chunk(C_grad, DEPTH, dim=0)[i] + C_grad = torch.chunk(C_grad, DEPTH, dim=-1)[j] + C_grad = torch.chunk(C_grad, DEPTH, dim=0)[k] + logger.info('Rank {} ABT backward (A_grad): {}'.format( + rank, check_equal(C_grad, C.grad))) + + B_grad = B_master.grad + B_grad = torch.chunk(B_grad, DEPTH, dim=0)[k] + B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[j] + B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[i] + logger.info('Rank {} ABT backward (B_grad): {}'.format( + rank, check_equal(B_grad, B.grad))) + + +def check_ATB(): + rank = torch.distributed.get_rank() + logger = get_global_dist_logger() + device = get_current_device() + dtype = torch.float + + j = A_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_INPUT) + i = B_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_WEIGHT) + k = C_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_OUTPUT) + + A_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=device) + torch.distributed.broadcast(A_master, src=0) + A = torch.chunk(A_master, DEPTH, dim=0)[i] + A = torch.chunk(A, DEPTH, dim=-1)[k] + A = torch.chunk(A, DEPTH, dim=0)[j] + A = A.clone() + A.requires_grad = True + + C_shape = (BATCH_SIZE, SEQ_LENGTH, 4 * HIDDEN_SIZE) + C_master = torch.randn(C_shape, dtype=dtype, device=device) + torch.distributed.broadcast(C_master, src=0) + C = torch.chunk(C_master, DEPTH, dim=0)[i] + C = torch.chunk(C, DEPTH, dim=-1)[j] + C = torch.chunk(C, DEPTH, dim=0)[k] + C = C.clone() + C.requires_grad = True + + out = Matmul_ATB_3D.apply(A, C, DEPTH, ParallelMode.PARALLEL_3D_INPUT, + ParallelMode.PARALLEL_3D_OUTPUT, + ParallelMode.PARALLEL_3D_WEIGHT) + + B_shape = (HIDDEN_SIZE, 4 * HIDDEN_SIZE) + A_master = A_master.clone() + A_master.requires_grad = True + C_master = C_master.clone() + C_master.requires_grad = True + B_master = torch.matmul( + A_master.view(-1, A_master.shape[-1]).transpose(0, 1), + C_master.view(-1, 
C_master.shape[-1])) + B = torch.chunk(B_master, DEPTH, dim=0)[k] + B = torch.chunk(B, DEPTH, dim=-1)[j] + B = torch.chunk(B, DEPTH, dim=-1)[i] + logger.info('Rank {} ATB forward: {}'.format(rank, check_equal(out, B))) + + grad_shape = B_master.shape + grad_master = torch.randn(grad_shape, dtype=dtype, device=device) + torch.distributed.broadcast(grad_master, src=0) + grad = torch.chunk(grad_master, DEPTH, dim=0)[k] + grad = torch.chunk(grad, DEPTH, dim=-1)[j] + grad = torch.chunk(grad, DEPTH, dim=-1)[i] + + out.backward(grad) + + B_master.backward(grad_master) + A_grad = A_master.grad + A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i] + A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[k] + A_grad = torch.chunk(A_grad, DEPTH, dim=0)[j] + logger.info('Rank {} ATB backward (A_grad): {}'.format( + rank, check_equal(A_grad, A.grad))) + + C_grad = C_master.grad + C_grad = torch.chunk(C_grad, DEPTH, dim=0)[i] + C_grad = torch.chunk(C_grad, DEPTH, dim=-1)[j] + C_grad = torch.chunk(C_grad, DEPTH, dim=0)[k] + logger.info('Rank {} ATB backward (B_grad): {}'.format( + rank, check_equal(C_grad, C.grad))) + + +def check_add(): + rank = torch.distributed.get_rank() + logger = get_global_dist_logger() + dtype = torch.float + + j = A_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_INPUT) + i = B_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_WEIGHT) + k = C_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_OUTPUT) + device = get_current_device() + + A_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=get_current_device()) + torch.distributed.broadcast(A_master, src=0) + A = torch.chunk(A_master, DEPTH, dim=0)[i] + A = torch.chunk(A, DEPTH, dim=-1)[k] + A = torch.chunk(A, DEPTH, dim=0)[j] + A = A.clone() + A.requires_grad = True + + bias_shape = (HIDDEN_SIZE, ) + bias_master = torch.randn(bias_shape, + dtype=dtype, + device=get_current_device()) + torch.distributed.broadcast(bias_master, src=0) + bias = torch.chunk(bias_master, DEPTH)[j] + bias = torch.chunk(bias, DEPTH)[i] + bias = bias.clone() + bias.requires_grad = True + + out = Add_3D.apply(A, bias, DEPTH, ParallelMode.PARALLEL_3D_INPUT, + ParallelMode.PARALLEL_3D_WEIGHT, + ParallelMode.PARALLEL_3D_OUTPUT) + + A_master = A_master.clone() + A_master.requires_grad = True + bias_master = bias_master.clone() + bias_master.requires_grad = True + C_master = A_master + bias_master + C = torch.chunk(C_master, DEPTH, dim=0)[i] + C = torch.chunk(C, DEPTH, dim=-1)[k] + C = torch.chunk(C, DEPTH, dim=0)[j] + + logger.info('Rank {} Add forward: {}'.format(rank, check_equal(out, C))) + + grad_shape = C_master.shape + grad_master = torch.randn(grad_shape, dtype=dtype, device=device) + torch.distributed.broadcast(grad_master, src=0) + grad = torch.chunk(grad_master, DEPTH, dim=0)[i] + grad = torch.chunk(grad, DEPTH, dim=-1)[k] + grad = torch.chunk(grad, DEPTH, dim=0)[j] + + out.backward(grad) + + C_master.backward(grad_master) + A_grad = A_master.grad + A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i] + A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[k] + A_grad = torch.chunk(A_grad, DEPTH, dim=0)[j] + logger.info('Rank {} Add backward (A_grad): {}'.format( + rank, check_equal(A_grad, A.grad))) + + if j == k: + bias_grad = bias_master.grad + bias_grad = torch.chunk(bias_grad, DEPTH)[j] + bias_grad = torch.chunk(bias_grad, DEPTH)[i] + logger.info('Rank {} Add backward (b_grad): {}'.format( + rank, check_equal(bias_grad, bias.grad))) + else: + logger.info('Rank {} Add backward (b_grad): 
{}'.format( + rank, + # np.count_nonzero(bias.grad.detach().cpu().numpy()) == 0)) + bias.grad is None)) + + +def check_mul(): + rank = torch.distributed.get_rank() + logger = get_global_dist_logger() + dtype = torch.float + + j = A_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_INPUT) + i = B_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_WEIGHT) + k = C_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_OUTPUT) + device = get_current_device() + + A_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=get_current_device()) + torch.distributed.broadcast(A_master, src=0) + A = torch.chunk(A_master, DEPTH, dim=0)[i] + A = torch.chunk(A, DEPTH, dim=-1)[k] + A = torch.chunk(A, DEPTH, dim=0)[j] + A = A.clone() + A.requires_grad = True + + bias_shape = (HIDDEN_SIZE, ) + bias_master = torch.randn(bias_shape, + dtype=dtype, + device=get_current_device()) + torch.distributed.broadcast(bias_master, src=0) + bias = torch.chunk(bias_master, DEPTH)[j] + bias = torch.chunk(bias, DEPTH)[i] + bias = bias.clone() + bias.requires_grad = True + + out = Mul_3D.apply(A, bias, DEPTH, ParallelMode.PARALLEL_3D_INPUT, + ParallelMode.PARALLEL_3D_WEIGHT, + ParallelMode.PARALLEL_3D_OUTPUT) + + A_master = A_master.clone() + A_master.requires_grad = True + bias_master = bias_master.clone() + bias_master.requires_grad = True + C_master = torch.mul(A_master, bias_master) + C = torch.chunk(C_master, DEPTH, dim=0)[i] + C = torch.chunk(C, DEPTH, dim=-1)[k] + C = torch.chunk(C, DEPTH, dim=0)[j] + + logger.info('Rank {} Mul forward: {}'.format(rank, check_equal(out, C))) + + grad_shape = C_master.shape + grad_master = torch.randn(grad_shape, dtype=dtype, device=device) + torch.distributed.broadcast(grad_master, src=0) + grad = torch.chunk(grad_master, DEPTH, dim=0)[i] + grad = torch.chunk(grad, DEPTH, dim=-1)[k] + grad = torch.chunk(grad, DEPTH, dim=0)[j] + + out.backward(grad) + + C_master.backward(grad_master) + A_grad = A_master.grad + A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i] + A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[k] + A_grad = torch.chunk(A_grad, DEPTH, dim=0)[j] + logger.info('Rank {} Mul backward (A_grad): {}'.format( + rank, check_equal(A_grad, A.grad))) + + if j == k: + bias_grad = bias_master.grad + bias_grad = torch.chunk(bias_grad, DEPTH)[j] + bias_grad = torch.chunk(bias_grad, DEPTH)[i] + logger.info('Rank {} Mul backward (b_grad): {}'.format( + rank, check_equal(bias_grad, bias.grad))) + else: + logger.info('Rank {} Mul backward (b_grad): {}'.format( + rank, + # np.count_nonzero(bias.grad.detach().cpu().numpy()) == 0)) + bias.grad is None)) + + +def check_sum(): + rank = torch.distributed.get_rank() + logger = get_global_dist_logger() + dtype = torch.float + + j = A_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_INPUT) + i = B_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_WEIGHT) + k = C_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_OUTPUT) + device = get_current_device() + + # tensor + A_shape = (BATCH_SIZE, SEQ_LENGTH, HIDDEN_SIZE) + A_master = torch.randn(A_shape, dtype=dtype, device=get_current_device()) + torch.distributed.broadcast(A_master, src=0) + A = torch.chunk(A_master, DEPTH, dim=0)[i] + A = torch.chunk(A, DEPTH, dim=-1)[k] + A = torch.chunk(A, DEPTH, dim=0)[j] + A = A.clone() + A.requires_grad = True + + out_tensor = Sum_3D.apply(A, -1, DEPTH, ParallelMode.PARALLEL_3D_OUTPUT) + + A_master = A_master.clone() + A_master.requires_grad = True + C_master = 
torch.sum(A_master, dim=-1) + C = torch.chunk(C_master, DEPTH, dim=0)[i] + C = torch.chunk(C, DEPTH, dim=0)[j] + logger.info('Rank {} Sum forward: {}'.format(rank, + check_equal(out_tensor, C))) + + grad_shape = C_master.shape + grad_master = torch.randn(grad_shape, dtype=dtype, device=device) + torch.distributed.broadcast(grad_master, src=0) + grad = torch.chunk(grad_master, DEPTH, dim=0)[i] + grad = torch.chunk(grad, DEPTH, dim=0)[j] + + out_tensor.backward(grad / DEPTH) + + C_master.backward(grad_master) + A_grad = A_master.grad + A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i] + A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[k] + A_grad = torch.chunk(A_grad, DEPTH, dim=0)[j] + logger.info('Rank {} Sum backward: {}'.format(rank, + check_equal(A_grad, A.grad))) + + +def check_reduce(): + rank = torch.distributed.get_rank() + logger = get_global_dist_logger() + dtype = torch.float + + j = A_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_INPUT) + i = B_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_WEIGHT) + k = C_rank = global_context.get_local_rank(ParallelMode.PARALLEL_3D_OUTPUT) + device = get_current_device() + + # scaler + B_shape = (DEPTH * DEPTH, DEPTH) + B_master = torch.randn(B_shape, dtype=dtype, device=get_current_device()) + torch.distributed.broadcast(B_master, src=0) + B = torch.chunk(B_master, DEPTH, dim=0)[i] + B = torch.chunk(B, DEPTH, dim=-1)[k] + B = torch.chunk(B, DEPTH, dim=0)[j] + B = torch.squeeze(B) + B = B.clone() + B.requires_grad = True + + out_scaler = Reduce_3D.apply(B, 0, DEPTH, ParallelMode.PARALLEL_3D_OUTPUT) + out_scaler = Reduce_3D.apply(out_scaler, 0, DEPTH, + ParallelMode.PARALLEL_3D_INPUT) + out_scaler = Reduce_3D.apply(out_scaler, 0, DEPTH, + ParallelMode.PARALLEL_3D_WEIGHT) + + B_master = B_master.clone() + B_master.requires_grad = True + D = torch.sum(B_master) + logger.info('Rank {} Reduce forward: {}'.format(rank, + check_equal(out_scaler, + D))) + + grad_shape = D.shape + grad_master = torch.randn(grad_shape, dtype=dtype, device=device) + torch.distributed.broadcast(grad_master, src=0) + + out_scaler.backward(grad_master) + + D.backward(grad_master) + B_grad = B_master.grad + B_grad = torch.chunk(B_grad, DEPTH, dim=0)[i] + B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k] + B_grad = torch.chunk(B_grad, DEPTH, dim=0)[j] + B_grad = torch.squeeze(B_grad) + logger.info('Rank {} Reduce backward: {}'.format( + rank, check_equal(B_grad, B.grad))) diff --git a/tests/test_layers/test_sequence/test_layer.py b/tests/test_layers/test_sequence/test_layer.py new file mode 100644 index 000000000..156e60333 --- /dev/null +++ b/tests/test_layers/test_sequence/test_layer.py @@ -0,0 +1,26 @@ +import torch + +from colossalai.context import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.nn import TransformerSelfAttentionRing +from colossalai.utils import get_current_device + + +def check_selfattention(): + WORLD_SIZE = gpc.get_world_size(ParallelMode.SEQUENCE) + SUB_SEQ_LENGTH = 8 + BATCH = 4 + HIDDEN_SIZE = 16 + + layer = TransformerSelfAttentionRing( + 16, + 8, + 8, + 0.1 + ) + layer = layer.to(get_current_device()) + + hidden_states = torch.rand(SUB_SEQ_LENGTH, BATCH, HIDDEN_SIZE).to(get_current_device()) + attention_mask = torch.randint(low=0, high=2, size=(BATCH, 1, 1, 1, SUB_SEQ_LENGTH * WORLD_SIZE)).to( + get_current_device()) + out = layer(hidden_states, attention_mask) diff --git a/tests/test_layers/test_sequence/test_sequence.py b/tests/test_layers/test_sequence/test_sequence.py new file mode 100644 
index 000000000..16122f93a --- /dev/null +++ b/tests/test_layers/test_sequence/test_sequence.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from colossalai.initialize import init_dist +from colossalai.logging import get_global_dist_logger +from test_layer import * + +CONFIG = dict( + parallel=dict( + pipeline=1, + tensor=dict(mode='sequence', size=4) + ) +) + + +def check_layer(): + check_selfattention() + + +def _test_main(): + # init dist + init_dist(CONFIG) + logger = get_global_dist_logger() + logger.info('Distributed environment is initialzied.', ranks=[0]) + + gpc.set_seed() + torch.backends.cudnn.benchmark = True + + # check layers + check_layer() + + +if __name__ == '__main__': + _test_main() diff --git a/tests/test_lr_scheduler/test_lr_scheduler.py b/tests/test_lr_scheduler/test_lr_scheduler.py new file mode 100644 index 000000000..012ea4476 --- /dev/null +++ b/tests/test_lr_scheduler/test_lr_scheduler.py @@ -0,0 +1,69 @@ +# from colossal.components.optimizer.lr_scheduler import CosineAnnealingLR, CosineAnnealingWarmupLR, FlatAnnealingLR, FlatAnnealingWarmupLR +# from colossal.components.optimizer.lr_scheduler import LinearWarmupLR +# from colossal.components.optimizer.lr_scheduler import MultiStepLR, MultiStepWarmupLR +# from colossal.components.optimizer.lr_scheduler import OneCycleLR +# from colossal.components.optimizer.lr_scheduler import PolynomialLR, PolynomialWarmupLR +import matplotlib.pyplot as plt +import pytest +from torch.optim import SGD +from torchvision.models import resnet18 + +from colossalai.builder import build_lr_scheduler + +NUM_EPOCHS = 5 +NUM_STEPS_PER_EPOCH = 10 + +cfg = { + 'warmup_steps': 5 +} + + +def init_cfg(name, **kwargs): + return { + 'type': name, + **cfg, + **kwargs + } + + +def test_scheduler(optimizer, scheduler_name, **kwargs): + for group in optimizer.param_groups: + group['lr'] = 0.1 + config = init_cfg(scheduler_name, **kwargs) + scheduler = build_lr_scheduler(config, + optimizer, NUM_EPOCHS * NUM_STEPS_PER_EPOCH, NUM_STEPS_PER_EPOCH) + x = [] + y = [] + for epoch in range(NUM_EPOCHS): + for i in range(NUM_STEPS_PER_EPOCH): + step = epoch * NUM_STEPS_PER_EPOCH + i + lr = optimizer.param_groups[0]['lr'] + x.append(step) + y.append(lr) + scheduler.step() + print(y) + plt.plot(x, y) + plt.show() + + +@pytest.mark.skip("This test is skipped as it requires visualization, " + "You can visualize the test output plots on your local environment") +def test(): + model = resnet18() + optimizer = SGD(model.parameters(), lr=1.0) + test_scheduler(optimizer, 'CosineAnnealingLR') + test_scheduler(optimizer, 'CosineAnnealingWarmupLR') + test_scheduler(optimizer, 'FlatAnnealingLR') + test_scheduler(optimizer, 'FlatAnnealingWarmupLR') + test_scheduler(optimizer, 'LinearWarmupLR') + test_scheduler(optimizer, 'MultiStepLR', milestones=[1, 3]) + test_scheduler(optimizer, 'MultiStepWarmupLR', milestones=[1, 3]) + test_scheduler(optimizer, 'MultiStepWarmupLR', + milestones=[1, 3], warmup_epochs=1) + test_scheduler(optimizer, 'PolynomialLR', power=2.0) + test_scheduler(optimizer, 'PolynomialWarmupLR', power=2.0) + test_scheduler(optimizer, 'OneCycleLR') + + +if __name__ == '__main__': + test() diff --git a/tests/test_models/test_vanilla_resnet/test_vanilla_resnet.py b/tests/test_models/test_vanilla_resnet/test_vanilla_resnet.py new file mode 100644 index 000000000..bc9144fe0 --- /dev/null +++ b/tests/test_models/test_vanilla_resnet/test_vanilla_resnet.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import pytest 
+import torch +import torchvision.models as models + +from colossalai.builder import build_model + +NUM_CLS = 10 + +RESNET18 = dict( + type='VanillaResNet', + block_type='ResNetBasicBlock', + layers=[2, 2, 2, 2], + num_cls=NUM_CLS +) + +RESNET34 = dict( + type='VanillaResNet', + block_type='ResNetBasicBlock', + layers=[3, 4, 6, 3], + num_cls=NUM_CLS +) + +RESNET50 = dict( + type='VanillaResNet', + block_type='ResNetBottleneck', + layers=[3, 4, 6, 3], + num_cls=NUM_CLS +) + +RESNET101 = dict( + type='VanillaResNet', + block_type='ResNetBottleneck', + layers=[3, 4, 23, 3], + num_cls=NUM_CLS +) + +RESNET152 = dict( + type='VanillaResNet', + block_type='ResNetBottleneck', + layers=[3, 8, 36, 3], + num_cls=NUM_CLS +) + + +def compare_model(data, colossal_model, torchvision_model): + colossal_output = colossal_model(data) + torchvision_output = torchvision_model(data) + assert colossal_output[ + 0].shape == torchvision_output.shape, f'{colossal_output[0].shape}, {torchvision_output.shape}' + + +@pytest.mark.cpu +def test_vanilla_resnet(): + """Compare colossal resnet with torchvision resnet""" + # data + x = torch.randn((2, 3, 224, 224)) + + # resnet 18 + col_resnet18 = build_model(RESNET18) + col_resnet18.build_from_cfg() + torchvision_resnet18 = models.resnet18(num_classes=NUM_CLS) + + compare_model(x, col_resnet18, torchvision_resnet18) + + # resnet 34 + col_resnet34 = build_model(RESNET34) + col_resnet34.build_from_cfg() + torchvision_resnet34 = models.resnet34(num_classes=NUM_CLS) + + compare_model(x, col_resnet34, torchvision_resnet34) + + # resnet 50 + col_resnet50 = build_model(RESNET50) + col_resnet50.build_from_cfg() + torchvision_resnet50 = models.resnet50(num_classes=NUM_CLS) + + compare_model(x, col_resnet50, torchvision_resnet50) + + # resnet 101 + col_resnet101 = build_model(RESNET101) + col_resnet101.build_from_cfg() + torchvision_resnet101 = models.resnet101(num_classes=NUM_CLS) + + compare_model(x, col_resnet101, torchvision_resnet101) + + # # resnet 152 + col_resnet152 = build_model(RESNET152) + col_resnet152.build_from_cfg() + torchvision_resnet152 = models.resnet152(num_classes=NUM_CLS) + + compare_model(x, col_resnet152, torchvision_resnet152) + + +if __name__ == '__main__': + test_vanilla_resnet() diff --git a/tests/test_models/test_vision_transformer/configs/vit_2d.py b/tests/test_models/test_vision_transformer/configs/vit_2d.py new file mode 100644 index 000000000..92706e8cd --- /dev/null +++ b/tests/test_models/test_vision_transformer/configs/vit_2d.py @@ -0,0 +1,107 @@ +import os +from pathlib import Path + +BATCH_SIZE = 512 +IMG_SIZE = 32 +PATCH_SIZE = 4 +DIM = 512 +NUM_ATTENTION_HEADS = 8 +SUMMA_DIM = 2 +NUM_CLASSES = 10 +DEPTH = 6 + +train_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + transform_pipeline=[ + dict(type='RandomCrop', size=IMG_SIZE, padding=4), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010]), + ]), + dataloader=dict(batch_size=BATCH_SIZE, + pin_memory=True, + num_workers=4, + shuffle=True)) + +test_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + train=False, + transform_pipeline=[ + dict(type='Resize', size=IMG_SIZE), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010]), + ]), + dataloader=dict(batch_size=400, + pin_memory=True, + num_workers=4, + shuffle=True)) + +optimizer = dict(type='Adam', lr=0.001, 
weight_decay=0) + +loss = dict(type='CrossEntropyLoss2D', ) + +model = dict( + type='VisionTransformerFromConfig', + tensor_splitting_cfg=dict(type='ViTInputSplitter2D', ), + embedding_cfg=dict( + type='ViTPatchEmbedding2D', + img_size=IMG_SIZE, + patch_size=PATCH_SIZE, + embed_dim=DIM, + ), + token_fusion_cfg=dict(type='ViTTokenFuser2D', + img_size=IMG_SIZE, + patch_size=PATCH_SIZE, + embed_dim=DIM, + drop_rate=0.1), + norm_cfg=dict( + type='LayerNorm2D', + normalized_shape=DIM, + eps=1e-6, + ), + block_cfg=dict( + type='ViTBlock', + attention_cfg=dict( + type='ViTSelfAttention2D', + hidden_size=DIM, + num_attention_heads=NUM_ATTENTION_HEADS, + attention_dropout_prob=0., + hidden_dropout_prob=0.1, + ), + droppath_cfg=dict(type='VanillaViTDropPath', ), + mlp_cfg=dict(type='ViTMLP2D', + in_features=DIM, + dropout_prob=0.1, + mlp_ratio=1), + norm_cfg=dict( + type='LayerNorm2D', + normalized_shape=DIM, + eps=1e-6, + ), + ), + head_cfg=dict( + type='ViTHead2D', + hidden_size=DIM, + num_classes=NUM_CLASSES, + ), + embed_dim=DIM, + depth=DEPTH, + drop_path_rate=0., +) + +parallel = dict( + pipeline=dict(size=1), + tensor=dict(size=4, mode='2d'), +) + +lr_scheduler = dict(type='LinearWarmupLR', warmup_epochs=5) + +num_epochs = 60 diff --git a/tests/test_models/test_vision_transformer/configs/vit_2p5d.py b/tests/test_models/test_vision_transformer/configs/vit_2p5d.py new file mode 100644 index 000000000..f788cb704 --- /dev/null +++ b/tests/test_models/test_vision_transformer/configs/vit_2p5d.py @@ -0,0 +1,137 @@ +import os +from pathlib import Path + +BATCH_SIZE = 512 +IMG_SIZE = 32 +PATCH_SIZE = 4 +DIM = 512 +NUM_ATTENTION_HEADS = 8 +SUMMA_DIM = 2 +NUM_CLASSES = 10 +DEPTH = 6 + +train_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + transform_pipeline=[ + dict(type='RandomCrop', size=IMG_SIZE, padding=4), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010]), + ] + ), + dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, + num_workers=0, + shuffle=True + ) +) + +test_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + train=False, + transform_pipeline=[ + dict(type='Resize', size=IMG_SIZE), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010] + ), + ] + ), + dataloader=dict( + batch_size=400, + pin_memory=True, + num_workers=0, + shuffle=True + ) +) + +optimizer = dict( + type='Adam', + lr=0.001, + weight_decay=0 +) + +loss = dict( + type='CrossEntropyLoss2p5D', +) + +model = dict( + type='VisionTransformerFromConfig', + tensor_splitting_cfg=dict( + type='ViTInputSplitter2p5D', + ), + embedding_cfg=dict( + type='ViTPatchEmbedding2p5D', + img_size=IMG_SIZE, + patch_size=PATCH_SIZE, + embed_dim=DIM, + ), + token_fusion_cfg=dict( + type='ViTTokenFuser2p5D', + img_size=IMG_SIZE, + patch_size=PATCH_SIZE, + embed_dim=DIM, + drop_rate=0.1 + ), + norm_cfg=dict( + type='LayerNorm2p5D', + normalized_shape=DIM, + eps=1e-6, + ), + block_cfg=dict( + type='ViTBlock', + attention_cfg=dict( + type='ViTSelfAttention2p5D', + hidden_size=DIM, + num_attention_heads=NUM_ATTENTION_HEADS, + attention_dropout_prob=0., + hidden_dropout_prob=0.1, + ), + droppath_cfg=dict( + type='VanillaViTDropPath', + ), + mlp_cfg=dict( + type='ViTMLP2p5D', + in_features=DIM, + dropout_prob=0.1, + mlp_ratio=1 + ), + norm_cfg=dict( + type='LayerNorm2p5D', + normalized_shape=DIM, + eps=1e-6, + ), + 
), + head_cfg=dict( + type='ViTHead2p5D', + hidden_size=DIM, + num_classes=NUM_CLASSES, + ), + embed_dim=DIM, + depth=DEPTH, + drop_path_rate=0., +) + +parallel = dict( + pipeline=dict(size=1), + tensor=dict(size=4, depth=1, mode='2.5d'), +) + +lr_scheduler = dict( + type='LinearWarmupLR', + warmup_epochs=5 +) + +schedule = dict( + num_microbatches=8 +) + +num_epochs = 60 diff --git a/tests/test_models/test_vision_transformer/configs/vit_3d.py b/tests/test_models/test_vision_transformer/configs/vit_3d.py new file mode 100644 index 000000000..c66212f04 --- /dev/null +++ b/tests/test_models/test_vision_transformer/configs/vit_3d.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import os +from pathlib import Path + +from colossalai.context import ParallelMode + +IMG_SIZE = 32 +PATCH_SIZE = 4 +EMBED_SIZE = 512 +HIDDEN_SIZE = 512 +NUM_HEADS = 8 +NUM_CLASSES = 10 +NUM_BLOCKS = 6 +DROP_RATE = 0.1 +BATCH_SIZE = 512 +LEARNING_RATE = 0.001 +DATASET_PATH = Path(os.environ['DATA']) + +model = dict( + type='VisionTransformerFromConfig', + embedding_cfg=dict( + type='ViTPatchEmbedding3D', + img_size=IMG_SIZE, + patch_size=PATCH_SIZE, + in_chans=3, + embed_size=EMBED_SIZE, + drop_prob=DROP_RATE, + ), + block_cfg=dict( + type='ViTBlock', + norm_cfg=dict( + type='LayerNorm3D', + normalized_shape=HIDDEN_SIZE, + eps=1e-6, + input_parallel_mode=ParallelMode.PARALLEL_3D_INPUT, + weight_parallel_mode=ParallelMode.PARALLEL_3D_WEIGHT, + ), + attention_cfg=dict( + type='ViTSelfAttention3D', + hidden_size=HIDDEN_SIZE, + num_attention_heads=NUM_HEADS, + attention_probs_dropout_prob=0., + hidden_dropout_prob=DROP_RATE, + ), + droppath_cfg=dict(type='VanillaViTDropPath', ), + mlp_cfg=dict( + type='ViTMLP3D', + hidden_size=HIDDEN_SIZE, + mlp_ratio=1, + hidden_dropout_prob=DROP_RATE, + hidden_act='gelu', + ), + ), + norm_cfg=dict(type='LayerNorm3D', + normalized_shape=HIDDEN_SIZE, + eps=1e-6, + input_parallel_mode=ParallelMode.PARALLEL_3D_INPUT, + weight_parallel_mode=ParallelMode.PARALLEL_3D_WEIGHT), + head_cfg=dict( + type='ViTHead3D', + in_features=HIDDEN_SIZE, + num_classes=NUM_CLASSES, + ), + embed_dim=HIDDEN_SIZE, + depth=NUM_BLOCKS, + drop_path_rate=0., +) + +loss = dict(type='CrossEntropyLoss3D', + input_parallel_mode=ParallelMode.PARALLEL_3D_OUTPUT, + weight_parallel_mode=ParallelMode.PARALLEL_3D_WEIGHT, + reduction=True) + +optimizer = dict(type='Adam', lr=LEARNING_RATE, weight_decay=0) + +train_data = dict(dataset=dict(type='CIFAR10Dataset', + root=DATASET_PATH, + transform_pipeline=[ + dict(type='RandomCrop', + size=IMG_SIZE, + padding=4), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010]), + ]), + dataloader=dict(batch_size=BATCH_SIZE, + pin_memory=True, + shuffle=True, + num_workers=8)) + +test_data = dict(dataset=dict(type='CIFAR10Dataset', + root=DATASET_PATH, + train=False, + transform_pipeline=[ + dict(type='Resize', size=IMG_SIZE), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010]), + ]), + dataloader=dict(batch_size=400, + pin_memory=True, + num_workers=8)) + +hooks = [ + dict(type='LogMetricByEpochHook'), + dict(type='LogTimingByEpochHook'), + dict(type='LogMemoryByEpochHook'), + dict( + type='Accuracy3DHook', + input_parallel_mode=ParallelMode.PARALLEL_3D_OUTPUT, + weight_parallel_mode=ParallelMode.PARALLEL_3D_WEIGHT, + ), + dict(type='LossHook'), + # dict(type='TensorboardHook', log_dir='./tfb_logs'), + # 
dict(type='SaveCheckpointHook', interval=5, checkpoint_dir='./ckpt'), + # dict(type='LoadCheckpointHook', epoch=20, checkpoint_dir='./ckpt') +] + +parallel = dict( + data=1, + pipeline=1, + tensor=dict(mode='3d', size=8), +) + +# fp16 = dict(mode=AMP_TYPE.PARALLEL, initial_scale=2 ** 6) + +lr_scheduler = dict(type='LinearWarmupLR', warmup_epochs=5) + +# schedule = dict(num_microbatches=4) + +num_epochs = 60 + +seed = 42 diff --git a/tests/test_models/test_vision_transformer/configs/vit_vanilla.py b/tests/test_models/test_vision_transformer/configs/vit_vanilla.py new file mode 100644 index 000000000..7602fd0c8 --- /dev/null +++ b/tests/test_models/test_vision_transformer/configs/vit_vanilla.py @@ -0,0 +1,56 @@ +import torch.nn as nn + +IMG_SIZE = 224 +DIM = 768 +NUM_CLASSES = 1000 +NUM_ATTN_HEADS = 12 + +model = dict( + type='VisionTransformerFromConfig', + embedding_cfg=dict( + type='VanillaViTPatchEmbedding', + img_size=IMG_SIZE, + patch_size=16, + in_chans=3, + embed_dim=DIM + ), + norm_cfg=dict( + type='LayerNorm', + eps=1e-6, + normalized_shape=DIM + ), + block_cfg=dict( + type='ViTBlock', + checkpoint=True, + attention_cfg=dict( + type='VanillaViTAttention', + dim=DIM, + num_heads=NUM_ATTN_HEADS, + qkv_bias=True, + attn_drop=0., + proj_drop=0. + ), + droppath_cfg=dict( + type='VanillaViTDropPath', + ), + mlp_cfg=dict( + type='VanillaViTMLP', + in_features=DIM, + hidden_features=DIM * 4, + act_layer=nn.GELU, + drop=0. + ), + norm_cfg=dict( + type='LayerNorm', + normalized_shape=DIM + ), + ), + head_cfg=dict( + type='VanillaViTHead', + in_features=DIM, + intermediate_features=DIM * 2, + out_features=NUM_CLASSES + ), + depth=12, + drop_path_rate=0., +) diff --git a/tests/test_models/test_vision_transformer/test.sh b/tests/test_models/test_vision_transformer/test.sh new file mode 100644 index 000000000..1c6012a52 --- /dev/null +++ b/tests/test_models/test_vision_transformer/test.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env sh +test_file=$1 + +python $test_file --local_rank $SLURM_PROCID --world_size $SLURM_NPROCS --host $HOST --port 29500 diff --git a/tests/test_models/test_vision_transformer/test_vit_2d/exp_logs/2d-nproc4-lr1e-3/acc-2D-lr1e-3.jpg b/tests/test_models/test_vision_transformer/test_vit_2d/exp_logs/2d-nproc4-lr1e-3/acc-2D-lr1e-3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..541ef9c5515486f15edb946484fd95d2e2118f02 GIT binary patch literal 29576 zcmeFZ2Ut^U+BOtx<^Co}!DK{?<53c~`Pff5I9rRQ%^Iab~xY1cJyMs)tTN&`&qSLYWDjo7V~e_?EkIUf7NRoBE-W5b{>x? 
z#ERfP`pM@CA!}P8Bv82KN;gN9M-827Qo!YJ3!9}4VL2OpkByCxCk)QqZp@CIZ@FE5 zT9?=#{?&Q>47P=EB>jgD)8^7~t)#r@(994`EIw()T=Oe{pmuOXdRdfE)Ea3kZ$MUt zIX;vi4XPF7KPGzZ{Qe;=>;1gTr_kWta}5wS3S{eSX>br|#*=UF*Mk+;Cs$Si zv7yRlYXZ>u^w#~6=f5`0Sl*9s9ho=3%{dGBPC;UM5^ovSt-U+k>sU#CFl+%RKc;sn zL~B@J@0k*Z-oW$owv{YP4zo zc3myMF{JQD$jH?K-YBesRe-BR-|drttuQst|K!WzBgl?mr=HDGlD;lYfr`n+}^Uk zYEtTWk|awm3mS=9~MU>4Wy^I^~7N=mm0Vsay`!YS-1}98g=wOO6Pp^dWF8js2;31 zaXnHHGoBSQzcs1{yL;&z|D6M2pJP|9v9&Ru5c^Ka53WvzPe2;q5aax`$(oN9D_-*C zQzxP6>9Y`87!?^+K!;W0IzUwkhilJP&CT2M3b1wMou6FuY2^f2n+dh|^;DnaB_l$4 zkzzM?ET!LK)$(w!_h|!>_TpPif5py%ns8?j!G!$mMX=r__tlL6L4fJJ8i|(kEMyFV zkEZ9~k%_&Ff?g$m6PhI>v!3C;Pq~!1|KzKUdjG|H;1Upqq^j+h(0O0Ygh$OjoEuP- zF&cb&0o8dh>}uO!s;oot?|vX_PBe!$`6&nfBClDJHjpa5XP@k)E-jK?N zRH%b~oglzy?kuC1|55)%eMuXib8BKm+iYjaPice2hTdoL{1wQ|#K(ybq+{VZE;^}Z z=`%vG8##(fcgHgkA*RQ#dik50mlX+vRmR2azmN&Kzd6}Ph;Wn~L>M!nt-TQUb>E+S zCDp{?7qJ&Ni%%aeC&aYey47vx8@jBdNFbW4wUQH{`(n~m_nX42t|hTwWpIQ-V`l_9 zkdNjH+TpvQ>}RDUpzbc13dQh`9cSa?9fcnUlcMQHD~a!9k;w{yF$JJ{kJ3-RA1y`B zI+X!7!Fb>}*mTgp%6iv*_7z@_IB5x^3taliH=-X54wBC{Bj!`R2LKpVHkv5UfJM_a z?RTW)*w-taZ@0a?|4yKo4SM8QyA{g@eNpGrNR{iZGl5mkN0DblqCc(ZDX4imhErq+ zikFXZ35snuhW@Pt`0{BunDzS=r1vivxLbd@=qk@a$VdN{ln!C}{|#}x&5?5ih^LMr zqtaDt2q5MsdpCn45sVF`6&R)JARnjgGduK~+>KLKnHLtBvJHDE!_`Y+#Y3&!AD_fnxV6%z` zHL+V!EmUL&ZnU^#a)%#u`-0K6L4R0`ww*|aex|RvYHJ_XX)zG65tiu>A|iy3bpPbj z`V6uE)3ZUT(Z<1~tSEqFUql=A^TZv|k^qjuwqj~W++k`zN}?VQL*B_&>_3dM^32fK z-Zf(fs~Xc7`?}M6pR3`?Es6R|U}Ev_$Ljbq$@ec+gl!G97$CG;9=JE%^po$m2D$vD zbj#0epa#0FLaVS-{?#V_gpLDN)nZqZ!3}-SXTnjqziEJ4^hL{hu@aL|jG##8P5Uf5&Ib&&$9oxft&&`mRI!aGtt9ZyB z9Jnp*1UzCSj92fmJ{EoW?sKf`@vy$}_RF!(hN|?{A}Dxj*g9wQ z_=o9rsMc2RFuKf5u6yVBM#s}^Wd?$``w;s8CuC@f(ecng+r7BttI3Sf%cYYaZw2&& z3RnsWlxtnsJLCP0xGMvGbjLoLrv1+2FP_|*dK;~)J^0(XvOUuz8lJ7k>#q|fL)+mI z=~l1%%j5<#LRCo;}^kx+@#LuMK5~*5fvUf(mk^DIQ6!0~Te))l;V0H$kFT zKNvB_#+|(SANAR|J}e9?g=|lm-RIi^PlVGfJ$gT0{sdDhoZNWiRC%|?)TZ!$(}Oxw zYg}g0c<031T6nENDIa9T38HFt>1BW@e4rUh<62C3Goy1) zC*w9<><_pK#Ioubus1%VT8B+;IT<3YJ$&OZfsZW*zGqz8+>XDkk*VjrR%8|0=a`$D zmXev3PV0Beqe6t{WmWo*eAa`%A}?xbE7ydWeG#|$CL3Y%n`b-7rFJICq7OBuwc;zR zTebT7GC#t?H0MIHj!mr{Qa5`KFO<*TaoxJ^s-w62lbPoYITlrRMbOAx=?G)+xZqn= zI-!=XRwB9J{Cw6qx&8Y5N8Nis(MP3$G~lIjF)MKJO?T*eMA;EaW7Y+Bb`b@cf7HZi?g zuiqRS9I*?QaajTrU`-2#Xj8}uE1ic)Esj;zA2rXYDc1+;Ax*dqFjvovp?=QQvc+u4 z2Jcj&eBV)!pcGnxYXc?3!$Fdf4wH5p!4Iq$4K+!&XBAyM3B6Kc%<(j{?mOjx>8_%O z7*{veA{)1l*L{duEZIrWW+G{}Ao;fW{QV$C8oUC3CUM%(wc-wPdCRtAbBlrpGCjk| zekb>IU(Zn^9xs9MAK=KuPKeFJj$W?9EF8PWv7I+K$czeaP!J%aEvZ>&F5x6sH22I? zZalaVyFwDdjFBpOBW|>WFvH+=NvC(^8L7@r({33ehF>tis*x&N%}s<`1CPAAXe~c$ zmgTY;l7D~E%1HSqU&6>Mlq+TKp`K_cOaKKn>VLR@oB6=)FdVQoHgNhb{F46mC95hD z#EGwV$E{7OGR*Y4^)&sCPcVf3j;)NsA-`ZW+@Ffmu?K42Zf<<29tBI@5qDa7!6GOAJ=q|qe)$3EQtVxBU{RU24L5=0B6#nr z=5mwjuAMq?6RTSW& zSx0m}aP9~?7$h;4D6B9XO>OJ^usvpZ^wis&4(^Xv369#u4Q~dya*jD1XY)N7p$iW( z&EZ)))tWV2Q=wy^az#+XX-0Q%v}Pqf+1*j<-GfQ<#j1~>NVTLX4KP@~tYDGpAG8i=(|{Ys!+g zp*6sL{~37uECE_6Tg$#?p0@)9Lib2H7bd&S16Qp*C=;@mmjsN!9q}3{Hs~rx-ODY0 z;7#BO7JMMo<2qZ_+4W$TdEq2TrQ57yhH`I@FWI6P*m3L-pkciDmBAe zMSE{%7kKcgd(4Q9Ms@z+$iFMR8V*G@@57><#C=CPrkPh~2zK`!@^n5v=uZy)Q@-U- zId0E4-~RK`l>d4h{o^}l9aw|4Lq^9fseLr!#zxCRvO1^!5tlHt}V_ly*5idor2y~{)kRCQjvLzO=<;(P4Y+ck! 
z`zG{nd0pauMsM89Du#Z~OZVxW6U}5DdvoK;BY6id!m|tOlZ%fJ>p`EZlAO4%(<2ci z548w9NWI*kE{9L% zd%5+zh&~2?^(^!^v*~rea&zrWG{cpF;5H$Bl!z@97nuALi7y zAYyBe{22d??L1@O6r;*e7sFV7>^!-=7B9ANtMYDu6ux41wt$m#qmP|EQSOUzO^f}= z*#A0vEG@BMrRhiSvi+usNf7in;KbXV!suuwCp!vo-(U`)lBU*UYB%7JXupNyr^cdq zYmGET3lNL@%nWJ z(6_bOT_;0gR?q00wo?Lnow0Y5E*@OH<8vNTNXRxg1j>37w@ob3nGjasusu`%OP*mL1Nc7z4ny~iNF9YA}?hF2aC?yBFd}*vOzf^JQ&a0Uy-`8mz zc4AU3#^ER^GgmKmV12u!B2~1owqW@%l`w|42@YAD8xXxNWz+t+eEmnS zJD8I#lUvbO7+-l105?U~aOf{HT3+SaeLQ{$d%n6!5`-F(YZPBU&fBGQakQ-1@DgSB zQ{Tk8kb>9j=Rex>!1%MH9B{>I$G~nQ$Oy7&p|V0+G_aMwLAJg;g*cGnHw#;v4sUk7VX8zt&sag7qRz0{5M^hn-4(_Y5sLH8q0&)sec!V`cFGr;%emGRktief70{5?A+FZHADR&&ptlY&H?MnplTP zgV!0kKs1`1GUAokohVX(=kW1<#~RN{mqt(``6%j-sOZ04{{xZH|4acvoW}IQ7}th9q$F*O-HMm_K z;D7OKPnVd9to0NYP>gCJ?HffKmNVCzaM6xim_{v-9XK%{?mbUqKqbinZ-n}8r`%~> zRsXi?oss96=*Y3hxldD`7#e>N`21&%2?5GY!g@~b!wl7#*!;ge+UDfPYE4wfsm%vE${0z7O)P<%WFx!qU4R{4wMG-&b|`51#-39U-yp4xShK z&?b5Cm#8@7YEnhr)>L!t@}gvA#_nAyj{KwG0sI00P9K+MrBY0qQB`R zT>(N~SC=+S#3>YYUDRPm|0cV(S}Hag-ZAZ?%HExzJ?@7o4~*N$_oe=z3F#|D=jVCh zEf*Br)smn7$QlU;(Za$72574#}AC{e= zf{MS4Gt=YYcF1;Qgp@>#TrI!7L1}?}#kp+ajc_DRpY_y%MBabEZl?XJ$xlB0Q!mvr zF}oi0VO;UtEL;=uu2{gx4@+;~4Kv^=d$u8=qMg@qy$Vb&sNH@WYB?^-FqSzG9D4e2 z9wF5q4=6yIu)A--VnFwnXyQr0h)}$dx{i?^nR{F1x|%*r`C=yd{ZaA(rvkC<1W>6; z2~L~89LG#!-5=ncSojS$jv`UUf7l_^g;s2g4T2!K=dSpDTry(0NDC9f%D{qKYCwTn z%2+t@8vFy6{H#Q|Mq7wVN|z{tW+i4~d_220YhpJFnn?`H$x+i4+qhem)-FRl)HemMyt6yr6IWIv{bQO$t=r@y&Yqx6$S*qOlHCPm=pFwDYNiTu;+yem8#1pXx(4x{C zJcGDd{HAp}k2M^(KiQtujG3NQIg#Jc%TgX`ILVX)Lw5<183I#PRp#i#yC(!`0GG zoL5f0Vvq~EjNZXU;HyB1&UJ7t9|M|yp_&eqeU#T}^F6ZHQUDU#b!)M{N8kWpIsOc4t{H{Q$9&%M;f9PEk6? zNtuJDl=xy@W=~mvKbbh-Y;hue)%!EPXI)1v9CZ0@f+T?cr-Y`-STZDaeW%efZkzj2`Rp4p# zD40O>b8^Hn_E5BH8P_O-23tmmswiB0#n`wh(=+?nI}ykI&xyua@1K(HBY;cw!SO>nFW z7JMckGQSf42+J-%+v7V(hK1%kDU8Z9WV4G~ed*!?Z^QOIA{7Bbh zEf^(JGwJbTu<|o@7HZsw(7`r?iW;Gnpg!jO-_)Fe_)YLtv+6IJx2$@Jt)y|3p@1@s zD~qH3?Hfl=mi**fu{Zie23c>ibH3&=nF0{!wo!T!-VSi#2=)xoZ;;79{0j3ml5 zW{||EDZ+$ztjkK;m$MC)PGVj5SIO90vkk%ZD5IyBg-EJY!oe`pm7aVL3NE5&Kym*L z-fpn}jdIv9&=f>_3C0=j`(^8Afxas19t5KVI|cUsYA_5OK|DDO_}c9S{beiP1L^;f zTa5cNrokWK<)zTU(ZbSr6E~c}5x$pF@_5RX7fGIi-NA{2(*o;(9Yugv;-~P{u=j$y zFoysU6XwA_IE`KMHDk*ISFzrwbJ*tV+1|Yx-rTr)?|{dkD)Q%l@@Ssc_$dpE5HD67$x8DkbeB)(ehz2JWL2^iBl67vD;I_@&vrK{D5&6SRy zWTYI?TYj@Kb|N6CUfW>2bsJsxkLS}7(EQ?83x)bL#g!wLN)@QZj+ssI2}PJdDh^Kx}SB2MrIpZ@SbmO+6FQ`V8@>l$zn33N0|Grdj#g2T>0% z&n*8~_nP?9qaHkx^A}o%e?IjJr1J*AnoAV8qn|+?9|Fl{<3ZQpO!G0f&44h0DDSn5 zMsMqwar%n^8YP>$ylx_=n|pbyvUqc-=L_H{vyuX*gG5_1poIZ%52yntP_^HaFCJF0 z^LvcsF3Ni+yk?plh-Nlie$A{?uP9d9Hj5pr?;r|-4vk2{=rpSkSS~yF3Vo^%tuc*+ zcx0;#{zwl9D_-1_Yv!yv*m}3t_Vyd%BjToLNv2;1qb^+rsJVNSXSdkulk zK5`rk6CSI#W5f#QjXV`I8+)qJ_}lJIv98Zw9S+^xlh^zahnL^B`1+42y&p(f%vVGu zv_*|9-GZ;EjVB&-v<0TyYJJIWy0-x*i}VR9Wh$F;$n!%fd00|P+Wo71VzF4vo6`ha zLKt?c3{hiE%VM~)qPS_Gm$@e^p5Yp~h7;m_7RShqRCRl{p0MvNSP-NTgzZ&1A8306 zW`mrfjeY9Jt#uAR)Mrm0rLThu)16pRq=6Lr8t~Hf#nQU6dm)f&4-=gDI=0a18|Ud+ zYWHJ!oW45q*;^{)U8vp2(e>d47J!Q87|W2Cs_CHE&Dp3(iovpbE&;j0kM@X@qpt!!ZXJwhU@PyNXI&2|ztxW+Sp-<*@7M^#}eBemAF-CrnA7JD4B>G!mwu=olR!Js7 zLV&6!JPHVOOum7G1qu8P-e=jppf$NP7{?nmfr1)t2;jP1W^j9!t84yZo$?L$)bcTj zpo10ttAPD_Sa+tqiH4#&{}sq*$Y?LCk&DNAEIn}N?F9N4HZ&omh~Y z$dgCgyaAxEP)l}|Oz}!zU*t=BAnm=~FwU!_d#hg3Mdv`Qt6<|&iQ9LpUGiu4J=^*s zoxnKH;&G#RifH31Zl=LbjzwjX1!yaxr|;}V=^Rtlxay#{5qM%DFwwrl70dF;)wMHI zOD#0vQ<9sWBx_HDX+Ux%s{=48!h9mtFoA)46M9*tJRuvhoj##u6kBP%UC90IV zs>Hz$lSc|ylr^Vw-4cT?>MW1q|A{7u*a5pa`Gr8yQ-ijVLMN2^T!B)Gd^u{lK|~fu z=?0aslCCvK*^elsy zq4lz4v6fJyZ07c)A@StW!xx!*lRj_tbRjx%i5LmY7nmsV5Lz2IMQBEB1Loe#$_(ZW 
zbL=TlG0ypS4CaugXJE`KLEX5jlcHZDyzBTSg==W0UAb4e`sp3?5soOFCDD%&28>t* zVN|)V?l2K5TP>`t3eWFR8{9*ZDbMQib^mN<@bYjAd;R9E)^np7ZfLw6C>jQuW_5r| zV=$~87Up=Wf(eQAUqZl4hiLns&7E!Af9v5LfB!pvHzj2xZFX1)e5>V%T$|p$PFs3` z`7~J#qnGBpft*QCFGf-a5%O8@#>An-74w0$J*BS3jiDh6->M&-TJUzN9Q5il@kdrD zt^{6^L)3wS)ae0v+O#?9PrjI4uO{3lWWj@5`=k_#@Sc^%LV6wQQ(G&JDfQNS&gYg$ zF>^TlM5r_3^hV*fMUOu`Y7LGpHA~?AgxDa*i<+@qv5;an#u z!ouf8Qorgs)~5(1TW0Pv=ty-Vq(xBI;A z63V3D6%JpE8mig+_vk0%+xK(cv~F9?VIjFh(w$}UNNXVGH(($0cL(_O>VCPQ{uT?y zY)+n8Nd5<&oricF76}>il4QJ^o$QKI$-#ELV;uuPgJ?PgESyY&U*IuXfVob)U)oaY zh7}}=qTT?!b@M6|mB9(du`XEYs*WDx*LJaYm)G)&krTh$_4*{f{?AkTBjyb6E4I>z zHaV>fkAjJCTwjx-34Y6Tez3|M(IE4BY5lV;(yi?)Ytx~$HFbEVZ_xZ$?wy>6t;M&a z+Ly-yS^C^35~h6yM}W`sm8j59%Qq_fI53LLd5d@KgHKHSi{M@b_T~zWZxg-xzjhVv z4-%UDYK|6U+qO|~q1f6Hy8U!78N#Xu(Rj6%OzZFl?P6YUYW8o;?ZcUR=k9yH(kyO6 zitaY7zAwX3*^!}`a@`^A2Rz%c5#9`~D#Nqod87LwWl!{9&}(O&Q@MJ|HCgfcwAku{UwFm**A%mUsmNtLyBtlLs)WgE zszqlXVZxi?5f&h?ic#CT)VsT5%1dL)HC|KI$)UP@NGKzu@En1BmV8M_Ryii8=FSt* z7z-21)8|ej3?xT1iM56Z4aC+Iw5sjkD1wj1`UTo&5ELpxVNC|s#74ZU3OUwC9?6z& zZyB%JXC|`$IbBL(C0xC(8}jVwL4kP(F$S((4Nx4R2T~Bb(azkmhHX>JQAmczKlHi%zh>}u4~JKqlVm^`yv}&%YRD!a$9BLrN@kH)J5lon>rL?;2#=Qo@jHPk% zXy;yL+fC+}q0Et#;};T5f9u%)JDrY~AR&7b9EG?wWyBqe6Q=<$MrG60NjD$B>>7|Y zmF044dZr;#=_p%>AU4z(k^E5w=a%|IW#v?Pq@wr*{%!PQe^}3U|37kVfWq;1jZg?e zXp>=%CnJiL1=LQ-h_p^9rnNfCF(BdoOZoF4Of%|{jH#b|2Y~F!R!0~B z&t0nN1CXuh$&Qy@-^G-s&+z9@i0ECpaZz+ee(*SQ2oVNJ_dF__(Sv! z_5foPSjTc$9U+EuRrWdC(8IcxsGU8*`T}=S=)t+>eo(O)ONqH4eo}p-_@uB#qUn=3 zqiGyRq`n1FrS&e*ovF-v4iJ2&yiAwO@z%gKq4I*`Vd%TW9;f)2a_w@%Uatd*MuLNG Tv>Q)X_94#y{gmmUKS%!;Ayrr4 literal 0 HcmV?d00001 diff --git a/tests/test_models/test_vision_transformer/test_vit_2d/exp_logs/2d-nproc4-lr1e-4/acc-2D-lr1e-4.jpg b/tests/test_models/test_vision_transformer/test_vit_2d/exp_logs/2d-nproc4-lr1e-4/acc-2D-lr1e-4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d5546561a225aaaaea4632eb7cb975dab870f62f GIT binary patch literal 29143 zcmeFZ2Ut_v);1bMx)|w7i;94VfJn0dQ4vrPQBaDMsE9NnA|fDxAianY6i^f*BA_Bf zx&;z?M5Id#N(v$hL=s9^kam~*D|hd+J?Go!+xwj7`S1PXvzBBfS!>NT$C%?C?|A3r z5A!LIbw{iYTR{W_1R&?Ze-QpC!~(Jkd@CUEW54RhYqj8yz0m5_f~$pvgoS_EM8rge zMZ`peg+(Pq#l(NSz`xc=h_Cta=Eq6?4o5X>B9EZGYF-|6yQoXn2G~rHy}{_%iwR+Z1zd zeqoWd1T3%oXqNy)@IRUb?*F4Vfs*kaq#u7nM1n1yYHkhw<8wu~Bvn$eq8p9qdPabtc*C?wLENw|K_{c}(yjLP4;K z3|_m3DjyPm!E~*;G`jh4m;p}_!CQx^h{Ui(=u+GQw$luHt^++seaK?s@d@v zlg5Wo9(gEoZ*g3~2j9+pyZ{^KLyiJop|ssRfvGPM$i(z-o#DE9S;2MUj@^?!mn*{0 za0)9FawuqDK7{m!6%4d9?Ra8r%4{4TvYul`GqZ63UU`-^`abe2cQ?H%Mvb&r+y2Pd z{J7qw6hTV4li*$@2<(y@Z;)J3h)oUP2!ybYa^M(`H%!w!rRrW-o8#uWfD1;+kNX{A zCK2wBx+M%D;VJ3|P6}^8#>4%)NA?o2vfcoUf+WRK$12BNJvY8$B-|c%mB_VMTd%%! z+FC2~YIJbsxQNl50!KQLf^2+hZps@$RGHT@YHbGUT%q){Wh1WBGT(wedRPSb7W;;- zYYyxZyOA8Ij=poFQa9==u^FvQ@Znn1_>h@Sh8pW-k2m*D6l^~Zb(xV@Oo1lC)}mnA zK)=7MQF#sw-LeU(e3(sdgvq_Fyr;06W9Md;pbNKypLkNY62kq_}fkdzw61Qko} z^mX6hxOx|+wyl5H%DN}>UQKON@CQd?qPZ?-D-)Can1P6k(gY$-4W8{S4GFKxH8~v< zai_n`Ce^=LdUw$3qTQyi4h^%Fe0Lc!LL0jQuL97{!nn_dSX4eFZY$6|Go}EV+eLG= z$ZT)UChUk9fo(v?>fPq%vu3N75pzST-i*OS`ZCjiLgdJd)4CgQz&6Zv8Jkq94koq? 
zuM7;@i4tXov1>d|a|!$NhAr5*%H~LaS$)~%1AfkkJ3S(w%oTt%#u8DntU4diCSEfx zkD!*?u=loHz2@;$uI2Tq$vYX`ya}~)XRr$9s(QBlOY(141@^ zp6G0Qd(^>lP2TyQc*l7s^UCKa3x-Hy9ijkbHpB@o7@p>--JG(6`h?~@IN{-!eDTSW zBvN8Wc#F+u!+3#V6&ugZ_$_=$Oh;5NdX>38AJVuBultoCir4vSzU?&-OtV`xHMT3Y zIVS}nsW_-7=Uba$NEh?6a*f!V>>~E@sp=QV&J3&y!G~}iyBU*@zNZ}9@7&M zg<*O$`GTi>NR!#K)^L^5`ax`Q>V?qcd)D_qzdz}%?x~r-Iy!Ri^QbY{i`m4-2|5w1 zcfhIhLsEXy4YlZX3&Zczhc{#`w|#4K{5aaBVYYe4A(*cfF@)Bg0$;-s%pVTJ5)o2{ zItsP}MiMrX2FC}g!vY@Gy?&kUSrenRPnWdVre3j4#`mW5bO% z*KiN(pcQmjhSCKKGdJoOoS%tfJ=`WT(I;qfpG!~Y!!(ooP(|Q$ys~-F)1vYhHcSa>Wh*C z)&^MW5UO!8F-r>4uU3(oJ{`)UQIk&Yj}F}vBhc!KZgk2=3@47`=(}m~k^YlC93wrSEs*ybqxN^Sw+ zA{x*bVN5nv`Uiy4pc_!T0h~yR27`Nt9TXmNjwSCNrN3PAsxjWM=n zN7k)T#NFo(wz;~f@(ouyo5G)8oe=3nNbrXD2w_s;lpHMMI9kFlU~A0>LSUryAf;K$ zT1QXC+?R~NTzpDAMcuYcen)-ceT%O%&3TwQPxSu>eI-CDm@X@UPkMq<^V-k3;)8Aq5(1K z^B0fO1+2np1UQ90r8H$(Y-Zl00k7Qcg0{h3E7_97kzZ30GNK>{UB}&H5 zBJk_Anr*oVR-T$nZ4U(mk5-XqNT%(fEW%Gu?b z^mi99$jEp4V{b?tS8gWU+vdj@Q%RWVput3VqvUdO+!ewaptvcA5Ep2=i9vHWXic=g z=_Bv1{=IPU)93ov-Wh#gOya8gj!?-!huUTVEqKCt;2ye=*TNJ2V2FVNXg6>i z4}Q#X%9^%ks(comJX)L6W@{`;Piq(Zf`0rmsr-2N(i6e^OfpB#Vjp9PBPY{f>+_* z-Np2cv)WCw2{|Y|4I+EO@=tEZr|h;|L(N%ivUYYZh~8N&B9gatm7g}1JdB`V=n?Fr zyjFxL!H2924k5fiK{rc9^$`}bVI)C5M2sweo;TPBv+70c z{>cZ{KRdzyKTbWU*-S{lt~Fm5c7)sj6T`#4<|*`)s^_@6%QU|}Y3z8buCK1I!b1F+ zR`~f0tBqc{BR3Cg)S2+sr?N8nkjF21Juu)HAi;;+%Yjpq2)>9F8w_0ljJLDVgynk= zp{McO2WanHhh`5A#)V>rQe(CvJT1if1IO!eH#Ne#W(eo*>GZljiC8mrLF(X|jD&M) z`|RHBAgm>LBSw~}BF$`E99b+%D9ogZCx@Sp8l_^_0_XaOb}#1@&flj~(>x>Jt*SZs zd5v<)oE>mZkL1M57-#Q8Q}prS~aBw^P`Fqa6mnpk5KW0=|m5F+- zz658qAgXX&bs{So#D$>^2+sRnK4gt0`v@P>=Z5xyax_swe8~Gw$hG1&)%m;ts ztDqA{%L*Nb2u5q)1*#b~6e6)(0w#zG0W7H)#jgn`w0b$N4_*Bni}r~^C*0JEC}NWv zzSMdDVT(P(P-xJ*4j0F(b~A2M(TxbPFl1A!yrlcLP(DNyegT>oC5yGlnG29dN&?%OS{Iged!@fob{s6yxVJg& zymsuyG{N&*iT8ifzMqdp-}6=j!HgwpVBDBI;6<@jYR>T-`{-eu`|w+kOG99wx^MAa zqV%)nj3cvZ_?_QpS~3nl^N zFBlr6D5iVV@BNeW7f9=`NBg(^#ElDn_V6GJ*i={rY$lgE-NN!P1}51%P}Xy0!p+;Kd%s7(PwgG{`|I|IJOmV2I^3I43LI0WNRwUywgTAcSz{zAeyu-~*@VmpFEDbJ}V};)VNU~Kp+Zo5oo}CJ@ z6XlrqUm3k6+PKO!OzOpcwbDz3-(UFecx0rK+tp(BUS9VKIk6fvvuXG$j!I}@7!HAP zV_Mw{p{5?zSxmDxzCMnBomQY7bh*^uugO<%mvPT6@*B5s*dKK;)edF^B@s-4p^Z4k z5~IKzUqi+hU?x!~@|cIVzF zcp($`5GbyCZ%C|t9Rq%Sfve&AGP8F3<@)Da&maF7e{P@YQ0^bL{jXWDa@&Rh}U9z#E%nX zz>)VuNVh~skX}N_eXg!n*+OET%xb#7!PAJV85%wx^X3T?SohEdnn>joqy%K`jOWA&Y4Ik1|xes>ugohr; zaMFPMaWY7HGVE~I$s2gv!jc!!GGVn;&BPGwx~U4+R;f4julvvB_#Qd-^f6Mui({|x=sc%1hV&WrK87JHHpxfMX<+XM;Xs&84OdQDa$h$VX2=CPnDrSc)-{m4uc&K4Ns1&{oIrl$r%{_}iD&FB(H ziQHVQQN;{ydxq*wTWKFbeZqeBOe$TxrdzzdFfiQ-+3Ow^p)$nL{siTrLl7KgH#GAK zx~aa051Cv#1t!IgFC80)K+$_N*xJS- zn&{8j1|ImSo|z_3e61$T%U+%rHdp*@_Q*`^|8(&*0_7Q52JYhm_r+W%S86sZ;mQGj zqGD*WUiDc%L?UIW!S?x-e)p{1aIH8wlpL_3@GGgi6~1BWwR>%n>8YYHxH)O0o(d)9k+KBpT^%Vw!G5os(H`oQ+tzssOEc~3g-$?%nW9ov0KG)V8-}o zpmu^rn+7N}z1ATCa|qeH7jK{H(KCNfsvBg@O=)bH)!i&(v;MZHFLMXbjG!Psq;&{`ZnTLq^uY?jy;4i)r$<}*-NLe46=4Zg zVRf3cdFpgAp75cRPH`L%5V4DXBa}sr^1AWz=&KV5As<)`jPA>DrA*SZ83o6HY~B08 zj9iqM`TZxDlp*-KKvNn2(vR)P=Wb}fDdS7s`c;)NHdMK$cN;G)U$x8J2R}vl8!IgM z8$$s~SY=gxXiv;=KY|IJ8De_^JeHnJ7`E{=@xOK(mS$@RKHMJ zde6940s3X5vqHOzoa_PrZ?hwbl-Xuz?4GM7`6vTmd|YpXWhl%4e*QQ-xpqu5=fj-< zoN{et=N7H49T(TA?cJHO!$+`&VTTPcFDDx7-3 zZX8s8eotNHm3uKA1LVwtx^Cv6wXb>zx!ifzC z&Na-;nhB1f8=<*FeW}f!a}hQze+a*4O0Vk9Bi=VFczpNMt+*^cggSxW$V9X=f>~>T zQQA86fCmC_xiWp&OyBTkGwk?Qu)4wq^BZgCq^bxv0i{I&u-sGRG!9y2m{?-zBRBMI z5HqtKg-&gUGEc^Ec0cArAUtwMAxHNbmQrkwz-)J-MIN?9{qUDio;U#<5|@?X{)EIl zp~LV&OA2On&7+gogz+Tnq1}&>BicN1A&LRFmOC3cT~}!2UVZ z1#W7Uo{Ecg3u&mSTbh1GB&71Su&>yHl68M!1h&EM27?n0ge$a&;+4aSo8rG0)@EBV 
zZ)UQzzvdZKr&_*x@v3WWIB~xl@VrtczL@TMBfollS9{dj=}FluRD+|&y10rXCvtHe zyN>8F-Eb2cS2Kmd>chrS%nF4MSrL0WCF#K1Ukh7o;%(fmPKHF2?=}-Fhl0~F@NKNOD21n_gEfO5 zrUM1Z!o$6Bx~Hu7XWh6B^ayk1z|bd}2RehL2pB^Nj(i9w5Ca@s>|WWr2ghBh0D5+Sz=tRm@TAOwc`0j4_>iblkn;TmaRN#iNGg_c@CDv#WmhXLF5Qf~@j!Z~feQ;>iJYMqayGu?<}Hw;qr__&k`tAq zp8!>;#AywTsqSTF{asAEmuI{EU9)M=zr7lakvxrEI39vvFCxK!|5ea{h^?G$uuDNp z2JqIPjsa>^s1&egaF<#&Bd?xX;@Y`WHvT}JMI>%K;ZDh{oYm;1A z6(=4StU@zEz#uaO|N3x>urx&mD)jji$+E3WJ5_unC8izMXs@S z$KJPdO6Om^f1aVcZ`T>Cea~TF!cplxGtEHS*7xGqaReDo@fR0Wj8)*rs~P0@fbE{I zE92gutxZ3hX3>&BqdX7DM3}n?)K^yifu8F^8E|j&Azs)R(%M&4L@c(_DcV%M5Xij> zWZN>jV=rFRSv*@kq@dp@{e1jl2K0f++U8d!1mRyOEY{`T?;JC>8AqIfj9y@~u~mp+ zz@S+^$IZmRQ}ujufTA{j*T?-vVE_M5CIrco0b%B35_)DEVZ}}^Z~zMZD$j#eXRQBT z^fD`)Ieq3w-(op7S~E)WohjB=obKDaD9dI#GDb(q^d!v)8Hab_h^Z>8Rb zrdx=7NL1uQP_3IT1kIG*CSd?DyL8k`MFB+Cb&vt6dFN5XTf_LAQ%%xba+zJ*K&Goj zq)hQzOYOL{(mhd?wjko%F^NuZ=f1MSv8%UbqML=Yc|r~&xc4h}ZwVi6P_YfKZNZMp zupEK?U#yuhRxBec+th{ZrBN&KkqPP`PUKhsI^pZUA;>9Ascn)DtIDRUGo5HQgcY`m zcWZ|0x5*DWbxKDBzpB=2QgJg-_6C&N9Jzs9=^ei@XM4C)DCykTM~%J7WvF(6rBBx7 zZ@+Rvicjz%m&LVb?}Lt}4n<;zfUQkAJkCcfPtX<|_HL_&n{ec{S)%iKW_xK-rBNxZ z2M0Z%A6#InaSaAwn-UwNTD$oWRZ?y01wN$ALkth^+OrZhMjGo{U0rp3bBd9Ka?q1g z$M--)AP!$w+71%FsG?wTL&@O3?iuF=p9!8>l#bV z?e2${cJ5hc#_)wx!NKYxTsDY7;Za1aODIU@IeFHwqS#KVq_|)(jfz-=Yy8TtpV`i3Z1yO zA%G7#(=FQ6a#j_Yob_FnwMADS;YlVRV#}>X`|Z7NrW$n~2w_xS#hw9kua4c{?=KCBUz0l%gvn$Ny+N(x-is38 zb*|{K6dLv7Pg^IBpBFP~EyPdXkA_b^pS|bBhg>f&n@VYx$r%qi$-SMe1OJ?I0>fKX zigwuzYB?nCoVk=~_6%cWH+laumM5e=+P6*nEr`PAF^B#Zi*5ULSabNGSj>qHgipuv zAzwA3O$B*kAXMM_j0;AYnbrcAwLhA zKXSQ7HS~lWq7uQqg%m@7%NxWeJY1Es^`F?pAm<+le8^euAYvJ(pd-*mye^)h8c)*+ zZe6L#)lc|7)aaptJ$~v|$o_2tGU+|^%K~q^_R9^#H;n7|1vU2v`$uSx4$R;Kl<(F> zNeIYox3yf&dQ;7nFXh}ePkV$1LqOt9jwREYlYs>^6WcnEo2)!t{Itq!+Xl0ijJ-D^ z`t926fNg}9{)sRs%ZdSOev62!XD;37denf9=qMMs!`=Z;3q_wDFW@+dqEj9#Pzp5x zW!NYnh88+j4ETOPr`}MYMcRUtAxIRKak1zNlI#2tO6D5hR`wo>FEylnaqYklr;P15 z9~Ji2{bAlAoC=s>9pU168d3A(P8?a?S*sfKVh+3+Az|TJIpj5t-8=8#PG~>?n#7q= zh71A;JME(|rp=72SgutyVzz$!uxN6dO*;&69Zv+Cl<8%L4t@3db{T8wwonQRX!B-;R`ZWosa#TH}mYNy4F z_9k^RN#8!k?`jlwf7`%l7?=e>xWRD5CAa~0tW>~@sWGOv>^%;buqm|Lydrq+$oHiS z#4`UWzpdddXTRGw_aC1b?;_uSfHjD^sa+wI@h*yc(bma)3Y8qb)$+3X)MIw#1P<`; z9_^>juE5)%fTnKYQ>!{j2XO32EyWEw&@Wowjs28^0erE5mL{f)oIqgM)x(>!=)4kp zCK+>vLn1*caQzzi!bjPxJZMO4ap$x>8GxD*l)|BGJtan6z%j&;P(GyLl^%e2zwE>L zk#yFe4#JqHz>=K=4`!)%is2pi<-PlwFr{0wnh&`->A(rmRA16u&xd?2 zjzyV+S34Kg2qNR$Kttrnn*)RgDxk`4lSWY&d2BQNv)g1I1n0{j$JJ?5KJC7O77g>Z z>nm0GPWo1JqH+DoHi-6t6l%+O`Ag!pMK3t>Frjmlv6nHtX04bj^vDj7p|R(He)o)o-JE1KeT2&Qw18y}9 zH9ei1>ps_Z#EInX86>t~L4hG&GHf%LxOunoAxWoU@unt)Ruz7E8<`rVl4)r<2G6u@ zOlQj5Br2Y4Z&r_s_4Tvmfs{$3XE#yi}3`&lv-7tW?yOjYLxoJ!$uDbEI--L9uGJ+Tk|BY6YbIN$nycI^e)*}M@`N03b%X$8rpme&;>xI`$&J3`1 zZP(srdegR?@Hg>MBsS-Gy&3aU*g(D7xNdtsMgZ$~yJv71Wvr%f%06F2%RJc_OrHUy z!5S+~$=JtJLpgCyj(rFW2cAAH*UdQrlrUx~xEMXW8s{W4BrEZeZ}04D0zTUhV|jdc zsAjyS&|QXr(L9!8BtoBOTSSes&T%u;7*=U;sj%j8$@ZGaV&Jx2;?7Cs66vY1ZhT@4 zfs(fecY_mD#Bt=-f|)o>zB*FtJ?Yr>kYa1^hu`LvQQt1yb8&`1yc^DTXze0Csd&BK zsq^OZ0%W;-vB^Xs73N}dtlRTlRzOl~nH&B_A-dL(Y8%cTd{D0@hyLRFAk`zP55{n4 zU$e*6A0`ncc=XcJ`S3kUH}V(ucaH=(hfapU=X&(T7WJC!V##ZGtHY!_sJJGC$jH}g zIx*)Y_8PW*`Tm`#qgJ!CtEE!)@z9~9^oKW%#GmbWCJd3S?o4~{og7ne%)BduBtZx_ zo6wtuA9_-{_Q6`0H9FQF&vTFTjUgk@+Z}r_OHR)p8Jkz@OQU`(fuK zUxL67%7;A6$$K~e|Ev_mhq!^pwBJwwONz(8@UoR&KnJsbl)R=9^K@fEa}!8y4GaFJ`*C3`lJn z%&yIGNElNj`4)Ibe_yVM3+@c`C9LJazK(4gYJ|KR`R)Z z!h&^A&cdz&o*tS-B5ZA-)ogvZhOiodH6kQU)rvT9Ac(@%YhGE4xsa7(mHfz;Npcw; zeb{8VMt|GmLj}>#Ac7A|UjGVD6+ij4*FwTXIMOD(6Q|flph8NRk&n;LmdUW$87?j*0muRx=g~=EUT{J@ic*OWlHWdu?EeDTimyu 
z(mr|eTk9(`=_7Q)mK#$8g$M3GwLhYNy5>`c`>40OGtUf&rJ&>SvUSYv7Np#;PmWn! z#P>y&BNOLR^oPALTk;^v8T%l&5eDey)4UNvwZaMv6iAxX#&h6s38oUQqWXDYBJZ@T zbJ6*1wanmGP^(j=cEadJ1C9<2Ax+{O;a1>v{K@M%hXXd|NJK_*(&W325nlF>n>(xQ zUvtF7J*FN#^Lo8)(BethoK-)zpmHLSTdy%Rw_u8<`vILhS9JZCC&C;So!me)T6^v` zFR)06OgCbmi268WALhw|^{6wPqI0ZLK>k8ArRs~cyT&E*NERk`({wTnav6H3L1r3T zX)F zJp+{2S*$;FHh$G(KjBqRl)iEodc4lNord+Dcn zCAHd+23(Cx!5AK+_+(|;WY8Uiu(+*@P5%^-{Rg1;H%?___A5HcN3a1-Nhi#u;6`w~ zwHX@g;O{DCvNxTA5Kj9jrrRB)~g{5=uaGWV7~ijHvSLD;Fsi^WEftC*NKG& z@-$KBm?8Mvg`^mn_ZK@tbj6&{-az%e9o}_nNfRP%Sa_uLT1xJ&SBJl54Ag%9#fWeJ zFybTh+Vk1>6~?=Xm;`jCceA85jogY|r-6O^t(Vno6Rg$scGJ->hm~&#seZmCpf*V! znWmVubdx|~ZX&$|q|qa1&Qxe1;<0N2@jh49uaBv%J!GXyZ*WvfaDdzrwY;eZ%1>7F zw(v&CvWByx+N{@%&#wdHiw;kswgP9vcII5M779`NU~1M8z2PJ#JNA0K;B*S>*@xoW zeOaoh$Eq@mjh>}MG(ic6JHwlFKXw9Fd{1^gBTBg&9nfXO_I8#&v%NYk3fF2!WF7=@ zSfhFL-zIH;_Wl2dl0NNbI1){=Xn_mxWJrK3)txXJr548GUV%Zrl~Z25$rc~lqPprp zXEPF(bQ#s%7@pSDbedAbikfg(#ZJw-TR{P$B0)Os$`oDqa8Sekf}G5; z8D}F&{;Wx+ZEYHFqcyuz!6F&T(u}`z>)_QFwbF-PZpgg88*<~tJT#QOkw+vz(OxjY z(z5%I0aHZD!OF0XHj}b`k7{h*s@GF-&R(pb4E*ST984kWk^wyiq`OY0CB*1+@OJPE<{8@YX+3){7 zHl#fXC28`+0~isk5CGI(3wm%gfHc}DGBHP|2FJ8($rERA6t1CTW{Rz|b|Y_OOWAB% zr{-qpSpM+X|}| zGOmt2Toga=aBJNFcC@1vznS}xw*`pciS;rH9J9tITTe*Z(E>|-$j0f^;g5kBweRhh zhWA0w2})Lc!9QRm#?|en=!t*Bi-HrG;Xj9sO&CoLs#g{-*RqUR( z38%iSYF5!5QEK&^@JfM0jMW29b?!z_42a--Pt~M$Ivks2&mrFV^jgYM^B8{z-u(>n z{Zq$I{+ibM*TAr-2+lR^xX8=|(}4Acp^lxYXF`ci@GAApq5?+caYkai=7)=VdgmFf zW$_xh=+O20zUQ=NvK(~!Efvy(GB0>}l_E*atWbc!tmKL5jL!lZ3M0JD+L;M z(dtkC27&xZSnmX5vd5j`;NFPXyiGv=_*2%mF<5^db$qR$+u^3xRo=koTaWbKq|9i4 zMuMo9c^_NTmCV+01~t8aAobv_FbvHBr;qTvC|2I@S_v^wd-_)Gy~vuFdgV$nJGta~ zcdf?VATJ$q#pCkT8>orT;jhYe7RgBX?HRa#Jcc(v#d(C&3b%egji%%xR-;_0NXQhS zDzWXQ1jiaE4|`2c$~(2`d4Mzek@Hs5N0w5E+t0E{!7rS8@0@V<3<}X7e(IE$w&1#@ z>V(JPE!p{a5fM9qxOh>&N3{_(i+deks93mAX zb5qRmY+7608@rc@EGD;Z$hqd02TBi+xZ*f?YPoa~Be!ef`qyc{hi5>$f1;+X6B+I? 
zX26qqA=QYxc{SHg?{i|idAQz}I}xh#5pwsL;P200koyg1{@a4|K2?Fgf|`+AZX>`50mj_8ZjQ_{+?z}i<@J_OX8|iT^+HW{ zn4!Zn-j|$e z9dx;>GScpG*|jN;WS)^%jyd_NxL37+ZORd5D9m&-(3XG!gGPMUUlnmCZSVCjpWcUO z(d_rUd9^)a4do0(#NSeG;#7%!z01ibvs%WZ5j9KRIIFbnW?TD;O4mQm0LPRc6`c1y zKSN}H)v@qoM|$3>BB_-EtbYY+}r;ugPlW+CbDATm73se zEM$t}rFo-tzwIc4i%(xoekg-x20kMY69Oiz)jj>ll8Gg$EeGO_!Vf>LcdcFgd@n&; z#zrI|_gYzROu&jd8bBIjR2j#zu(dGyI1a2bZ7-Uqk{fbuuVBS|fzEo6(`F^SeiPL- zIO&HwgQ{`!y1w$g4$D&GVnx7Ndw;FmY-w-$%@N zQavpF=79Zd=r`{-UFqbb}qgnl&5OHe0dw>o*WSu3^y99E9 zPFdF){|p=WM}GUSi!J{J8wD7(L_!+5rn^-y8CF%BMDT`_c0yPA!Yc#PN^9b&x2f6^ zY@~wU_<>lrmx>Q+^{bett3t1`p6RHH$v)3eKxg_7MZr1#+(+g*=tkW|I7xxEy$>0H z-vZ#Mr^@OXytb}B()TcA8U0+|`T^ba{PgVi#97(m4Qg^-yj~dNm|ikk2EQ3)H-s1E z4F+?}7_&qj#i3)st9*>Gx@>zMHCQIb);r_qU3=|x{_VB!m-IDzJHXl&`s!xXJYx4D~5K4gq{z$0lsv1gSw z8bQJd;1v3CL{gmv6WRurw9WNV7Xa!LVD@l-72TGj#2jjhQs~#)er3diW{5Ap-JF%~ z>apK2iTP$jk$(N_r&98p?HsxJgp0EyBCNAHh^GoX{I44K}k3ePW3JFM|#(MPZNp{P@e`~1Jort%@OrbY}0 zDl*AjC#)$l2bp-~-t2eAK5yrq7na?Qp7AF>-4$or0UCBqe27d~1jTZQXqS8o-Ri|e zI%l_DW53w@=;meH=YpnjhmO2(SUUQ=zJ%0)9@&vu-hHM10ld}lJ0C(H4u-p^HG^WW zQT;oYBrb446<(w8D+f@haO~7K`A8Me^HL(N{}J!`v)}pGd*3f!_%BGxP=%mLi)PG@ z&$gly;4iGw)ws9Y6l+zqU9-%!@V>GEvpn@h+_O?Tovzc@H@G@w>#Z$8 zZ4b9?T@MlH5g_4RxObS$N){WSvaM~{qfqhth(w;!{U?Y@n*X8*4{t1WvwNBA;L&xF zhdY)a8JSeXhY(L_b$mZ2wXCXf3}0@K2TA>o_LoGC>}XuT)Q5?Au$EfqIr@hys=uzT z{+Ft2r&LipjYNPj%lHrP;iX|S&r*K&0W*o58eGFl^%qr$sV1C4crw{ozZ@Z z%Z5{)U5j73VJ-)lu$+4W-Sv1n03UL!*L&_hR28dSV)mhV#Mtu9NLuJbnDe-S_(^X@ z?QFPZL?0Sh<1+c><E; zemPuN)jxI8pkxP+k^IQvd-Z~lMX@hyj}=xpv*`3=`N0>@9T{yq${$6`?VEu>P7Y+5 z$ZZeYS+v=;%a%K9Ip>H_b6YFFOIgm;^lF^_V#NUG6Zt)e&?ff$Wt{&%svZ25ee>w= zELim$mh4a01GFdnF#w*xaC8O%x%*?zIsnyY5+q;wj3M%{vj|$|Ft?Jyn(IGSnSGq_ z^3a(WjSS!O7q@M`k)`d!gG--&=IgMH%L@X0Qqd|gMC0Er48fl%fPwaKc1nD zSvAs1RSNJhbFfW!j?4%pynLNmeAIxO>v^tY!|4@7E24U+6&Z(S*nlFGsDYg9W);uY zC{qFuLS;WSRMFg5SATV2mQrn%;UBU7@k_R$!!a*xd)jB`bGOeK2-yoPRR4;c@SjKW zLbZpzaFMNUc8ol(tA*S^=%FdI0ALswj?)AqR>HIjImeY`UMVy)dJ z(^MxdcKoU99$Uo_)!-Txa=Z~$)x0t2$bAZ;86*E~Zum%uD^>}09N(i+n+@OAs#22{ zrD0!}<$WWzt$#3j7bwd6eS!HO)vFkGfL3wqUQkSrC`aBX?lGw;r)i&Lu##6>OWOpY zXk;;7Puk}o-o!OzgI;_VOpqrE*iqL$SVyFGLrKGiNxQNR(N1O9m6(4qpYaC;JONdR zWNeM*%s4}cnClHD)u%v>l94m?(}|){WbFl&#;fVp1eNJ2KbAh?WjkZRasKiX9;okz zHSI(jv^Jruyq$Bp{rqHyKG8LSciNwS5D=49zw7Zoy&%HR35kCL83FQMs@lwHp4cb0 zGOr)K4o>w(`2%}i<8eCi^*5jN_>O{+t>#u9ZJ3~$$ z7tyA|W6*0*P$s;UumPn<#Z~~>M4Mtt2l-><6O-~CS+Ju{AFAhuI5PKn z+PGwLl>!}H8ej(#)_QLtRSUzZsqEjpu8G2jgi61FSs?6(%JU-bz6B@AVJ{WKa#BDM zuf}~-w?uN|5dw$v)E=#AvL4iyz1R;T%$pJ5{vY#xf7R#z=b=owc&0^L4OTKrF)Wp) zvy4#N*HgBrRhwtLU*cUbpc6O*l1YqHXeIYBZPr>Q$@hK^+pW5`_hP=q{L!IM{f4h_ z`u(?P@tKYnWH(}8K4&gv*%$}7(cYKC?K?`&!83NE4RZ-fpe*Emqf^uxZY)PaRwIa7y_Qu?nL=bC=B$g@wJP4t(DHiZ(h%l zuYK}92=4Y?+ld!`m~;OsXZwagr74#XW%%3y?JpUG@k?M~`3C*p0!M$q*Z9ZdM1c z$WgB!NL4-Pl)VJ=(wb?LE?wx1*Z_NVBNM&6B%ZPS#E>xOThQ8F56TPC-S6&D=Ys9C zc{ak4HveA#`{Bp`4j)A9it2?k9FVc(N?Z$67EoNGnzSJ0M^UD|0oCY~s4WbqM9Q0f zj)K=7)AA0~`Ijj!qSqCvkE(CdPR5CCkaIcAA}FAaR5HN%M{G<=lpLe0WLzSo-+t*> zKT1>CMB((~K-o5(xD0EhtTm$Br(S-3q1+>K__s})gN{tI`Wa**$i~e!5TsEG%*ke? 
z@>fOmvg%+eJW1YID=4_g&1Sp4N24(|G&I04>Y}t&%G(o3i@4(INJrv1+Cw_zdslHIR34=cq?fp%q#5u8LtCd)y{1Y*SM$_@i${rD zNQY8)`mK_NK2UT2dMeuSZ0v%%EJ!7CWJ;*jUy|8@oPEGO2C@OMo(3RFIeYs9LZC9d z9{C&si{AA{=yTg%*A3Mm^1N@9;%cL28jLP@URJbj!xznOaFPKb*jtN=d)~}5`JHg zbs~Z^YViy>T~o-63u?Z>oI#M+6}6gM8ztiQHE$IoE0;>&JbsFsTkRWISY26jb4(+% zzx71PTYIDJ?~=sY#S2nq!0EzZnY_^joZz#72G78;;)44e5@f|V1t73$ncf_^Y?d=O z6V$i$+p!%ukRd1P$y=zp!8@f=Ng4$ir~2Cw8pcxqALzB2&E_}NiNq|(3T5-tpzHUz^FaRV!j*Ws8V zNZAzO2T}52JaV(nRkZh~3kbS_E+|L`SJ52>^E&(GUAzx-SH0#xBR`tZ5msOd>G|VP z@gW7hTr4PGP+G+D{Cxz!lC&rF07r1jAbQ0N29{a*5GexKQD15>KRp`#WAYK0DE#zn zf9LRDdJ+GwC}W_5C*Hw8(wv&?64TiB9LNW|Rygcqm{%ZihW)&id-q&VNC1Magr;so z?uCy;9eZD7%4Of|d@jJs{jvC+HVn{)b5${*VS;Ht8SLqKSL>f1{m%vPn#w_{2Q-4- zuRPFDQ) z8Kz)Y3VCrsDB*=pAXoK8fif+Kp*l#u*lM=KTEZ*8S6;p-wD`cu+&cG%)r7w>?R>-1 zrN1H>q(ImQQkFY-au|*rmWQ!sp+O9Q97Hddf{Q}@?$|?Ufe-Eh;pcu1|M}q`?*A{p z08rU%z7y=d3IM|@XBN-AhWjC+*VV?E3Q)UcRDyXMmR!zWHS+)DH#Ebu~0)W>o}4hz$rTvZx5kl3*iZj5Lj?Xi(Cq2mxb6wjc=+ zkX;DSC_`x36=aJ-Ac7$gLO|IfAX_kmBqAyx321m4lAhO9^TVm>nIBVAGgb5N)%)|R z?z`vQbH4k1-zgyeKgA6(1ZF^NKSR7kN#|6v6A=NtZ?y#DXS!G9{En`U7Rv$vSjwM;67 zg+T~6$Gv1K*#%VMKwlPr1KAJq;>bdh?OX0AMfMd3V#kTUEHhZMm)j@a%)gRFYiUy7 zf30i%Lq_`N>(xt2Di)veOEujInk>zIW~jI19clY``6X_ ztyis2oG$L-B*Ca&EF&9{W;@J)JEzz@GUOKU0BKobwIy?7POD=L+w7WYm&CEA>upa@ zZG~n5h-NJsgZC&nIfEHQ;+BgTEo;-n!|q~ECf%hP*XYxO=4w!?taGgLXg=BvyukF1 zFKcf1R0XL%HgI)laNMzH@MWSMC&CD=_t!1#eP> zitYCmS{^FCGh^UgI#+Kxs6!1lnIXt$_n$WcDd`Y9pb49ZjIbw#+jL@VeijEN=U+hI zQ}L04dfnJm?+ubgfd4o)h&G?w`yQd3&(n>Jr+$#se!*p6rmQz3Hs3>%~-JQHx zO6n!=Sg02w3Y|)~$eb%P0|OtQTz{x!9||mKq)UJ$c=tXpFAfBjB{kk@#}`i}x{JH= ztxMJaGV&=lDIANI7EA;*|7e;s6;p5_eS1aJ{^`#l^10Uuy@+7}c1qJe3}e<;@=V(m<_ZJJ z8Y@zQyr%9`C15H-)XY&d&a%EswIc+vXkVcBD(wb8XBW&j>2GaG+vbWqu-P_u&H1F} z->#p;qh;GyPXLiJPv|0B2;a}D#gfQDRCuLFyP%1GQmR{q7OVGp|_prr=m3uq2r1$bN_3#Xy#tDOHe2#9)3FL<2NS=S)Hb=J#Yf z6_ec}&tBY86b(3V0Ua!5YW9(hKGopT-aMowzaK*QnBvLheYIm*Ic4yfjkC;4#5BGJ z!wRo2Pb8ja-W1b`n-preO7~(R?Pe}p#1J354olf*sP6CS9+eo<_^M~?EykMXt*1Pv zbCqBxzutzHhC)cE%^JM9*!YnICiFBA#tEw;Bkm)Rlg=FVr+hBTDe8#a!D0|)_xTcnvv3aeQ4%9v+pv<~65t9f&dt5IDb zNMCs{_{+@<%|%{(iuQMxFr`a`(CL*fn`9V7Vel55{-IFE4fS?H4aVy5a!cQb=P%q< z-WZVlP0q+j&ftxU5rMxL9D(HW#hzZb^!SbS@iNOliniwFf)bpbppMqzWemVvCE>Va zM8AXAOY5e)CQ{YK%G^1|TLt$o~5mSka z?FLcx?RV>j8LU!uM6);_-Vh&SQhwepAat=bGr`}`iT92GQzjL zZW)~*!(_?<@DLh(=MlP{yhGTQzH)kaVIh!vEc^$!K>~%$+_}j(U(!bZmbqrtpRXHf z(I9>-W|ZLJF~$ooP7}yp)Fbwm_NT07XH+ClEz(!-0=%+ds55X<#2KGetotm3<2c~d znn=<_G`lrA1MiFnt?>z#2U}^)pb=A31LH9o`aJsD{iU&a?Otk^gZ_mjmX_uaX@V*J z0{WqbqO^H{+q4H;HPYqy*b$O1LLI>(XWh2?P-yxYU4^ug(_t8DP)9}~anF_pkVm*@ zcDzQQnarfZo^mFMiuJq2kZ%5?0h&fr>8d^8Eo6ygJo`aW221iDHlcm`E$w2J7OwzP zkv-9DcI!;zo~o|6#lH{gZNQ=|OwkW%B;KlnZdvt`6jdus+J>3b4J^ebaa0Y3e=xAE zTrYg`T@+2n&oAG@+pan%H%nQkE*)aX#PG*gq^{>I4*`WDx3Yernq}KHjt7We-i%Z~ zIy;|maNFoTm*%?|yE{@LoQ4SBQxWjmsW@z(*kK_^FBz(;;G>$bBv>$vyC{T#;|SRP zI^rpc42s0Lb?PCnO4jt(r;bu|=|At-Jkl2}ez^H^*-7sx6i86M{t31&Zm5+w|Euhx zy9_Lt+Y$Q;VaikZBl0}DZYZ`kaUff^{S%(bV&@K@_njESGMzjuda-c}`o zTmabwpt=){xz%PFfm z5CK0cPM6rekD-M#71|^%+PNI(ca(*fhV0h=VAmV-=1y!ZTx;2ot@7y5VA0Q|KDT@a zKmJFNn}0xwP=yRo@}JPRihId0!4VG1zYT$Ad0~;-6Xs zatp7QzUNwp$|YyXD%Yucno%#@cO5$)L=8p_X!Xq5LC+t$0V$2}?SScnmkFpQeiD9% zBp(zDY9=y_nC+x^f2Xz=&hCPKpVY_|QYV&;GO z{8>4^-fQkUkr2t`SYP*L6qM{1)`YZ;VCB|B>)3T3A#HO4h+B@l72Zz{N_P8*w%YW zODj-6<*?1hm)w&;weZZ=J%{V;)}r>pu6w&ecCUgqQwb+QH-92c#wM|8w$UfAFLkCk+Fb-2*JC*{yD?bN z3QLn~D8#Y|ZtUSR{h~U^Q*K)VT46>WHA~p>!GzxMIQY_%Y5H37$aIbwvYqTh8D(2`C7VJf6_Su7*-nv=kdUEiM)rg$LMbNuBqsZ82V)m$ zvfqW7QOPbdVN7QCp4aodzxTPW_r9L{y`SHG-S6-B$1^_1`Ekz7dCswpV;yUK*LSVs z!+F-km%Dt?}{i z+R4Yyw{zz%!Cm|UzrBF}gaieIemnW?CjWlv4qk3r+I-M1K++6 
zd=BCfmWffJmgNJnW z^bL+3H#~jD%-rIv_qyTj<9jnWB=l|=JUk*gCN?fUAu%Z}{Yge< z*3)O%`2~eV#U-ywUsqMv)YjF%YiR7icXoC6^uF&K8Xg%P8=sh*B9Z66eEqiYeQ}BU zbA4lzwnhK7{hM4|Antz`3poC}VE>z3qJUgGewzcn-{j)j5e77FQ6Ap?M|O&t+VNcr z55`PGzlrv5lKo?XMg6~$?0*UNA978Bgt)nY z$>SCUK|w5Lev%sKzuW))F~G{Q3ZZ@|x;G(pj>kj*)q!J=b3n2E^Db*(4#<}Sy2-*Y zAS>GrV|fd|koK&Cut`MA^MY>HbgDka3!4NbzRFdl%tfR8!4oB&t=d%K)QsXOrq9^a z@`aI#ps~D2do!ox=yGdrACaq0cbfG8?&?FNnR!(y`2_wKX8R39DPdJayb3lnq z!TWyxMLz46h|*$r*8zShD$MLg=t0RZ`!i>B<`kSkj2=?`>V1X^;s$kK0S}G_eg=%k zO474z91Y{+oAu`X1eceu#*18!;&(nX1wU?=x{)otief0!3&~*SQJxpDb_ikXLQP3u!DOn~(3F%g=8PlYaZ2n@jDk{ku{S zOOsUq_0f)nRbko)639V}9P@0WZLBe*(DK!?W4F#qWeJxkpSEJ6(H3*12XrVZO_gV6y_uYwna&q+OP2as z^^&Jr;ew7?MD7FNYGqaw!kQ9qL0>LJj+=0$;|;f6IH2pZ5OJ48E#()ldu}Prd}Fg8(C)?4i}OIi_BGK zPfI+0U)fRjUa~TdXFUs2YrE3!OTvKRL(4QYMZ($O;hTU!gld`Pyr6R3Sa~_o?|I^? zvd&6Fs+#E6u;i-3n{EjgtPw#BD0>jbjk}>e6-c8DvyBlKKQWJl`BTBR)I&~wvQKW? zh87b@Cp7nG@A@3~!uYL~SOQ-Bz>3T*fnQm)+jI-c+B~`gLRX5%6Ov{ns>XE>hG0c! z-Fyu*8v6r-l(Df3jWK6aGD5{q%Sm1}osQb2%e6g9LDL?wa-lw4AxItsoZ_8=?D7i? zzQ+OW%1T_-gB;-hP{xC$g+p$hz4}yfFW*m$uaS+WYL9KyML4Q6R|7o{iFG+g2`DhZ zkXh&9aTC+u!*0Je9@|O_oc7o7Q@Ztj|K$ryXSre#P+$%O=ord0oBue&lMZGJfP?d# zeda`rttkW;g%5peV>zU*&T3jsOK(*;LlG1{)Gt!vQ?j>~EOO__I%ai0MS336hN7!t zR^m$#PE_gW5T9mWY7bi+HpT5me@^;U3jte@t#8gU?CK3iYr>?ySzXx_OekMhDi!M& zE<1kpq5g6iT66tD znvNMFg#!XBvr0_F>5MihSY13*d~A(`iEdSff9&aq04M1J)@n?4v-^43+!3oidg@m2 z3oeC2VC{i8zZeTIp_5nrmh(yl_lYxLo#;T%FH_;;E8~iN^&LO%T)$dybA5gHF&Q!r zYAi}EYr|0-wWqKh9FVj!Jw4wSR*h@N$!Qf(zI88Lj#HmY%+;**PTJ?ve?uhO>&2V7 z8?HMWUWAu?IpYl4geJo3IiS@b=xPC#k9LZc!2w;viNKjmeag27Wqwn%7Al&-RVe#< zM)%0+fRM6BUC16arM`lU#q2edCuACL$$E{jpAU?hHKaqah@b?)s zIxhH-6y@^O^3f(;$GY@GCX?wrIyzPRyAggw+fG>g92iQh!PTaWqnTj^K0(%$VNzb4 zwdbXz$?N>`4ev@H#|1rp5z@O^p-!yt5=PN7IiRR&CW2LkyVfFpnz|L=EJ{&5O%L;Y zZHaxZDcH$YwZ6D)m%XQUPhO6zOS^8+dktSTb1&E6lMq|>45k*0=74TM==LZ|~_EiX^8sby6LYUJ8rP&BmR72^i`Tl0NxZ>B$8|0Gm;(s%RW>Z8{UpB5(&uhevl~?o~)7_yFUxMA32kIh*fBOjR>85 zeu?#pEduv$V;_J`+qMr*e`iZj3mZ3UG9PH2J0GA_U32K`ha}IRy<8GMjNVqE+Kt#C zrX1CRDN#lnrH_-baohT3WIt%K^S-#}Wj7;2!o;tf2FHg?$4ad#sEB#pUa{v*wiC0> zdTZl)6}?gvwjw6@0up1aTv-HmNM;NKNo`=OW3l`emR)}|?q7yYk`hArZ59non zJ9BY@XO&NnI=<>p#S!b$fdyE~%JLdTYQxY7X{SI-YqL~SMI7EIj>V? 
zp>e&ebZ^{6(+WAjAQUz0F_f4*W377h=ravt0k@dXp(1o|+uK@9)3VQfbu>e-R#Vny z*N4YvN_ez>_I$mp_+@S4Gzq&3Whf&1+3J+7l|(E;n9d3>;Uiaq?o`U?!}E1g#b^DWo)SEaW<@ZV{dP`K^KgB=a{>ghzMJ zQ7q@;okc1s4?B@^(Rix+^p&OTxk)6ULUBp@;IzkUDDF8}Wv|9*@s zROiE1Mj6ME!-kreI3Vl*N6Pclqf30p%@=InQgN}jHhna`4_)e)bu(W=*JONtxmWC{ zyB!B4WX3qh0S&pLe85aq1TP2lVE{tugwoO^|`8U1P@$wEMyi_7bW8m97;d z_OsGZUbd54mqzpmk+^DY{2Gt@67^R^`9h4bjAwCw#Iui+?z2idLhEs=+3_UoN*F^C zv4`41=$wn5t0Lgh{PZ;HJ}n@A!E#Srf!ScUq6s|eirk;27Fxmy%p=mP>Ku?|5bGtB zl0%W^fR5^-@S?~e+(s5)ggGGoIWE-t(UTDK@n?&WO7E2*K&~WfYQHHf-9%=BKKa8M z5APtfMIfB>p1yb{Jk_D{+19B3Ml*xG=OGPn>4opupCEJ_I$#=}WkX2`IA7Sd6?&D6 z1BzLT#Ql2q66{E=Um0W^V_MM@3z;x_3Ij@4?UdL2bU`JSoD$fP85WWHBJUv=?}y5o zw~?JM6h$mgEZrBRA?W-hI6^Uygo%>hJzPLXNet43H4u7!yC_pu{80{X`S&pPz@751 ziL~{0*_d{Pga1)kk{ zpu6Zs9Z)dyxG!;yi|%MWsQYkNbJITcvz4h+Q*lNK1z3-Mv;VSGSS+ z@vVYmn>aDLQCqft`YU=iOS{?fm2bA|1>4s%8kPctJKyt49+~Y8iof$gR5Cb{7Y_Tg zU;mR{p44Tltr_Y1QG%v)J-wlt9hHrl#e1qg>zO%$ZzA z4@-^CG#kr`buxBklfpM&AEU{ZX9m*u5jh|kAE{KVUmhKc*KbUDp;17{}T#+hz7^SQ&+aL}BEml;?$%EgsO6vPK>NGiFA#|v8kJg7yH5m=t zi+(r$3u^w~-uC|^;Bix(Fi}9us;|x@L5UR*$osRCktX{nQaU?P-<@893twRYFB27n zxIkU&=78v`<85on9FU~(nQdHRvkN61-$+OZbGjb_ z5Z34KW5k!E&@Xa99FQvn;DGc_n7&5!>d&*^LD^A0+-*?eW8BXq>?D@mYY>CmAo#&f zI})Ijvva@999PBx+DEPfw(VVu&3gsPZ@mw6GMBzgw(Y=8th)h`0=9E&%Dsd%V1K?P z&Dz8Rt%sZTNWGE>>9~CA?1to8#Ek;qvcQH56=P32je#VQNP?PK5Gds z+XYT_NU_83)Gz*M5!vf*w3KE?b5KQnLF>4;Zrmc|;YjY^QR|G?0M$nZ2-AlJY{?(- z91ye-%NEgs(i?v*unmkjpv%BaWGq4d*R63Y=^wi<1~~Tj`@$TM zz}9IF2s7iz0r5#gm~Qq|a6FTbv~LHaD_4VR{V}V^_wJpUCFjfn^1-Y1HT5Zi?{%w; zA9$KMXng1daVbTrXRw~5NHt;0P|7%rQm#*)V~YYh>4*vZhzeg|6|ACYJvRun^{Gk} zu}EGrx*JEawM~_md-vSV6xxw;|I?!36Iyz8&o$kI{ImODyiaysxV_}GN2mJWdXNMX z>oEK+xqM7(`O6AUaRfiM-6d(CX@|2^*ITOKfKH$-2AegBIMMI4-2IfT1g!Ro_G-bq z%GRp!Z<%rlZl{$VH}*OVK8e~Rwo4(Y`fr3q26s_1=kb_bxNF#G2sg@iEooamk!sO3 z*gwDTR1f(Z78M-sW6b}`&C*k1E6iT6wp#Aeq>!>0-+?P+c_)!JVkD$E4ad*XL;e;+&?d7t3ymz=}a($+?eDBKQt;Wwujn-uJs+w*>?Lp=1BeBn-EUzb?nw4Z6wQAJ=AhQ=AZJ%_PC4IZIUAhY#kWGVm(u9TR<5yY+LF(%gr=CtO&{C5JffkMdGLZ^VtGYO@bvpQx%>>VGsiDGTs=7q z)s~qzjtJFl@$)|NcD<-|mUe4b+mlq zfEKoZz_q;#U;(6Ev3Ofagd`pR8eVTs51v2aXxBi|PAa6#B^5BdHlQ9O^L2HN_YnOx zRtW*Ca|o|Tzn%&=Tmb`-Zq!l4NvbruRf6gH@Cxf?=vrc?JcO0cw9kk8b(!oROJP>| z-YuRof~VbD^%)J0OXj)b)b{HmGJ>kzX)8_FWFMuUwxE~1rc2{zje*-GI?+9QTJ;9? 
zz?E)`h9`PrYkcMUnwD>jOCO$c3MafWuzXxEL1A*cw_R6A5uKN^Np4k>DkLD)l|=K zpHBw%1vs>yeX|KLp^&}E9X*r+S{N1GsVV3{52bc2SktkTIlw#B)P8pU*jkpa!~soi z+dlNjULxhEU^N1ml^VCsSqYT~_B{d*ic_}E^z@@%M^B`9%%hpeXC%L-e(cWAnw!%f zDVAZN?2Cdrg{iNvq?Uk!XPe8|{CH|u8%~A;@)B0Gn1q_piLHv11czd3Em5gs5uAJ zcL;X_$55CB@*u+}%I^!qfRS!jM17TnRl``10{Kykxnr$yf9^fs34*8aG^bId)_Vrj z7gdABGcfXa#U6twO{SN}cBA1AH|wWaA9+tIMZLJXP!zUF*;K&Y^)pg#N5X;8p; zh?1VGYsX`xbkVX*Px^Dul6HabjkTG1QpJr9PbwpCj0Jo(lh!I>$ADcqvwzLgHl^_M=v81kgmiT0>QH#l zGK}&V)xrODg%LJ|VFF+ofASN#h9<3{-sTatQB7WR?sy&e*d}>mi%%9MFb1j1RsS`H`DO zV?9QZ9>&K*0V_{3hhrEpwkSk3ka+-lV+h$!!>F>I?l<-R4C8%2}wt!5ZXbAGpr~t^4l)Kr|kR&p#0&0kW~@dyv2iW!*70ER(@Ci zpzlZA!OMG2cgLtFcnXP(_Pk)Kuy9Z>SPDuapRP>|lwV3DTlHH#ELXvI$_tKegp86R zu{p{yQ?YN3z+UgX*gg4$%VxeuPns#dMoXdMR^1rd69_+gVv+A<=koD~2)m;9f%)wk zcggzLTvkMUho}l}k!nHJv!mP(@6>Nkc1RBJHO{+Kyz74& zk6f92b_xiuPSXys9*00DaKz?0At1#l^`0zjb(sqED8bmOqAp7~%6-Wo?N{1U|EsK- z!d>~tJRayMChZvy-hG8O72igPil~oASdsQQ61QS2LNGFOC)Sd6o6umF6Rk~ms9oX% z;WF0s7jm6z zFTBv_E;Fc<_%__a!OM8}t}f7>BgkG%RS1L+y|T`?(oR`52HjXkKS*!C%x$6LeU}5O z40ktQSazGZAC*_X=m>I|dKXE_~plGJe${bMf zJ?CxNR={2S8CE#SD`EGy4gL{`x_G_F7<(BhRq1U&}v~AAj;psHi^6s5;kg^3_WrD_9&#M58tb)p-{N-Z&cyku0<%Kcv^Z zy6k_=D2b_H&P zzub6TTKsXedQpu}1${n#7Wk46XTX(><+p~}g@5GnK-6bfs z#W^ZPYD$S(va`BJ^_@}|-v2_Ehc=clqXVGJ(Hzism6BI_w!e0MMve1vK%cBZa6YD( z0o%@jp$Fi-os=y0vsJ(EH-Wg6UycJ(d(Fz`fL5a6RvgfITJ44&C>SGwJP5Z1c(zy^ z=?L}BaMv|juYc|p1?|eB+pk>)?}V!sAy^DxPP*h6X8=xpCz3eKRx3&HjAxX+p8nPL zU@;-~p7-iOBAD*P0dX}u5T6p)RhkCB-aFGB9JFWMg9GYKsQo#<;te?ORp>OEaV7H> z25=)gLUKHo&QtYhIl-(PH4X^A=L^)->8n%f{)Ig&uZtQ~<5F%$_(eFRy?Vavb4ZOe? z8CW#Y%WG_?1=EN?JIw**Uj(9mlU9J(*j$2A-Zlfy@bujr4(Nd&TNl;QLD6OjPld60 zvGnmAcHwPrmY@%c_zLy-JE{T!1HLK%T&<7#b`7Dx%3<$D?DRKnm4shdjCT+fWX=(w z&%UlrvTLUUjC&T77jmO2asGjkjXkTj40+~7RyN`UId(9Rd7r`oIZ{}e6fc$t+m@?YV+z8k*n7MW+P&WSkSI^) zZC(WWbg6Z@%}i=a&bF4sCwI>u-Fv2T&+D|(i?b`oG7E2Q}v%y z>)YKA(@6|zrstp@f!8OjZoUh(8|vGSmin&0{@M9{CP{btV69)NQqpy0)7+_v*SyMM zYLZ~4I0r<7u*#6TnW3ZpY#n&Suq_4MHphFjm6!Ew)2RHi;NUCY#?(^1x#^;~gEFgA zv)$8YhUp#8}`1Ez*H z5D0&oDM1mPIiM8C|JGwJ3iJWglQ6YL^vB}S=qZD?U~+jdsb`g+YVX!e4-X_;8ww8C zo}#LhzSgT{<#Z00zy4f!bo$)>>??^$;g7QoHj zWB}bA0u@B4(@RQx<@eJOJLCNeOoR3@+^3xhp}JC4a!-dydDjrV2x1`yQ0u>^AuqvE zwU|gq-Q6aNBWWuEn=7^z*2fXX%G6sqDLj zKV44y(V`NKRjkegYMIMlW604=hd3ag7IB&f#Uz8_>NjRs()r5x#11+TG~0;&x;X3k z?RcV?{%!qupFz79N6Z`)L~v{==?AFq0mC?oCaN1vzv9lz0p&H>02r{A1Cj+z}b-BIwv_x~Z}}e=hl~`;lPH&s2@1 z)3?I+J$V#yeK$%LL?zIb0-5<`G(QfgN|>b0IyyD}X>^_NXGR~SmY?*WwnM_d3*ziY zWPj&TB8|EK+*=^l|J)}>3NTkF*_bFO#hwt)7QXFSUq=Ne>S{d|otSkl{~Ck*^`sYN z$P&jA0ON9~tBfhZ%17>H4_{cb(mVhfMEsOKhpS6j_&5eYm7pJp4 z4Gq)r{mf&p?-aH!^+j*`tR*9B@sp|IuVan?5JF0-@FuSH;hPq5=t;F=Q)B51%A+;? 
zN_P}|zL@uVDJn#N_Q1I3wMp2Sf&Qwdf41O>$Cc5@?|W`Mr&-zlqM_MzNo5Hx$fPE8 zVG7D1+Wr6S-95FndGc3Yp(XMU;x}oGjkY$;4JmnJ&$TwGR~}%#%w~tSMDbih8^nE~VSqv`ME#+J_QIq%Kl&7CYt{qCtx~a0s)AN|# z^dBbjmle%`-az%n&()20VkdCj5XddIman`lMJti)hmxs?ExEEJt2z30V6|ycHlB7p z$hmhVvox4r)J(y0ug=?vg>jq@L6SWwFP}RLo_tP(;n7lyKPS$Gx?toG7g|55)Mr5t zb?vK5L8@GA&h$=>?j9bCJO(W6f7~vqe|=)Y$4%h@?6`G7T2$Eb|{^kzFT`)B$AB>fsbu4hZM|6()dcmq!E!lL)d*JD@DqD_fk#KuIiK z_-x|F{)F>xQMfA@Bp`&U^ZU_ATS(4(b}H8-s@C|-erNTnvRxnNXvc7NaY(#pu_+x9YxY!gL#&e)1ovXwn?2I zk30^Gf$l)=LK1TK1oAL>=msQP?(f*+&obO1$To!9&M{YU*dos z`S-AHqgXs6n>$N6Acbl4HV`pw^S=ZGfzh8uqko6F27g?2NT*!^uB-Z{Xl?RCH1WX5 zt?Ka`|L%dqYdx&S-X zgDkmcCgE6kJNC~a?0?03ze{=FjEMf-@PFcqMEz|-0Go^YTUPk*C!zo7{Br-0a@XY| zLjEM7UbcYw6B9QAvNMz5TENsAbd+Hp&hduAM81=Jh&ToBqUbz|d zcAP2Egh$DWn)H)w#Xr(yKfFHtcw$M6*JI#?#%=f5bRS4)SNNql8cNh0_H#{6@|3$n z-hjQZwi9w2fS8M4q&){KT3hgIY~%8_N|%0*K~Ks9vGhOB_yjcuuF5b@EJLLOpx!LVJT5wb8SZ^^%K*t=M>Q+V zZr-gjGt`pZd*hPau!cQ%#FwAyRoGOM<4#NsN<34_h!#Fl;|BvThE2uep@~&~^-`rg zVxK?jwn|P6R}q^YzmR^fc&neop7z#AS*r3qWa%VXi%nz?3 zT&%3hJeKLCn0+r?{@A2v$lc3VPc>E&quuW@t|PSRMhtJ}UfWm;T1GXcCR??0K=zv^ z>Qw9Sj{sfPy|b;+7n42|^`=2;_#Vi-%-FN~@r%NE7pi8VdoFSI5bF`nmoSMXqq|z5 zWXBjF5o$#d#wM_pe5X#7r^Dz0TbG7@gw8-OSV_8E&K~Mt-9gGfJ2L&TY2IA;5qJm^ zJqKy!L4KOz?MY&T5Pp=b)Q~2ty4uE(W(e8J&1P1nji>Xc%h{z0E}qik3q>JQzY8c@ zP3dP$_JZje*u?l17AuAI5XexC7Xn21JzzTyE`w%UkGe%*p%;EZR58HG3W4xtg)N)` zC~1v%5(jh(4Ym#BTN$8elX@P8{JN!VSWZZ3RfGA6yYyo+JW{94apDKvt&Vn^&igK( z+>83NVCX+(z<$$N$@O@qBsUFN36KYRb4xo2ADSsYErS{^9A#yU-(m!JU*gF+k*v3I z-|W7pcj=^c@siYgy_QaE2}-X{Me^Z3i`%9LnpdVl>&A`6@;YDA9g{ODFPt-uV~^P! zPTTM+J0XOXty;Nn_!DjHFp^Juq4dI=@9VL1LCCU75{hC?nA)*Li(+NexSj4C&xeW8 zZ&&~fN>Z@OH^2T1yU*F8^c5u<7N3b2IyR3l0Q_A+&?|N~hGI8^qJ@vCK1M*P0xD76 zmX_1BWuGa(8bK(>hJOG(EBoDDy zUTzm8weEDftW&}De&FKWb_YL z-BEcfGKq`mh_ySpSM?#(i!k}TXI5;?m}?$8Io_5%7E9h2v{{ZiGuo`^73^Dj(+1=> zop3#&aGd8*w0hJRwfE#&gfIn0T*HIo@g!|LEQT#Z*@``*(`kG(@4G%N8hX%~B+wTm7|0QeGK_tsAE&Q~)A9Nw(Z~nAnD-SIv0HEA4BEgaPTN50d@A_gP-vm-N^< z`$u9CJj<+^@}tQkOW%<;m3Q*c5HFHkO z8zy}+-S(@VY-(HzCj8U4Mr1%t@i6eAgY;S7jUov<*f|6RaM!@+@9j5*mTar%hUzN4N7597H+e3;neB5G7X?&qmf1z*JqU*cQB63ID3H)xg zGG)QqNy57GK;RhhV#FJAY;sX0Yju6a}5JC&Y-&6M(~S@k(OcS#|* z=kIfqZKE~_@^JU%k8ekB4Us1qk@$>2bNdX%N}kIt2m=5>EYKIr5x~|8vi3yue0S;B zrEIBp{vnJ>Q60)-Eow+OTi}Q9XeK6kdXiDP}i;EtS-6b z_ZN+8eOupVk5Mxk*Uu{pNP4;me15x`b>iTO&n|Q?|Kp4A&*hj6-Hvd!YXE@poA3vJ zQN81TS~vOE!qq?OzN_@AeF@^)81x1>QPy^4J;J{k6-w0ffN;C=Up^}JcFbNg{Rb)| z=y>wicYEHGTeaV2(Z13Lsfu`9ZQSD$L@>oVGlir>GJt)5qfvOx^@q;<&DdK;u_JhheYW*Dr_JtyCl zd4jUWw5c5=`_(IXya@YVr6I_r~QvIw)#A zQ@p-o8?u6B*dtD}5;nn;7$Q14be&i(*Bu1M5N)GUA9z+Q4!gNeDmm8K{CqFmDQ>FO z;hmwBddKmaqx~A}{|>hKe=G@-IO(0Ezk3@|J?P`JlnDJo)K^7ma|f- z{tQ5-S(g{?AKBXT`R;zcah#X-6yJrh+-V}Ab0$J=)hnbI+pJAK_IyiTbtuQcEL0rQ z%s6}{l$hR&xaZ!K_noaesBNrA-ls3uFMMju%5T{!W@ay?x5#ae{Oe1PEb_210I_#8 zi-hh3l2UZluug5!&%TBg5APWyK)tp1+AM_B-RgE6{*dF8fAzHLMef5OjqooY?|g7K z65KnkkwSfUW$HppYh_$hz;nQ=KXJKJWI!oBhMQ@)v-af1mGihgKR@(cZucmH6HsO3 zTWPOw4Nl6kl`!$7G!_|cFsJMIjQtV1w-%hZ9SS7POcZ=QEvdM;>r%7ILr(;vW^KF0>{ScGMz*}_@8Vi{G%%V6D8t*#7f*ztk+0+_!fC# zrJbrsvgp7`psvviI@vqMbTPuq?)SP_7*w#AuKt$X@HgwUMw72!{CmH(N>b|EAZSPp zq`O6%mP)maSwR?4z1!!+5Tb>^HqQvhuIFy%^X>9N!@#E62&>0|w-2PRE^8GCTGW0C z@jKC`BMO|kzpCfZ=+@*~*KDkSyTGD)k9$Uy37AtGL3CF%vdkF^9v4H!TXQ5m5 z5y!`o;!HETf9dD%?#lEy$vl-*5p>8t7(8_&v4uGcraekO1uT=D${ zn;`doGf+5YWt{Zfe1wXdhDnyi1l9V;yydqqztEp$VHTKTGUavD4|1cgTNJ!cq@y<;YKKLS&7b%^Xm(W!w*m&pkH6 zem@TeyS}g@VnjF6j@xthC-xc>cN~Tbyhh$TfwvC3QI zG^pBA*1V>|`VcP(Gild|mR(E1cQqT&(vuGTxYWEfJZ9&Di#T%m#YKjGl9`v6P~mQK z(|wD6Q^qgz|G0Gj(nkyZp`zq3O%%oPA#EI|mM# z(9+aWyiFaDNCf%JIGt8M$g>;RZLn$hAI}>4*9oNl1qhb4Y5e1QtKN>Rmq589mv0HY 
z$FAlGy{RKiv-&(WUql;J_ZU&E)RC0?*USP>HNRRnD^jyQ_EF6B(+lUC z0=1UCK<(kMwMsDg@Tbl@P47MmpA&!h^@U1pa-Q<*wEIYF-6@lE0Bw8=IVs9KH~58x zK`M5I7sE053R+#AbKjFDhllYsPY?5bOb|IH_lWyJZP~K(FFdV>mH3sOgKcM=LGV2b z2~2d>=_X&EMD~yA8@ar$MpW#yQu!cduQGE`h4M4K>1%3L-oX=IhU&!hu_AdV zC^E;2Wul`qKmqGkId}|JvW4Nj!%&Irz5ON@_-b8@zDW6BV?_y1vnV*`5+aWB=>z-kY zGCNE4ExJ079yHuOz-g z*be5?S1HJ!TL7Ai*Z*8jOZsM1ns%h~aYp-5^9B%Vuh*l6uacX*k_+F$5l1x)s=wWS z;rv7ulJsK!4FLDuwF?@bxaKL%A5@*m@+`_CAOCfUq%?obiu6LcFi+iSvf}ke9lIya zPdvtZac_H=5~r5QDwW<06XbAM=4F#(X;69z^K^ie5k#UB^vyU(_!CcHbaHwIw~@uT z!@BKiJOB%3>3)nei}i+EQSf8EbY=1|~O3)YAu1M6-wf9CyuNxk|*Vf|`I`RBZ z?Nfv<)z}zbz-nz==YZlbx-{#V7u{pXjHculx7u%>H>O^ZO~b~fVxK#2C{EQlQV;fc zB^JN%5A+wf*o>Lgy&M?L^ddnMOk}F5mi1wy_6uf9=pm9 zZ4}9zveCKhLryL@R%$i_(1Qns{%m^wnfLzn%k~$rR}grl5SRv~_fVH0Q+_>D;M8x_ z^0J1?(GZ}bfPL_7(tFpF%Bizc;}=tBXAhbTaHXluUwYD6G>X>S;FaL|n!UAR2dtDb zQcns}44y^04m|&?bw3cPksAwP}W}gO%jb_d}AlN4?a4->+@(J z7rO&e6@~{MXQWOS%wsClw@`XgXT7Pl^@(~$EglPNujJJGQb@I@h0i zI0o%CV!qp|8+Ge3N&k<-FqV zNMeBGM#;(y5bGYK%TW6r>*!(gJ^@GwoqYXbt2FgY(!=nkSU;oO15M`8kW!(x>j(Fa zf65xucg}gyc(=$~va6Z;bRi0kFHA5OsLI`?7$uA-U|0aDyA!(BE;jFM<6F6go9#*c zrOs9a(9+fcaX-cTiyPBbVNU(S3Wx2JcQ5U5%(0bA?M*95j-kTM@}Jyaao`ohc^6DRo1r(e?|6sBoxAu~odqDT$7srmfCQ_Zf(9Oy zMo2}<+lnEMQ190jh@Kpt4&FCVc|yv^mH&V-qcYEQzEU{q-lq)oeT=*-Y=f4h_ z{oVro>n+Ofsv#7ctE)#ebNkq1$4MD?-wO@KIm*Y#@51h&16s-Riyk^ZU}o$`0b(pM5?6f@YF{A!nRy}%O#@A;rE(JZ=Z>QOiUleFQI=r5o zKRl|tX>>b0*DK4iSvh<-5b1PmZdBEKQSH56fwd3fK8j(EoPtzlV4pK=>0)!;^1JJ= zqUSZm{Mo0(BZrTj93*A5;x&CzB{VU}jt(%pLH1Mr-hg*^N<+OAr$T5r@p7}4x zYzWv>lXH4j;28Em?OmpRpw*J$pAdspOmGsjXDf#p3M3>}Hu3~NoOVQZ? zYulgW7@z%lvC|uFeCpeW!>P9A=b=i6BKnfc%}X$fc_w`>kn4(t2}&t=b)m9@y$3RJ z#{naqI48*7;c5_v^TqB&Tw!5qr|<3=HP$Shxrv0d$n-pu7<}{QQto;0AlJw%FOoLb zVqi5{$;M`IRTe-V`Pm90ZpQ67KW-v2GIYY|%5D*?=&{GWKiqB~-LgDT_CLD&(x|4k zE?X28u?wPrh!7PJkxr#knnW%F0wU4~3Q812IwOJt0YV}uAZ-Lu6wpZD6X_Kp5kjLN zAdNIZ2niy+Bs3wA#@bfBs(0_J`_-tg>bv#ccz;618H{sA&faV9x!0O=E-}X#-{@N< zE=j#C^PnV!ZKqYZebEmnGg*$+gOkc~D0A(+eAmHZxF;*yNtE16S|GX|R+#SQuh`7A zDz#%oF}fYk2(f455{c&IS-;$T++obuPBAAublGNK4ZyqPy zFDNkd$}M-ysqpbCx}GWjWgnYc4ShnF-gZS>c>YH9mAygv3f)$|mYF~#_AY+^e>VdE z>Zl{>^YsmwkpLNj(=_`>8!LkyIckm{{L$KTj(sT?W!Q9Qs!u$il&giM@F(0ZM3d+^b3r!k{jYGHnNavLSr#vClWBRt5k*I$r&IiE=1=t1!y5pQU ztJ^%)UG@3?+Oia-RJ1O#^-NZYZKANyIAqXrbiNTT!8PI)qYXJaXdj@YrM-|c!_$%* zIF@_RH~N>~3^hJ|>T6oyS!!C~^i@Mtez~bG(OmLMpI z!)F*h)E$;HxUn>I2br!G1CwkI&Sc~5d0Bv}CgYBgL!O*{Y@_I1aHcS4L^)Y&5qW0I zvvEJuhjungopX}Rc{;v9a&CzPkYQ~Xr-j6e5B5*$iUPEWcmLrSB&sZ!_zxjs2)8#Z=*6I~j% z^MhB+DMNKN>kmV)%kNIM`yt8+Ef8t6S0z*2Gl!Z#?l7S_VhjFwOQHeV#T z&$qx>EzuJv2!z6BAT=M5VqNEDdIS2k0y7{q{t4Zk`l*r`89t$d$={Hf_42OEN~~^R z==)iv7-%A+IWQVkWOS0|qh{i`UBRM_Pc#TFM=0Eiy6JcI4(9L&M%&iN!fkevo_TcV zg2O)r`fG3_G0UDj$FCsCw_So`gi`y)&gIC?>^hCd0B`{EzYl%BJ~ZAJvh_loI%}Di zcBp1=Kor*?pM8t5T+ZAP=_8o`LwHLoT%4eZ*nq9KfV(E(BS&<)hJRr3rAHpb z{qe`R!U3P|RO!@amCjanagT#3_Yl)T-OUBUjxE7s_((Or6#G5xIwQI9g`T7E$I(59 z9tGMc>ZzKQvurMFo-m1FeD*v3we>`b7Xp*yCP%899o}2m{>C)0yaq49czP%!`?+fw z*)}NniRTvrEgxi$*xF225AleJ5BDwtdzMDn!q33@4nxFLIYrd?JPw z7EHrve&cz?f^uSGLs!wep5h~5jMf#NGyf@|ppPqHM98vH1-gjeZlxd%06v-kEd z`Co0o&2fi=%UTN;1I>2;_GM_WEos4)d!Msa5wmp1OpoYOg69j)g7yC+(XGnL=@4)w zi%uT%e+O(^7ffdFvZ8z4Cf|-xGC?*FVX;|(XE2sUW zO0}l>P1S=iHU38@)i8tiH4I#Gq2Yn$)h)9^pA$Vmgj04|faL>GhHnBtQ)d4yAgZ&z#HS+46Thn9qAOpxkdwE+ukNpf-`VB%`H8V06Fh}c zJg0a08edb#>)W(V4rwQBJ3Yo}&m~Ll4l(HPxh(2vu#s3J8_P=M;kniYcj2-8y-!lJ zEQ^fx;aAA7y}N7XbnUt;QJ9{rY5x@~;3i=n_@sZ)+Usk>cS($nHrX!Rh&R}bSUdZi z?tFU>cURTH*wLQ?b&>#Bn?);I8Ho+}{;7WN-`Is0EZh*EYfs+rgNV%7Jr$qn3m$qB zCBzkeD5jpK*wl)YtEo!l3@6iV59lOXMI(tZ_A5$d!(!tYB_2>_+<*j&n115QXE(DxYmbUbWtiW)uWm50H3rhR2PW4 
z%#mvulabgS6>b)#H72a6k&JP=PV}`bbIXnXP+qiSr*r8EkDi?2q-=@ZLv!t|)K<0j z%#8nNmtDpk{_ivHC!iDszVH(UmKDtorQ`RFpdHw8G%4?gK`K=VOiMCm3v+{b3N}vp zl}Ypo!h@#^ZIVZI9V@EJhFTePE&mJWVeuLFPLKr9Vz1fWbiyta$W6>gIkhy3uc7y1 zBW}%)g{YBzE>U8xTWC?+)(>75SWr}zY`!fd)`1q~J*pK3IgnNn%t!b(G?HGZqP6`H zzn?E!pMP4jM?NVjz&ETlvTXQxTxqUX9;b6P+Ha1D%MZaDfEj*XJVlxrRU#FWz zA|HO~#(`r75aqZzxbrR8Hm+6AveKL0)Z)E%S}Ux9neUEf;-}V)`EN-)aj5QitsCH{ zg_3S~Q*^kf%qHSyq(UR}L}E`fOd@S+ElYGqVqh0;Jb%qL+6T*8a^^(# ztKXD)mLk)I)l}@IDvny#*z zINzlTNQF7}RnsQ=w`~jVx2+H|^@N#HG@xFU-UQ#x)nXDK6Q;^|ZF5rRXGe0ek~b>2whS`yEIo}Ka7%&h&&c5Mm2w~eU#EH54&eZKurHN%B${ec?f z+C`5yT)uvuTI}n9o88J$ak6B!!)Z$t*A|bZ_8ZfOpa$Q?pihG3#c1?5juAd3%M9D= zoSNIDkqrFUoguQ1_Ri5Sc`lw!{udOy-+m;#2YaM*oUYkw?Jh8p&UN{$T&rf9qD;O> zp5%hUTLhYyyJVT#fT+S`wALNy<0>Ho-VMCdGU*qatFVWzJtu$WZ$AkHa0I_XLq^;U z5+tFY|Hf^RuR*in6Rk7hP2HBv)t3qYc92-3Bp}It$IH{$x?!L2ELv)J3As1wgwT`; zS6!_o#D;H-A2MuGcbmsvpR!}YpEme74H`kGF*a)SjtcPVAC~lgEie#T8%k+mpU;Fa zXp;&nHNI{D%#+29{1(07zy7JMET|CG;)!v()G}ehJ&J3X`YG^Di7C`lJwozgUeAR; z5}Ql1;%6}m*{@5J+1b{JwF@3wd3WgH40<+{(VT`|O0m+ju&~cJOYk@L~ z&s-MUg;CBAn7mTWM#!V?B?#Qf0XugurQ(YGFs-#vI3guqL$ND9QB}EXycFS=!pG$&}=< z<-2dU`GvLG+!$tM9>BqP3K@W&qFnE!F2v07Mj$X5o$Oou#@F?x)zsEeT&|5)lInaVEz-!n^PH)?;W&$MB4Ws zdNZ`V0D*^t-|$o5%h%>yX8BnV7?9Fj;8xn@&6HTHPwwIGa5TWiPTjZ8S(MOYJ>+Vsz(>B!|x zCO5%sP5A3&;T<^kReV??dWA9(^CJr$64_5-=B*SkyJ)x+!Gs=c=!Q&mp8?4(nS_o!rN;pxt78ybmBU4Qxj!#?zG z+<{XqT=TmF=?c?U#$KMF9F_MuJt8N+D=gNoRZ)ILE_oEq^IR?!lQt{!eLOwnJ&z92sypQO zRMDPO6ubG_tPy&CjKG%9o$b%}0^h;{UZJrWVnzj+02f>NBNmq^We1@+C;qLoF#`fxtb0XI#MD%jGLV zqllZ?UowqWZq`~}2O3gx_^u~%9jNYHRLZzTCn{fK} z9_KHqokOgujPh@r2(j$cApEJQBZI3IpvOV(7+kblrX1 z7pL}yS&Z-EsutXl!j`>%NFVugB8CJft+OeW*6yiRVU*|Lt~YL zU*P2P_?_?R%k_Qzt4AC}SIxMIoKbE#FB4Zk6_@@4Sn}z1z)uWOOn#&+!d_<(gV{i2 zyb%VB-1bweBWf+j25UFc_gs{ph~OV|kUlg$+{q|6=VXN3e>{6!X`}*q$A~GDDqc#P zbME7JxPH>Mv)`)h3%LNwKMik=UEFtT%jGxGzPJJpxJmh{R?vyF7$IqyoB9_!yK?#q zhSA+B0Lr)e{ilRNf1$`nY;~SF4T*(UxB>m;e2YJ#Qlb&Q{xZMQ-!3JfpJ0PKFtoz;sT47htAA)5{`@TDKcIozVLDl+);c( z;fDA+Zrih$BUIP7mogeX&+l5Y1j3lhXk}hnZuMu(ev~5}mJZ9krrg&#ihlq*$do%= z;GdveV-ff1wpNgGY>~-+mz2NDaqAz&K@oh$e!{RBU1>I7mxI2@z5?Kz=x+sfnte+T zZ=H4F-9MNZIebQQlrqm1#gE(qUE|o##9ca(VY&Vw8@w`ILPpn5iP#y-b5l9Y9WEJvN^x zXVyFR^kO7WBUYM;zah}?;_t2D!f6NJvW>iH?MTF@`%aAObJ^7|SKqZ-q{Kd^-@?tBOe?7C!Fpi$74$ z$_Fni+vYn0eV$+a;@VPi#07~UU&NZxIXj}YTW_!Z#~w^CwW?({SILlbjk_VH`K5SV z8FeR2tf4y<;EFoGt872PLY=u&n>H;v6*KTG1`mhPl9Q!T=ODDy)TsBzrdCqI28Tt1 zH>+x$vF!3XvI>L6>SwuhLcT|mps8jv-Db-DkQc7Y1;L8ugS~uHPU5Pfu4!PiwMlW; znfSz4iQ3zWju++bk9Jsu*#5rm^ZW1r!KVFRFg*CPJ$to<1u$)xD;gL}68EEWTeR`w zL|K&a5M3q)nT56i60ur?jXC%+^TRvo@k%uL+Op=eJNo8SimDcBPsF`=VhhR4dj&c2 z1?&3oMi1H}i)}MD-{`$3GyTnWLAs>fX@$L(AVqmfuSFD=MpJ8fF#M6*BwF+(tOg&gbj_R&-K&gBS%& z=ZJXkk3*f$vAm>5!~1~YWaBSkW=mm@dvPB9*dC9?uGE+xsg?})W}*}%I#S+UQMKEY z(4;;-2p@d}E$v8Y;zJF_;G0WN+A*l{YA%OEOi~swl~x##+%?ADXck4U=VfTjM~}!n z;Jy@R=So@_$?|7|9$Y+5Jof3GEjFNknxfQwiRG$)O!R^aCBug6VC__3`qGicE!j(> zPqx)*(b$LR{f}cz+8|&HU&nie*_#iqz;buJqaTXfCd*T>VO(hN7r?H1MLs zbbnBPK6(?cisaf!v9hMppv}@pR?c5roDA@la$J&~7epStv;(psPw9u(7q5?eT?`pt zmOCdeivW3}V_759%D$TKG`!3i z2~%e4ni|Hm2k9Ts+^Tzwx9W6oOYR$?NiPaUnBPk(Z<&+KDamHo(^*l3ac%wV;sM7z zu#i@Vl(m2j!0Mv2Z(7S%ONwpID)zYpNM^pz_?FzBnYlg9^-T@2^@UL4Y$zYx^?FI(-uQVQ}n>O}t?xs>0GlROj87yiPLM*H=qHUfX9W7ph@ zB^D*FFlKQ%Bf{{B^kCmGla)uB!AP;Lh$i}Lzmt=;w?lfo7N`?O28H{CK_kl(8+8`&;*{h zzPuw*x^42d`jjJ406aGr)5zE9Yl%?fD`|&Us>eNb(&#zVy(0K|^>N7uMK{iaKV zXMUD!Is9y)Tb_9lcYklK4}-F1uX?LiXiPWN8rrLzV}y7m5ae7(7*%U1#*X%r2Wla| zc98?j%0_ML4>j{q6QU!>kFC16K~abIblQ9@L0{Nbqp`|ZPQx#>?ojKSwvMU(FxAli z31Hb>21btb*xuyqG#7d7^T9vR4gV;I3!KP6{e6aY|0-wsv%mkRAWmsOX1c@C{oCg5 
z)2&7J`$dnX1-ydbN&ns~z#asCdG2f^SZDQl&wmQIfz&C#1G9lw4IumE%k_+>FKBZI z$1d<*RC?UXt}L&LA35}-r^%}DgSDn|>qFs>TQZ|(L8=0{I@AN1&Eygm+yU5^g3lC+ zz?xPxo5ZSSx^cylSr>Uv_;UOnOO6d!px>T05vu7eD!m)iS<_xh64&zmx+)XEmA4%C zUSQ1+3kZkPbI7PW8H>tAm-Fn?`KT|8~z5catl^vX6hh5W$4*s&3+(`lh^}Pzm z{eq}{6lmUjWoD=Btw1ul4 zQKlY>Uz75>$4k@$L%GqFmk-$Y*a&Jx_b>X5fQCHaoLDF{-Zz4m|0wspxp6to9%cfJ z5QVJ+-|4l#W2w%)rKm>>aDIzkZR3mKxMFxd&V+>pg=+6U>}ql5pImzd$#f^Ak^Vf+ z;eUSlSNQ+s4`7#*VZX=%Wo#U)l$kqQ1NTJS6RCcnw~p4iOZpC9a^=!Be@(Ap&;D7y zC0E|i?Fld55yPUgVv$##WB-4O8>}Z>5?g->dxH=`0Ter4K1RSx)`|&&7Ol=Q;J{|I z_?PNzMTt-Y*q*bds{ne{@S5+hF@_kw~7nSwHxF_A4hq1X3~md)Jm< zMn`|XZnWXT@eDi1hcbZmh*Y~Lpd%)S>Db$G+ZaBHe6P*+IxtO;Aayw%J)W>>Ay$ zw(&`8o}2I+$y2&ehN=5RKTHCBf!&lnRc$vS#>O!@O|ZiZsVffpZ)>|1QVw@7%RW+4 zwpTawxf^~@HF@hH82S;UAn$jEk4hhF135mJPuOiFckdmT))6gsXrS8|PJFn<+ZuwOYgT>@6W)M*pP>-iTS(h0mO$3T| z{Y$AnJzdVJOKhA|k0wE+-W!I`(vGM#y;;2|`})|>^YQot2O8%6E`N&_SecK-S5s%< z92KrrE!%@>Ne_Q{pYXLfCzF2ZCRaQ7j(4s~vr2JErB|C{Ky}YkgDuU8Bc>v*53kNy z;qGoY3bbw}8|=sXn?c%3V$`i+D|{%1mfY0tEN$2bhib4Zz3)5R1y*`$jYzfg4A+Qc z7mw@>HmvWCZNU#6PArZ`ZHTLc2J%FCX|)RU&UL^SOOT<4yS0t-BWAQ+mF$r8kreV( zE5Stl7GJrhv@=LxcJg;`kH&{#y0F6V8;H#m6m@LJTdbRFo@Z3f)aNOf4DrjYdDW4V z@)xgFzK-zt?)#{9;7rH*WBN)-T_mL1KtEmR{hT8!7pytinIJr5N4IhkN@jHUsrpY^ zBIeU^Z7GU#C3N#t-SmANz1%og*EbiooPMQ=0FZ{Yzy=4L4YNGFD|?YyXcc8WZ+G8_ z(VVQBzWv#k<>2r;-X5&ygV(BllT8>2eC`r`ILrUUW$43NbGK0_6jKjV=H;NJIHyqR zV~B^|pwT3B1KNz2j-^?7`poS0d+{>0V6xaJ$D!rsm2Y8f<~=m;5r~!#Qts2M4?9o6 zPr=>bloeJ8udsMImn}PHUN}S8ahWXiP$R^RT)hU%&eiBzee`^`a?k#^QSzr8PVPOL zxY0ShDlO;&+JkeLYs>cI*Z~ZD9wv+F0b8|YG*Zu;F-;!DMWPk+P(W6>H0^UwLF^ctvp;HDs=?b_J__MkX;NYceggnoSndQCXZsG#)>^V8y(u4nuy`v zbi_I@2mPvYjm--dVo!#7X<92KrI~Lp$~~GjzPcsMlT;5G;;^9XbmqYx&_P_5Y1Uz3 zCRba*SbkC+sdlrX-RSP(_*ZiFh^e6~|g-U*TNkTC>9$Bk(YiFd8z37eEDACp$8|AE51-J0{32wqJ@z6uN6a z9_&$jzE3z7ec&1#$GJ~4&_Tu{uJ25-;U~TYlKPm7o&Mf z_OG2?UI^~K7O^hj%a??%d)M!}UX?rpO{6j`EMq08_n2QQ-`6(cU_MnplDTu+kipbNXzLGakwC}Lj@syU+ zsWuEpc1FnhHp3WD8X6kWW}DEZg-pB-rzq0vqE{~V%+0w{&fYm-Dj7ydAsrEyNoLm7 zt|x8eI#I>^6v`NH7{pg-(?FTI&lf{dEGy1vDQVfMOpI?}zE_CmzF-e$xWn|=7sfgV zCit6IIi{%2F~~hX++aXcO+XA<4~-0KiW3q{rMnOViyr5`tG*pU&qzd|$SH)eEG?Qx@H{+WgM(tZUzN@jjXi?KwAz2$(QnK~{R> z;O=nBJRROJY~SQ0$`~PVU=Aq5MhS=8%#H0O5BmsWwdWQ0e)$x@c)j;##sw>X#79_Q z&@y~8rn?3^`_;7%^OIHh@*U-17He}pM_dK@eheliA_oueXQ`YjP3K7PxI7B^(VQo3|1tUDnr{G0u zr5ND2m>CR+cPT3GtAp!CKY@~eF*{#AI(IL`0qXBb1eGi&` z*S-P1hcudrsHBFY6`9Gb8V2M(QJPeb3w6@1`>ba~Yj@~gtu-YKc_bRj5JHdv(hWln zdxmpG9P4VEuX#UBR6DwH>4g+4l2-wDr&84Ei2Lvxo7k>2>IU>dW;h|dvanNnG9^AU z4;5ph6Y@0kZWTCTzN@l;?V*vX<6Q5)Ri$;je|24O+Cgb?lO;uh^$tu1L(W}H4}Mbx zBC;QF7QC3npy64aLSt2R*>up-QMNgm5a8vetllDazXiGHZi?+&QPabQ`>t)Ru0lKU zNU)PAT^6N=Emepbqg6L_ZZru=C(~3c=>cJW&m(b97xrY~XLV}r?O5VAa%K7%jvvZD zbvN&Mbx8yu^WU##hkplV{%?L?rGs9`h=|&gOa(_Kp#e$>pi}SNwcLfuKt!cQmq-XT1O*O0KJ5`;!eWhESyF-sBL&rk+dfW5v+-l)5kvzFbP5T$tjIgSBOE z+;1$%MsG^_GMX?JC3x(}=9PlOg7sXGo!YR(RvYsMl@us($23uac{<2#PBPzVggp`* zJakPzHd#mi<>UROF1PIDMJ_hBeJ^S}(_wn#Hx+0+oN;b`JFs9JAI@zgP~7NsEuoc4 x#abZ<+F70Ew3_YAGS10(twW6?O;Y+5zNY)Mtujk}O+FcgZ2!mkpJRUx{u8!thN=Jn literal 0 HcmV?d00001 diff --git a/tests/test_models/test_vision_transformer/test_vit_2d/exp_logs/vanilla-nproc1-lr1e-3/acc-vanilla-lr1e-3.jpg b/tests/test_models/test_vision_transformer/test_vit_2d/exp_logs/vanilla-nproc1-lr1e-3/acc-vanilla-lr1e-3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c06130523b46a90a8bfaa33bf946fb97a8f2f4b3 GIT binary patch literal 30033 zcmeFZ2UJsQx;MV*QWR+-HPS>xM5#)P9;AtgihzI+6_q9;A`m1b3L;I2f`XzDq=le!=i}%9(-08c!Y?2w zz|X%$c#ELWuP^8?5n&;bUqAeMlmEDsn~#TwPe_1Y;7_Ogx868SfY=tUDegNwT*?5q 
zoiB18R0&WC_s+Fzwbn!J6lfQm5}R&+kCwH>L}cPE>LHA7^ZO|O`kZqPPZeL&yoVQ1 zY395buo)7CqtZfzY6wHE6tB+j8uY^^H=OlMLSuqne;z%|rz9Ty*!gmQaH@%7D*LDR zR(GO|({?6otkc0nh#rZF;9;*A9zB|G>m_Ka$HeC7Ou!Q z(b$rEUKBuct#aMX9?*)3LEp?t5}3c~ zk|~+vLis$Xzpy4ou&}O2rbAA`V+ec_QtJ(?|HbYRnMM|Gn@equ5y`b|C|QO4Aivq+ z=q2Bf1+~YBQ@3IffueM9O57Fc2yB~bS9Mvdk-#HS97WLxQz%;jEuIe|;csU`^rL>*_6tkI9Wd^DrSAN@a7JCKBK@z(Y)!NlY#%&ua2T zo}=UDwo6>Ci(TjLdFy;8==iG_Lntj(>!K${O&SVE_Nq_p-Klt9)Fe}S&ktziD9Mm` zy$-zRz{M#R+j94#M}ivRbVVI*so5!<{i>};8s8T0Fepo+yzserB|B~J1z+THh?~== zE4@i?srF0?ps^ZoWce^}--sDT-L>dTn@%ko3=*DP9^sz!h-8Bc8A{t2nTI+uM7~Cv R(fx6C^hZ_GAKAZl{{#BA@ht!V literal 0 HcmV?d00001 diff --git a/tests/test_models/test_vision_transformer/test_vit_2d/exp_logs/vanilla-nproc1-lr1e-3/alignment.o3476018 b/tests/test_models/test_vision_transformer/test_vit_2d/exp_logs/vanilla-nproc1-lr1e-3/alignment.o3476018 new file mode 100644 index 000000000..6b027b5ab --- /dev/null +++ b/tests/test_models/test_vision_transformer/test_vit_2d/exp_logs/vanilla-nproc1-lr1e-3/alignment.o3476018 @@ -0,0 +1,165 @@ +Wed Sep 1 01:07:01 CDT 2021 +TACC: Starting up job 3476018 +TACC: Starting parallel tasks... +warning: variables which starts with __, is a module or class declaration are omitted +process rank 0 is bound to device 0 +distributed environment is initialzied +model is created +Files already downloaded and verified +Files already downloaded and verified +training and testing dataloaders are created +loss is created +optimizer is created +start training +epoch: 0, train loss: 1.9497510997616514 +epoch: 0, eval loss: 1.754234939813614, correct: 3521, total: 10000, acc = 0.3520999848842621 +epoch: 1, train loss: 1.6049139609142227 +epoch: 2, train loss: 1.3857794501343552 +epoch: 2, eval loss: 1.2831632316112518, correct: 5410, total: 10000, acc = 0.5410000085830688 +epoch: 3, train loss: 1.3016913873808724 +epoch: 4, train loss: 1.2616293649284207 +epoch: 4, eval loss: 1.2658930838108062, correct: 5409, total: 10000, acc = 0.5408999919891357 +epoch: 5, train loss: 1.2320433721250417 +epoch: 6, train loss: 1.181612290898148 +epoch: 6, eval loss: 1.1402096092700957, correct: 5881, total: 10000, acc = 0.5880999565124512 +epoch: 7, train loss: 1.1643818397911228 +epoch: 8, train loss: 1.128499301112428 +epoch: 8, eval loss: 1.0965303361415863, correct: 6053, total: 10000, acc = 0.6053000092506409 +epoch: 9, train loss: 1.114193707704544 +epoch: 10, train loss: 1.0830892950904614 +epoch: 10, eval loss: 1.0390974164009095, correct: 6258, total: 10000, acc = 0.6258000135421753 +epoch: 11, train loss: 1.0508871960396668 +epoch: 12, train loss: 1.0322130365031106 +epoch: 12, eval loss: 0.9689173698425293, correct: 6482, total: 10000, acc = 0.6481999754905701 +epoch: 13, train loss: 1.0006194637746226 +epoch: 14, train loss: 0.9652800906677635 +epoch: 14, eval loss: 0.9150958389043808, correct: 6713, total: 10000, acc = 0.6712999939918518 +epoch: 15, train loss: 0.9430981692002744 +epoch: 16, train loss: 0.9156872307767674 +epoch: 16, eval loss: 0.8703682094812393, correct: 6913, total: 10000, acc = 0.6912999749183655 +epoch: 17, train loss: 0.8822251515729087 +epoch: 18, train loss: 0.8485424190151448 +epoch: 18, eval loss: 0.8234190821647644, correct: 7120, total: 10000, acc = 0.7119999527931213 +epoch: 19, train loss: 0.8285953049757042 +epoch: 20, train loss: 0.8009484337300671 +epoch: 20, eval loss: 0.7808267176151276, correct: 7228, total: 10000, acc = 0.7227999567985535 +epoch: 21, train loss: 0.7774611741912608 +epoch: 22, train loss: 0.7435575358721674 +epoch: 22, 
eval loss: 0.7523189872503281, correct: 7367, total: 10000, acc = 0.7366999983787537 +epoch: 23, train loss: 0.7315681789602552 +epoch: 24, train loss: 0.70117900627 +epoch: 24, eval loss: 0.6928718358278274, correct: 7580, total: 10000, acc = 0.7579999566078186 +epoch: 25, train loss: 0.677533069435431 +epoch: 26, train loss: 0.6627033298112908 +epoch: 26, eval loss: 0.6921748876571655, correct: 7586, total: 10000, acc = 0.7585999965667725 +epoch: 27, train loss: 0.6410714266251545 +epoch: 28, train loss: 0.6192339707394036 +epoch: 28, eval loss: 0.6416671514511109, correct: 7719, total: 10000, acc = 0.7718999981880188 +epoch: 29, train loss: 0.6093639281331277 +epoch: 30, train loss: 0.582532714520182 +epoch: 30, eval loss: 0.6166591048240662, correct: 7809, total: 10000, acc = 0.7809000015258789 +epoch: 31, train loss: 0.572193189847226 +epoch: 32, train loss: 0.5541256200902316 +epoch: 32, eval loss: 0.5951347410678863, correct: 7922, total: 10000, acc = 0.792199969291687 +epoch: 33, train loss: 0.5345369838938421 +epoch: 34, train loss: 0.5273816007740644 +epoch: 34, eval loss: 0.5837202191352844, correct: 7972, total: 10000, acc = 0.7971999645233154 +epoch: 35, train loss: 0.5059237045292951 +epoch: 36, train loss: 0.48622317095192114 +epoch: 36, eval loss: 0.5698897138237953, correct: 8024, total: 10000, acc = 0.8023999929428101 +epoch: 37, train loss: 0.47362951143663756 +epoch: 38, train loss: 0.46030426907296085 +epoch: 38, eval loss: 0.5610475659370422, correct: 8049, total: 10000, acc = 0.8048999905586243 +epoch: 39, train loss: 0.44165324921510657 +epoch: 40, train loss: 0.4327346086502075 +epoch: 40, eval loss: 0.5642214670777321, correct: 8095, total: 10000, acc = 0.809499979019165 +epoch: 41, train loss: 0.41423581935921494 +epoch: 42, train loss: 0.40917488780556893 +epoch: 42, eval loss: 0.5602998435497284, correct: 8131, total: 10000, acc = 0.8130999803543091 +epoch: 43, train loss: 0.39171184477757437 +epoch: 44, train loss: 0.3744060835059808 +epoch: 44, eval loss: 0.5633655220270157, correct: 8134, total: 10000, acc = 0.8133999705314636 +epoch: 45, train loss: 0.36267226934432983 +epoch: 46, train loss: 0.3420030690577565 +epoch: 46, eval loss: 0.5533872425556183, correct: 8157, total: 10000, acc = 0.8156999945640564 +epoch: 47, train loss: 0.3287143409252167 +epoch: 48, train loss: 0.316296321396925 +epoch: 48, eval loss: 0.5576229721307755, correct: 8209, total: 10000, acc = 0.8208999633789062 +epoch: 49, train loss: 0.3068045072105466 +epoch: 50, train loss: 0.2929732614025778 +epoch: 50, eval loss: 0.5654072970151901, correct: 8227, total: 10000, acc = 0.8226999640464783 +epoch: 51, train loss: 0.2795026940958841 +epoch: 52, train loss: 0.26673941375041493 +epoch: 52, eval loss: 0.5736668109893799, correct: 8227, total: 10000, acc = 0.8226999640464783 +epoch: 53, train loss: 0.2506744866164363 +epoch: 54, train loss: 0.24351145980917677 +epoch: 54, eval loss: 0.5846156671643257, correct: 8204, total: 10000, acc = 0.8203999996185303 +epoch: 55, train loss: 0.2253616195248098 +epoch: 56, train loss: 0.2177750574690955 +epoch: 56, eval loss: 0.5943332687020302, correct: 8246, total: 10000, acc = 0.8245999813079834 +epoch: 57, train loss: 0.20670234989755007 +epoch: 58, train loss: 0.1973607996288611 +epoch: 58, eval loss: 0.6195310011506081, correct: 8245, total: 10000, acc = 0.8244999647140503 +epoch: 59, train loss: 0.19024320448539694 +epoch: 60, train loss: 0.17597664877468225 +epoch: 60, eval loss: 0.6139472931623459, correct: 8294, total: 10000, acc = 
0.8294000029563904 +epoch: 61, train loss: 0.1674150490791214 +epoch: 62, train loss: 0.15718420511301684 +epoch: 62, eval loss: 0.6285309329628944, correct: 8261, total: 10000, acc = 0.8260999917984009 +epoch: 63, train loss: 0.1480691913439303 +epoch: 64, train loss: 0.1384550367234921 +epoch: 64, eval loss: 0.6587671056389809, correct: 8263, total: 10000, acc = 0.8262999653816223 +epoch: 65, train loss: 0.13241269834795777 +epoch: 66, train loss: 0.12871786830376605 +epoch: 66, eval loss: 0.6718123883008957, correct: 8303, total: 10000, acc = 0.830299973487854 +epoch: 67, train loss: 0.11577517866176001 +epoch: 68, train loss: 0.11130036151378739 +epoch: 68, eval loss: 0.6887702852487564, correct: 8332, total: 10000, acc = 0.8331999778747559 +epoch: 69, train loss: 0.09883711646710124 +epoch: 70, train loss: 0.09635799735480426 +epoch: 70, eval loss: 0.7159708231687546, correct: 8307, total: 10000, acc = 0.8306999802589417 +epoch: 71, train loss: 0.09449125119313902 +epoch: 72, train loss: 0.08857650914210446 +epoch: 72, eval loss: 0.7160102307796479, correct: 8351, total: 10000, acc = 0.835099995136261 +epoch: 73, train loss: 0.08085554241373831 +epoch: 74, train loss: 0.07873564483407809 +epoch: 74, eval loss: 0.7119918942451477, correct: 8393, total: 10000, acc = 0.8392999768257141 +epoch: 75, train loss: 0.07206312137446841 +epoch: 76, train loss: 0.06772394200824962 +epoch: 76, eval loss: 0.7328802436590195, correct: 8351, total: 10000, acc = 0.835099995136261 +epoch: 77, train loss: 0.061777200397788265 +epoch: 78, train loss: 0.05721901174710722 +epoch: 78, eval loss: 0.7407010316848754, correct: 8385, total: 10000, acc = 0.8384999632835388 +epoch: 79, train loss: 0.056560877406475495 +epoch: 80, train loss: 0.0528045150318316 +epoch: 80, eval loss: 0.7767532706260681, correct: 8354, total: 10000, acc = 0.8353999853134155 +epoch: 81, train loss: 0.050682742870887934 +epoch: 82, train loss: 0.04895328068915678 +epoch: 82, eval loss: 0.7942879348993301, correct: 8368, total: 10000, acc = 0.8367999792098999 +epoch: 83, train loss: 0.04686643050185272 +epoch: 84, train loss: 0.04325723648071289 +epoch: 84, eval loss: 0.7906839996576309, correct: 8356, total: 10000, acc = 0.835599958896637 +epoch: 85, train loss: 0.040166335769605876 +epoch: 86, train loss: 0.039296497894945194 +epoch: 86, eval loss: 0.8033982694149018, correct: 8376, total: 10000, acc = 0.8375999927520752 +epoch: 87, train loss: 0.038185219698566565 +epoch: 88, train loss: 0.03735689769441984 +epoch: 88, eval loss: 0.8039661139249802, correct: 8377, total: 10000, acc = 0.8376999497413635 +epoch: 89, train loss: 0.03383794939145446 +epoch: 90, train loss: 0.03318257091034736 +epoch: 90, eval loss: 0.8097118645906448, correct: 8389, total: 10000, acc = 0.8388999700546265 +epoch: 91, train loss: 0.03290939923109753 +epoch: 92, train loss: 0.030776230903456405 +epoch: 92, eval loss: 0.8237936168909072, correct: 8401, total: 10000, acc = 0.8400999903678894 +epoch: 93, train loss: 0.033349379108344415 +epoch: 94, train loss: 0.031906195783189366 +epoch: 94, eval loss: 0.8250258564949036, correct: 8401, total: 10000, acc = 0.8400999903678894 +epoch: 95, train loss: 0.03031293043334569 +epoch: 96, train loss: 0.029958056238460904 +epoch: 96, eval loss: 0.8200247555971145, correct: 8402, total: 10000, acc = 0.8402000069618225 +epoch: 97, train loss: 0.029532150564981357 +epoch: 98, train loss: 0.029668816346295025 +epoch: 98, eval loss: 0.821219089627266, correct: 8399, total: 10000, acc = 0.8398999571800232 +epoch: 99, train 
loss: 0.02980129667842875 +finish training +TACC: Shutdown complete. Exiting. diff --git a/tests/test_models/test_vision_transformer/test_vit_2d/exp_logs/vanilla-nproc1-lr1e-3/loss-vanilla-lr1e-3.jpg b/tests/test_models/test_vision_transformer/test_vit_2d/exp_logs/vanilla-nproc1-lr1e-3/loss-vanilla-lr1e-3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3f47b07b80585fa3f78979279938a1f188ae6858 GIT binary patch literal 37624 zcmeFZ2UJttx;7fRfDq|Yg9-xDRGLx}6=@I$XL;xIKJPQb`plYx z>^Wmzt7*E9RC2Pem`XD&`o4o+?^Ztj08JiNQOd3bra zxp(pJ;^q7G0ROj}pKtfC7r!d`_m8r3ad2?)@o@9}+b#dp6RQm(w2N(y{XPfVeh9k| z8;1}Zs|x~yK-jqct$A#a|9G&mb8vET^MK9Z2XCm`12&(718g)G*kbVRQ1Cj0Q;17= z-;tBtA~x4}_WOw*eUS2O*MU=2?P3>($O>B5?}YL4iA(I2lu|sXq^xpCTSr&#*l~U1 z(Ed9i3g>J-zSV_YIGXj*U-zp2QP~q_6Ye78aM5DL>aYHmO^{ z_RcT6*dQGLZWj3b?}q&^b_s!YvHuzao?mvcu?K;LLx_`W-w|%%lQulp{6zL2eXvXP zRLZlecHRS87sz7Q?+o#YD`*oGDZfnnH_QGp!@~Ycmi^y`{Xcf$A-g%)z`^4Xf*>Fa z`tvwt$p0z-d;6eLiqVMh!U8VVQO?ppA`8ODf-r@x0du$_;M;+8VUE#YlPpMGo$@Wr z7_Ki3o4yd7>&(q)4mubFb*+qNLFStDlky6feK_c*xBZT^g98$eAdF7v=^c$o+B?w| zckL^J#{g(zT=ZKqm$bBTgAvtz1oI|DayvFQgz8)oYAY#FDY9MCZQL(uurYCn||0!-=t&liE%MDyrH4D;y5$93~ z1U)1}sd-e*I4nVf1&MT`TGy~39Z*IBIMCs2l_6` zuiL+$p>$7Ib$iWyvFsV&<5^?)_~)3qoQ;+kMqhLYymPQAF+bgft&eO16AH*< z2d&d@2_|8a;Ns&f$jwFY{A}Ke~Fw)E!r=K;8}f zX~s!0<2Xomc?V%!rP#(98q6i{_H@>!U}}7eJ@kztCYoM63z9c4_a+)Eq&CZmT^4(s z+a?996Ic+sXeD(B04IYx)*U$+&gAtdL*)_96-hRed)`#gFT5VonccHQUwKzWszXjJxvuc z6FqLocHr=-dptVdV6XM)qQ+E}F~nS+GlcT7hIRtb>Nt!tYs(7EB*{v2<;vHO{Af9X zb@h^wxFYfJ>@xLdx7xXEJJY%R3S$*{m<39UxOSiJ{R+za83-Gm=o)g@wQnt*``-WA z)^@nuCwyBoz_Vg-*Uh9$yG@75@5GWuE)_8fSP-|z+U6}*h~7NzgV~6 zuTHyxLyr25b=~ZC{aU0O&%nCqi!W_)+pu!;MqBneHnID`5 z#?eZzzVAE=)ZIzUsV^!IeQrK;=lKEH!8OOkXNYr4 ztrC>TbUO2_u$k!3;M->gs*fLx)4O#|?$Qt{EDG-@P*!sb2oW z@M2+SX~%@Vp;p0r&P*HbS)WveJ4dx27{59q$>&9?1XT3GF|wF0rX~QcTrLO@O0bgd zXq5$QC_-oiMc{4@Tvhd$p$btk{%lj@lk1nI)qhM_HZJ`v+a0nSmWnyPlqU*5fNkH~ zdWbob!dVTg#rAq@_BiXj8QbWOFRTLYX1y(ne57}~jWS@A>YzncvvRm0}kI zVf+CY(9SUju(fBK9wBPBR{7_AW#NhP$+p%!sQST+x*B&tXHke%ekvy|D-fi!-9S@g zehf)pSluLtQ=_w~!;G{UCy|bwnH)C);=c1PX7^i4V6y)WzK*Y+Tc?P4ynFeNi$duM z1%D)NZt<)t5!diR?X3dUqgYpD7)G2j-hA*y)$9^Ro${fHt3^E5G4h24sk-^-F+Zto zSJ6EyQ&H1cA+))GY-YwBFrL@5J z)qBfa?cp~XYJq;;J|q^KhWR9#iYjG61nM+l-{FvxARRk^}m`Rro(Lz$k)Xaln|^hAoM<5I3NMTsum?5=PXZA&}r;2Gld^p97)>FjjUJ+Ci^7m{c9uRq+yv#i;bHwB}f2DE|-mspU!2+zDIrf8mXeGc4A$>ID%61)tdf3s@euKC@RE7Q=$nBYF@IuHem9^5z6I33>N7tbwz1H`*yc zr!LXBD^Q|-WV$TywOPPTOs7u~QL!*{Ygqr4iXnZStlBXn#{k#ETrPrq7RE)tHB6$& zrT9}`Er0xOHJ0xdMI7(wL|S@zTz^!*c*Vfo<;d|rYyx%X%;jNa8Q9u}4s84^hv7wv z1Jwy&kSDqiFYY*z8z>5Lhr+gut6QVE10*KIQxC@}7}z@=c1>1FQhmRQxRsoc6|^c% zQ>F8bYry5c2hK{iiUVm#N@WaMeoI1Ec-EudUVEs&U|RC^_|f+-&-D$LB~W&myBfr! 
z^LrjhKRN>lp8R2}_Q(*zwEo>BgcN!|Lm#f?r@UzhZU-yEkzx`ReH&a{Kg!&Ny9E;N4o!O+~b5 zV!1>utf~UB#_JN%m+E;5jmP3+F*4Sd^Xk(hdkw6u4D@%s>`Qp)d<&+aAyC~l?4VqP1&ctq z6E=l-T1n*43YlWEwRVw(l+I4Tuhb0tMvqF>r%y7+(qRCA-nN?LJxt;#{MB>i= zdUwWqn_xs6jk2F1NDV<+4yJWBO*id?Q6*!wGE%c@?EY?i$ysYVKmb z=cKz<(J0KyKJ0!)PsZo_U(^_#*ju=8m@ums6wrl(xdkBI-hhUp+BLpu3`bDLNe0ne zm{s3#gIFc5cCaOSThA$DBJQ28zjI}r?w!%N(|5jp$rPg3|8ZF-P2-o_>1R=0tTW}H zr!c%NxB!USGG+CeaRK&?gkpT?oYT=v(nv36721h7 zw#&DOm+T#LC|d2AeDP+dHnf?=J*XmwE#=sqWcP|j@GI}PACEjaq44#~%ZL{%p)Df~ z#EQ3$M*t~$C~Bx)i}?=R*@^IJIRyfk0goXkZm#8C;v~*nxTx*802WcMf}vS@FUY&N+D|R0BmpIfcdb>xV`9=A5}Hs!py9LSq&s|b&BR4qg{px5@|w0 ziP~r|=592Cds_lh0d#ld1&VhQ2CxifZ$GHps4*9@kqW=+;J;{$X}#~tz0iAIdt8UF z*$ zsCbynQcvPNbBs8H>ozOC1sw2nb@G3!<3(E+dQxv^Slb}j@9Rx~(t4ShAlWSv#mRgv zLIUkmxGB+X0`&sJIIVyTca7L|vcjbwG`pDoac1Jm*jd2oDQi+Cp?~I6gyt_{FSznt zabJbtt_Z>^&78`$+($P>3iqL)j(|7`*H%dtjM~8R5GFhG7JHjXN{8wuir#+cIcmiv zT7I+N&3wj4BAwGGTyb)Y8bV_L-=|i`-5004il#A7i3ha!MQIUS;T7;m&7;gS2d)d9M)iEAt14acQT@u{c1(46-){dAZmf%2$8Ft)7e86S>UL1M{Eo9n(*q(5DQe zJiVFOkk=_=#aE0yNy@EzTCSQ}oDok9oe+EuVj~coAaYr>0*@hb!d<;1@dr1#1TVX& z#g?Pl+-V%SK%9q7+`ioEsI@Po^f#YlrG-7L9~s0-aJIY`p9CR3>{JYccVG%10|Nh0 zfMz~`ZRP%fEx(f;5;7gMyy_WFJ9B-W7JTVZYs^QRS2|9-N-1UB;^!pt_LZa5!6l;N7rMTVi_$i_6D0(N93#xcl>JZJkTA>Z5OXL-_8}`Z^@;P3XtAc0f<_ QZ>ao+B>yj8cl{juUustNoB#j- literal 0 HcmV?d00001 diff --git a/tests/test_models/test_vision_transformer/test_vit_2d/test_vit_2d.py b/tests/test_models/test_vision_transformer/test_vit_2d/test_vit_2d.py new file mode 100644 index 000000000..fb32bea49 --- /dev/null +++ b/tests/test_models/test_vision_transformer/test_vit_2d/test_vit_2d.py @@ -0,0 +1,87 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from pathlib import Path + +import pytest +import torch.autograd + +import colossalai +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.engine import Engine +from colossalai.logging import get_global_dist_logger +from colossalai.nn.layer._parallel_utilities import _gather + +CONFIG_PATH = Path(__file__).parent.parent.joinpath('configs/vit_2d.py') + + +def eval(engine): + engine.eval() + accumulated_loss = 0 + correct_sum = 0 + total_sum = 0 + + for i in range(engine.schedule.num_steps): + output, label, loss = engine.step() + accumulated_loss += loss.detach().cpu().numpy() + + output = _gather( + output[0], + ParallelMode.PARALLEL_2D_ROW, + 1 + ) + output = _gather( + output, + ParallelMode.PARALLEL_2D_COL, + 0, + ) + output = torch.argmax(output, dim=-1) + correct = torch.sum(label[0] == output) + correct_sum += correct + total_sum += label[0].size(0) + avg_loss = accumulated_loss / engine.schedule.num_steps + return correct_sum, total_sum, avg_loss + + +def train(engine): + engine.train() + accumulated_loss = 0 + + for i in range(engine.schedule.num_steps): + output, label, loss = engine.step() + accumulated_loss += loss.detach().cpu().numpy() + avg_loss = accumulated_loss / engine.schedule.num_steps + return avg_loss + + +@pytest.mark.dist +@pytest.mark.skip("This test should be invoked by test.sh in the same folder as it runs on multiple gpus") +def test_2d_parallel_vision_transformer(): + # init dist + model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = colossalai.initialize( + CONFIG_PATH) + logger = get_global_dist_logger() + + engine = Engine(model=model, + train_dataloader=train_dataloader, + test_dataloader=test_dataloader, + criterion=criterion, + optimizer=optimizer, + lr_scheduler=lr_scheduler, + 
schedule=schedule) + + logger.info('start training') + for epoch in range(gpc.config.num_epochs): + train_loss = train(engine) + logger.info(f'epoch {epoch} - train loss: {train_loss}') + + if epoch % 2 == 0: + correct_sum, total_sum, eval_loss = eval(engine) + logger.info( + f'epoch {epoch} - eval loss: {eval_loss}, total: {total_sum}, ' + f'correct: {correct_sum}, acc: {correct_sum / total_sum}') + + +if __name__ == '__main__': + test_2d_parallel_vision_transformer() diff --git a/tests/test_models/test_vision_transformer/test_vit_2p5d/2p5d.py b/tests/test_models/test_vision_transformer/test_vit_2p5d/2p5d.py new file mode 100644 index 000000000..1a576d039 --- /dev/null +++ b/tests/test_models/test_vision_transformer/test_vit_2p5d/2p5d.py @@ -0,0 +1,88 @@ +from pathlib import Path + +import pytest +import torch.autograd + +import colossalai +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.engine import Engine +from colossalai.logging import get_global_dist_logger +from colossalai.nn.layer._parallel_utilities import _gather + +CONFIG_PATH = Path(__file__).parent.parent.joinpath('configs/vit_2p5d.py') + +def eval(engine): + engine.eval() + accumulated_loss = 0 + correct_sum = 0 + total_sum = 0 + + for i in range(engine.schedule.num_steps): + output, label, loss = engine.step() + accumulated_loss += loss.detach().cpu().numpy() + + output = _gather( + output[0], + ParallelMode.PARALLEL_2P5D_ROW, + 1 + ) + output = _gather( + output, + ParallelMode.PARALLEL_2P5D_COL, + 0, + ) + output = _gather( + output, + ParallelMode.PARALLEL_2P5D_DEP, + 0, + ) + output = torch.argmax(output, dim=-1) + correct = torch.sum(label[0] == output) + correct_sum += correct + total_sum += label[0].size(0) + avg_loss = accumulated_loss / engine.schedule.num_steps + return correct_sum, total_sum, avg_loss + + +def train(engine): + engine.train() + accumulated_loss = 0 + + for i in range(engine.schedule.num_steps): + output, label, loss = engine.step() + accumulated_loss += loss.detach().cpu().numpy() + avg_loss = accumulated_loss / engine.schedule.num_steps + return avg_loss + + +@pytest.mark.dist +@pytest.mark.skip("This test should be invoked by test.sh in the same folder as it runs on multiple gpus") +def test_2p5d_parallel_vision_transformer(): + # init dist + model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = colossalai.initialize( + CONFIG_PATH) + logger = get_global_dist_logger() + + engine = Engine(model=model, + train_dataloader=train_dataloader, + test_dataloader=test_dataloader, + criterion=criterion, + optimizer=optimizer, + lr_scheduler=lr_scheduler, + schedule=schedule) + + logger.info('start training') + for epoch in range(gpc.config.num_epochs): + train_loss = train(engine) + logger.info(f'epoch {epoch} - train loss: {train_loss}') + + if epoch % 2 == 0: + correct_sum, total_sum, eval_loss = eval(engine) + logger.info( + f'epoch {epoch} - eval loss: {eval_loss}, total: {total_sum}, ' + f'correct: {correct_sum}, acc: {correct_sum / total_sum}') + + +if __name__ == '__main__': + test_2p5d_parallel_vision_transformer() \ No newline at end of file diff --git a/tests/test_models/test_vision_transformer/test_vit_2p5d/log/111log1e-3.txt b/tests/test_models/test_vision_transformer/test_vit_2p5d/log/111log1e-3.txt new file mode 100644 index 000000000..54ecbf869 --- /dev/null +++ b/tests/test_models/test_vision_transformer/test_vit_2p5d/log/111log1e-3.txt @@ -0,0 +1,103 @@ +TACC: Starting up job 
3498212 +TACC: Starting parallel tasks... +warning: variables which starts with __, is a module or class declaration are omitted +process rank 0 is bound to device 0 +distributed environment is initialzied +model is created +Files already downloaded and verified +Files already downloaded and verified +training and testing dataloaders are created +loss is created +optimizer is created +start training +epoch: 0, train loss: 1.9590576728995965 +epoch: 1, train loss: 1.6275222167676808 +epoch: 1, eval loss: 1.5277319371700286, correct: 4435, total: 10000, acc = 0.44349998235702515 +epoch: 2, train loss: 1.4355541419009774 +epoch: 3, train loss: 1.3253967445723864 +epoch: 3, eval loss: 1.309086227416992, correct: 5283, total: 10000, acc = 0.5282999873161316 +epoch: 4, train loss: 1.2578775298838714 +epoch: 5, train loss: 1.2231916554120121 +epoch: 5, eval loss: 1.1699816286563873, correct: 5695, total: 10000, acc = 0.5694999694824219 +epoch: 6, train loss: 1.1872552669778162 +epoch: 7, train loss: 1.1616783823285783 +epoch: 7, eval loss: 1.069484794139862, correct: 6183, total: 10000, acc = 0.6182999610900879 +epoch: 8, train loss: 1.1155579333402672 +epoch: 9, train loss: 1.0878059365311448 +epoch: 9, eval loss: 1.0522838592529298, correct: 6202, total: 10000, acc = 0.620199978351593 +epoch: 10, train loss: 1.0780728623575093 +epoch: 11, train loss: 1.0522098152004942 +epoch: 11, eval loss: 1.0902862310409547, correct: 6148, total: 10000, acc = 0.614799976348877 +epoch: 12, train loss: 1.0366473337825464 +epoch: 13, train loss: 1.0067467458394108 +epoch: 13, eval loss: 0.9696728616952897, correct: 6531, total: 10000, acc = 0.6530999541282654 +epoch: 14, train loss: 0.9676224273078295 +epoch: 15, train loss: 0.9494374029490412 +epoch: 15, eval loss: 0.9511896312236786, correct: 6646, total: 10000, acc = 0.6645999550819397 +epoch: 16, train loss: 0.9231320935852674 +epoch: 17, train loss: 0.9023846679804276 +epoch: 17, eval loss: 0.8728409796953202, correct: 6866, total: 10000, acc = 0.6865999698638916 +epoch: 18, train loss: 0.8684309854799387 +epoch: 19, train loss: 0.836099565637355 +epoch: 19, eval loss: 0.8208363801240921, correct: 7091, total: 10000, acc = 0.7091000080108643 +epoch: 20, train loss: 0.8285067890371595 +epoch: 21, train loss: 0.7930980793067387 +epoch: 21, eval loss: 0.7793890535831451, correct: 7235, total: 10000, acc = 0.7234999537467957 +epoch: 22, train loss: 0.762698369366782 +epoch: 23, train loss: 0.7376812471418964 +epoch: 23, eval loss: 0.746866625547409, correct: 7340, total: 10000, acc = 0.7339999675750732 +epoch: 24, train loss: 0.7071484223920472 +epoch: 25, train loss: 0.6905171658311572 +epoch: 25, eval loss: 0.6909466415643692, correct: 7526, total: 10000, acc = 0.7525999546051025 +epoch: 26, train loss: 0.6608500091397033 +epoch: 27, train loss: 0.65504517907999 +epoch: 27, eval loss: 0.6612646311521531, correct: 7697, total: 10000, acc = 0.7696999907493591 +epoch: 28, train loss: 0.6234641969203949 +epoch: 29, train loss: 0.6107665622720913 +epoch: 29, eval loss: 0.666494044661522, correct: 7704, total: 10000, acc = 0.7703999876976013 +epoch: 30, train loss: 0.5875011883219894 +epoch: 31, train loss: 0.5739485697478665 +epoch: 31, eval loss: 0.6217960953712464, correct: 7828, total: 10000, acc = 0.7827999591827393 +epoch: 32, train loss: 0.548510205684876 +epoch: 33, train loss: 0.5237194764979032 +epoch: 33, eval loss: 0.6254391580820083, correct: 7842, total: 10000, acc = 0.7841999530792236 +epoch: 34, train loss: 0.5154265892140719 +epoch: 35, train 
loss: 0.494700480176478 +epoch: 35, eval loss: 0.5981663644313813, correct: 7963, total: 10000, acc = 0.7962999939918518 +epoch: 36, train loss: 0.4785171020395902 +epoch: 37, train loss: 0.46277919259606576 +epoch: 37, eval loss: 0.6061880439519882, correct: 7958, total: 10000, acc = 0.795799970626831 +epoch: 38, train loss: 0.4398626606075131 +epoch: 39, train loss: 0.4206806777083144 +epoch: 39, eval loss: 0.6158866941928863, correct: 7959, total: 10000, acc = 0.7958999872207642 +epoch: 40, train loss: 0.40768756550185536 +epoch: 41, train loss: 0.39494050035671313 +epoch: 41, eval loss: 0.5725498422980309, correct: 8132, total: 10000, acc = 0.8131999969482422 +epoch: 42, train loss: 0.3742571521778496 +epoch: 43, train loss: 0.3583034301290707 +epoch: 43, eval loss: 0.5765605017542839, correct: 8155, total: 10000, acc = 0.8154999613761902 +epoch: 44, train loss: 0.3342630756752832 +epoch: 45, train loss: 0.31316718063792404 +epoch: 45, eval loss: 0.583588008582592, correct: 8199, total: 10000, acc = 0.8198999762535095 +epoch: 46, train loss: 0.30922748148441315 +epoch: 47, train loss: 0.2906164434187266 +epoch: 47, eval loss: 0.5934860140085221, correct: 8143, total: 10000, acc = 0.814300000667572 +epoch: 48, train loss: 0.2741488078419043 +epoch: 49, train loss: 0.2597196321098172 +epoch: 49, eval loss: 0.5978868633508683, correct: 8195, total: 10000, acc = 0.8194999694824219 +epoch: 50, train loss: 0.2440016470393356 +epoch: 51, train loss: 0.2293997729311184 +epoch: 51, eval loss: 0.5915440261363983, correct: 8232, total: 10000, acc = 0.823199987411499 +epoch: 52, train loss: 0.2132072006257213 +epoch: 53, train loss: 0.19785404767917128 +epoch: 53, eval loss: 0.6171442106366157, correct: 8258, total: 10000, acc = 0.8258000016212463 +epoch: 54, train loss: 0.1838149410121295 +epoch: 55, train loss: 0.17691133977199086 +epoch: 55, eval loss: 0.623777586221695, correct: 8275, total: 10000, acc = 0.8274999856948853 +epoch: 56, train loss: 0.16595362697024735 +epoch: 57, train loss: 0.1531825682946614 +epoch: 57, eval loss: 0.6466041743755341, correct: 8243, total: 10000, acc = 0.8242999911308289 +epoch: 58, train loss: 0.14334788979316243 +epoch: 59, train loss: 0.13799503377201605 +epoch: 59, eval loss: 0.6496601745486259, correct: 8249, total: 10000, acc = 0.8248999714851379 +finish training diff --git a/tests/test_models/test_vision_transformer/test_vit_2p5d/log/111log1e-3hxmodel.txt b/tests/test_models/test_vision_transformer/test_vit_2p5d/log/111log1e-3hxmodel.txt new file mode 100644 index 000000000..9bb1bf4bb --- /dev/null +++ b/tests/test_models/test_vision_transformer/test_vit_2p5d/log/111log1e-3hxmodel.txt @@ -0,0 +1,196 @@ + +c196-011[rtx](1013)$ bash ./test.sh 1 1 1 0.001 +TACC: Starting up job 3503164 +TACC: Starting parallel tasks... 
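For reference on the eval() routines in test_vit_2d.py and 2p5d.py above: the classifier output is partitioned across the 2D (row/column) or 2.5D (row/column/depth) process grid, so it is gathered along each grid axis before torch.argmax and the correct/total count. Below is a rough, self-contained sketch of what one such per-axis gather amounts to with plain torch.distributed; gather_along_dim is a hypothetical helper, not the _gather imported in those tests, and it assumes an already initialized process group with equal shard shapes on every rank:

import torch
import torch.distributed as dist

def gather_along_dim(shard: torch.Tensor, dim: int, group=None) -> torch.Tensor:
    # Collect the shard held by every rank in `group` and concatenate
    # the pieces along `dim`, so every rank ends up with the full tensor.
    world_size = dist.get_world_size(group=group)
    pieces = [torch.empty_like(shard) for _ in range(world_size)]
    dist.all_gather(pieces, shard.contiguous(), group=group)
    return torch.cat(pieces, dim=dim)

# Mirroring the calls in eval(): gather along dim 1 within the row group,
# then along dim 0 within the column group (plus the depth group for 2.5D),
# where row_group/col_group are hypothetical handles to those process groups:
#   logits = gather_along_dim(logits, dim=1, group=row_group)
#   logits = gather_along_dim(logits, dim=0, group=col_group)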
+warning: variables which starts with __, is a module or class declaration are omitted +process rank 0 is bound to device 0 +distributed environment is initialzied +USE_VANILLA model +model is created +Files already downloaded and verified +Files already downloaded and verified +training and testing dataloaders are created +loss is created +optimizer is created +start training +epoch: 0, train loss: 1.9408839624755236 +epoch: 0, eval loss: 1.7896566271781922, correct: 3488, total: 10000, acc = 0.34880000352859497 +epoch time: 40.82966494560242 +epoch: 1, train loss: 1.6500030257263962 +epoch: 1, eval loss: 1.5464953780174255, correct: 4545, total: 10000, acc = 0.4544999897480011 +epoch time: 40.01254224777222 +epoch: 2, train loss: 1.422887429899099 +epoch: 2, eval loss: 1.37536381483078, correct: 5074, total: 10000, acc = 0.5073999762535095 +epoch time: 40.107905864715576 +epoch: 3, train loss: 1.3217590207956276 +epoch: 3, eval loss: 1.3036327004432677, correct: 5377, total: 10000, acc = 0.5376999974250793 +epoch time: 40.12306189537048 +epoch: 4, train loss: 1.262234352072891 +epoch: 4, eval loss: 1.2568134129047395, correct: 5475, total: 10000, acc = 0.5475000143051147 +epoch time: 40.10755228996277 +epoch: 5, train loss: 1.2381379117771072 +epoch: 5, eval loss: 1.1941023647785187, correct: 5676, total: 10000, acc = 0.5676000118255615 +epoch time: 40.119303464889526 +epoch: 6, train loss: 1.2061052650821453 +epoch: 6, eval loss: 1.1313925206661224, correct: 5938, total: 10000, acc = 0.5938000082969666 +epoch time: 40.07719683647156 +epoch: 7, train loss: 1.1659562563409611 +epoch: 7, eval loss: 1.125486546754837, correct: 5958, total: 10000, acc = 0.59579998254776 +epoch time: 40.1702299118042 +epoch: 8, train loss: 1.1378972846634534 +epoch: 8, eval loss: 1.082760637998581, correct: 6102, total: 10000, acc = 0.6101999878883362 +epoch time: 40.22099733352661 +epoch: 9, train loss: 1.1073276430976635 +epoch: 9, eval loss: 1.1077564001083373, correct: 6038, total: 10000, acc = 0.6037999987602234 +epoch time: 40.1106858253479 +epoch: 10, train loss: 1.087894769347444 +epoch: 10, eval loss: 1.0400531351566316, correct: 6311, total: 10000, acc = 0.6310999989509583 +epoch time: 40.20973324775696 +epoch: 11, train loss: 1.0556547295074075 +epoch: 11, eval loss: 1.0295817345380782, correct: 6359, total: 10000, acc = 0.6358999609947205 +epoch time: 40.23791980743408 +epoch: 12, train loss: 1.0299884901971232 +epoch: 12, eval loss: 1.003737959265709, correct: 6380, total: 10000, acc = 0.6380000114440918 +epoch time: 40.08779859542847 +epoch: 13, train loss: 0.9972386627781148 +epoch: 13, eval loss: 0.9707699298858643, correct: 6499, total: 10000, acc = 0.649899959564209 +epoch time: 40.10878801345825 +epoch: 14, train loss: 0.9784559072280417 +epoch: 14, eval loss: 0.9253897607326508, correct: 6641, total: 10000, acc = 0.6640999913215637 +epoch time: 40.13168978691101 +epoch: 15, train loss: 0.9409253481699495 +epoch: 15, eval loss: 0.9120320588350296, correct: 6759, total: 10000, acc = 0.6758999824523926 +epoch time: 40.162830114364624 +epoch: 16, train loss: 0.925923115136672 +epoch: 16, eval loss: 0.8850776582956315, correct: 6870, total: 10000, acc = 0.6869999766349792 +epoch time: 40.145774602890015 +epoch: 17, train loss: 0.8923340841215484 +epoch: 17, eval loss: 0.8570599347352982, correct: 6950, total: 10000, acc = 0.6949999928474426 +epoch time: 40.18058943748474 +epoch: 18, train loss: 0.8638542884466599 +epoch: 18, eval loss: 0.838410159945488, correct: 6971, total: 10000, acc = 
0.6970999836921692 +epoch time: 40.110822439193726 +epoch: 19, train loss: 0.8400422529298432 +epoch: 19, eval loss: 0.8189669162034988, correct: 7097, total: 10000, acc = 0.7096999883651733 +epoch time: 40.066970109939575 +epoch: 20, train loss: 0.8072922752828015 +epoch: 20, eval loss: 0.7772788077592849, correct: 7240, total: 10000, acc = 0.7239999771118164 +epoch time: 40.045086145401 +epoch: 21, train loss: 0.788195074821005 +epoch: 21, eval loss: 0.7793144911527634, correct: 7261, total: 10000, acc = 0.726099967956543 +epoch time: 40.05983781814575 +epoch: 22, train loss: 0.7574447350842612 +epoch: 22, eval loss: 0.7660320281982422, correct: 7272, total: 10000, acc = 0.7271999716758728 +epoch time: 40.11693739891052 +epoch: 23, train loss: 0.7402738150285215 +epoch: 23, eval loss: 0.7264292597770691, correct: 7418, total: 10000, acc = 0.7418000102043152 +epoch time: 40.18724513053894 +epoch: 24, train loss: 0.7125097580102026 +epoch: 24, eval loss: 0.7105035990476608, correct: 7506, total: 10000, acc = 0.7505999803543091 +epoch time: 40.1254940032959 +epoch: 25, train loss: 0.6900304744438249 +epoch: 25, eval loss: 0.6911167114973068, correct: 7562, total: 10000, acc = 0.7561999559402466 +epoch time: 40.103896617889404 +epoch: 26, train loss: 0.6648721482072558 +epoch: 26, eval loss: 0.6780407190322876, correct: 7624, total: 10000, acc = 0.7623999714851379 +epoch time: 40.18161463737488 +epoch: 27, train loss: 0.6446310062797702 +epoch: 27, eval loss: 0.6820667266845704, correct: 7612, total: 10000, acc = 0.761199951171875 +epoch time: 40.19018864631653 +epoch: 28, train loss: 0.6262476389505425 +epoch: 28, eval loss: 0.6506347745656967, correct: 7704, total: 10000, acc = 0.7703999876976013 +epoch time: 40.23526978492737 +epoch: 29, train loss: 0.5968854001590184 +epoch: 29, eval loss: 0.6507940381765366, correct: 7727, total: 10000, acc = 0.7726999521255493 +epoch time: 40.26889181137085 +epoch: 30, train loss: 0.587430303194085 +epoch: 30, eval loss: 0.6333519726991653, correct: 7788, total: 10000, acc = 0.7787999510765076 +epoch time: 40.28285789489746 +epoch: 31, train loss: 0.5701514035463333 +epoch: 31, eval loss: 0.6348810195922852, correct: 7799, total: 10000, acc = 0.7798999547958374 +epoch time: 40.199995040893555 +epoch: 32, train loss: 0.5482188679125845 +epoch: 32, eval loss: 0.6192457497119903, correct: 7833, total: 10000, acc = 0.78329998254776 +epoch time: 40.270729780197144 +epoch: 33, train loss: 0.534268391375639 +epoch: 33, eval loss: 0.6381673783063888, correct: 7790, total: 10000, acc = 0.7789999842643738 +epoch time: 40.36342120170593 +epoch: 34, train loss: 0.5104483384258893 +epoch: 34, eval loss: 0.6173199415206909, correct: 7867, total: 10000, acc = 0.7866999506950378 +epoch time: 40.34266257286072 +epoch: 35, train loss: 0.4968841674984718 +epoch: 35, eval loss: 0.604002220928669, correct: 7916, total: 10000, acc = 0.7915999889373779 +epoch time: 40.39444589614868 +epoch: 36, train loss: 0.4773432207959039 +epoch: 36, eval loss: 0.5884111285209656, correct: 7965, total: 10000, acc = 0.7964999675750732 +epoch time: 40.40647268295288 +epoch: 37, train loss: 0.4621481445370888 +epoch: 37, eval loss: 0.5748852327466011, correct: 8047, total: 10000, acc = 0.8046999573707581 +epoch time: 40.29281520843506 +epoch: 38, train loss: 0.4431859048045411 +epoch: 38, eval loss: 0.5874941781163215, correct: 7995, total: 10000, acc = 0.7994999885559082 +epoch time: 40.40029954910278 +epoch: 39, train loss: 0.4305852785402415 +epoch: 39, eval loss: 0.5991648495197296, 
correct: 7972, total: 10000, acc = 0.7971999645233154 +epoch time: 40.399904012680054 +epoch: 40, train loss: 0.4092241589512144 +epoch: 40, eval loss: 0.5725525215268135, correct: 8069, total: 10000, acc = 0.8068999648094177 +epoch time: 40.32663059234619 +epoch: 41, train loss: 0.39218547179990887 +epoch: 41, eval loss: 0.5886161357164383, correct: 8068, total: 10000, acc = 0.8068000078201294 +epoch time: 40.32424521446228 +epoch: 42, train loss: 0.3773612398274091 +epoch: 42, eval loss: 0.5762413635849952, correct: 8126, total: 10000, acc = 0.8125999569892883 +epoch time: 40.44430422782898 +epoch: 43, train loss: 0.3593267098981507 +epoch: 43, eval loss: 0.5729024946689606, correct: 8107, total: 10000, acc = 0.810699999332428 +epoch time: 40.488121032714844 +epoch: 44, train loss: 0.3396431426612698 +epoch: 44, eval loss: 0.5944831907749176, correct: 8072, total: 10000, acc = 0.8071999549865723 +epoch time: 40.41803979873657 +epoch: 45, train loss: 0.32412939716358574 +epoch: 45, eval loss: 0.5849291861057282, correct: 8171, total: 10000, acc = 0.8170999884605408 +epoch time: 40.428131341934204 +epoch: 46, train loss: 0.3099915471916296 +epoch: 46, eval loss: 0.5797522723674774, correct: 8121, total: 10000, acc = 0.8120999932289124 +epoch time: 40.623990058898926 +epoch: 47, train loss: 0.29422828676749246 +epoch: 47, eval loss: 0.5898703813552857, correct: 8175, total: 10000, acc = 0.8174999952316284 +epoch time: 40.71224045753479 +epoch: 48, train loss: 0.27581544600579205 +epoch: 48, eval loss: 0.5950756087899208, correct: 8170, total: 10000, acc = 0.8169999718666077 +epoch time: 40.53409385681152 +epoch: 49, train loss: 0.26118586242807157 +epoch: 49, eval loss: 0.5998703584074974, correct: 8213, total: 10000, acc = 0.8212999701499939 +epoch time: 40.564385175704956 +epoch: 50, train loss: 0.2513351797753451 +epoch: 50, eval loss: 0.6011391341686249, correct: 8226, total: 10000, acc = 0.8226000070571899 +epoch time: 40.55033254623413 +epoch: 51, train loss: 0.22965944299892505 +epoch: 51, eval loss: 0.5979882061481476, correct: 8233, total: 10000, acc = 0.8233000040054321 +epoch time: 40.54532980918884 +epoch: 52, train loss: 0.21661002188920975 +epoch: 52, eval loss: 0.6121026620268821, correct: 8220, total: 10000, acc = 0.8219999670982361 +epoch time: 40.649473667144775 +epoch: 53, train loss: 0.20266114950788264 +epoch: 53, eval loss: 0.6016955643892288, correct: 8260, total: 10000, acc = 0.8259999752044678 +epoch time: 40.752054929733276 +epoch: 54, train loss: 0.19287180794136866 +epoch: 54, eval loss: 0.6043265879154205, correct: 8284, total: 10000, acc = 0.8283999562263489 +epoch time: 40.68043255805969 +epoch: 55, train loss: 0.175087109208107 +epoch: 55, eval loss: 0.6146622076630592, correct: 8316, total: 10000, acc = 0.8315999507904053 +epoch time: 40.58446717262268 +epoch: 56, train loss: 0.16749868762432313 +epoch: 56, eval loss: 0.6235148012638092, correct: 8313, total: 10000, acc = 0.8312999606132507 +epoch time: 40.62826180458069 +epoch: 57, train loss: 0.15567801619062618 +epoch: 57, eval loss: 0.6325852945446968, correct: 8308, total: 10000, acc = 0.8307999968528748 +epoch time: 40.72224497795105 +epoch: 58, train loss: 0.1484297229623308 +epoch: 58, eval loss: 0.6329193383455276, correct: 8325, total: 10000, acc = 0.8324999809265137 +epoch time: 40.750558614730835 +epoch: 59, train loss: 0.14238623818572688 +epoch: 59, eval loss: 0.6318104699254036, correct: 8329, total: 10000, acc = 0.8328999876976013 +epoch time: 40.77172636985779 +finish training \ No newline 
at end of file diff --git a/tests/test_models/test_vision_transformer/test_vit_2p5d/log/111log1e-4.txt b/tests/test_models/test_vision_transformer/test_vit_2p5d/log/111log1e-4.txt new file mode 100644 index 000000000..d7404eea6 --- /dev/null +++ b/tests/test_models/test_vision_transformer/test_vit_2p5d/log/111log1e-4.txt @@ -0,0 +1,103 @@ +TACC: Starting up job 3498663 +TACC: Starting parallel tasks... +warning: variables which starts with __, is a module or class declaration are omitted +process rank 0 is bound to device 0 +distributed environment is initialzied +model is created +Files already downloaded and verified +Files already downloaded and verified +training and testing dataloaders are created +loss is created +optimizer is created +start training +epoch: 0, train loss: 2.095031557034473 +epoch: 1, train loss: 1.8454539605549403 +epoch: 1, eval loss: 1.7768513083457946, correct: 3564, total: 10000, acc = 0.3563999831676483 +epoch: 2, train loss: 1.7044833728245325 +epoch: 3, train loss: 1.5999061124665397 +epoch: 3, eval loss: 1.5574450254440309, correct: 4389, total: 10000, acc = 0.4388999938964844 +epoch: 4, train loss: 1.4929670217085858 +epoch: 5, train loss: 1.401450170546162 +epoch: 5, eval loss: 1.4644017696380616, correct: 4857, total: 10000, acc = 0.48569998145103455 +epoch: 6, train loss: 1.319102376091237 +epoch: 7, train loss: 1.2555806539496597 +epoch: 7, eval loss: 1.2475590467453004, correct: 5486, total: 10000, acc = 0.5485999584197998 +epoch: 8, train loss: 1.1992503173497258 +epoch: 9, train loss: 1.1600336493278036 +epoch: 9, eval loss: 1.1786625683307648, correct: 5834, total: 10000, acc = 0.5834000110626221 +epoch: 10, train loss: 1.1214540807568296 +epoch: 11, train loss: 1.0808329728184913 +epoch: 11, eval loss: 1.096825110912323, correct: 6072, total: 10000, acc = 0.6071999669075012 +epoch: 12, train loss: 1.0521019423494533 +epoch: 13, train loss: 1.0262362957000732 +epoch: 13, eval loss: 1.056444275379181, correct: 6268, total: 10000, acc = 0.626800000667572 +epoch: 14, train loss: 0.9932536555796253 +epoch: 15, train loss: 0.9653559442685575 +epoch: 15, eval loss: 0.9576991081237793, correct: 6582, total: 10000, acc = 0.6581999659538269 +epoch: 16, train loss: 0.9465620943478176 +epoch: 17, train loss: 0.9181081974992946 +epoch: 17, eval loss: 0.9245584070682525, correct: 6747, total: 10000, acc = 0.6746999621391296 +epoch: 18, train loss: 0.8987109752333894 +epoch: 19, train loss: 0.8840238646585115 +epoch: 19, eval loss: 0.8989996433258056, correct: 6787, total: 10000, acc = 0.6786999702453613 +epoch: 20, train loss: 0.8591911811001447 +epoch: 21, train loss: 0.843510093129411 +epoch: 21, eval loss: 0.8595858901739121, correct: 6969, total: 10000, acc = 0.6969000101089478 +epoch: 22, train loss: 0.8306782276046519 +epoch: 23, train loss: 0.8181647640101763 +epoch: 23, eval loss: 0.8600298583507537, correct: 7005, total: 10000, acc = 0.7005000114440918 +epoch: 24, train loss: 0.7964763343334198 +epoch: 25, train loss: 0.7840689718723297 +epoch: 25, eval loss: 0.824479615688324, correct: 7073, total: 10000, acc = 0.7073000073432922 +epoch: 26, train loss: 0.7709570752114666 +epoch: 27, train loss: 0.7591698108887186 +epoch: 27, eval loss: 0.7967212647199631, correct: 7196, total: 10000, acc = 0.7195999622344971 +epoch: 28, train loss: 0.7438001352913526 +epoch: 29, train loss: 0.7341659853653032 +epoch: 29, eval loss: 0.8041222035884857, correct: 7168, total: 10000, acc = 0.7167999744415283 +epoch: 30, train loss: 0.7254330929444761 +epoch: 31, train 
loss: 0.710246913895315 +epoch: 31, eval loss: 0.7848481118679047, correct: 7287, total: 10000, acc = 0.7286999821662903 +epoch: 32, train loss: 0.6976562008565786 +epoch: 33, train loss: 0.6906438475968887 +epoch: 33, eval loss: 0.7644171923398971, correct: 7370, total: 10000, acc = 0.7369999885559082 +epoch: 34, train loss: 0.6795850834067987 +epoch: 35, train loss: 0.6724951656497254 +epoch: 35, eval loss: 0.7515032321214676, correct: 7368, total: 10000, acc = 0.736799955368042 +epoch: 36, train loss: 0.6527298372619006 +epoch: 37, train loss: 0.651018523440069 +epoch: 37, eval loss: 0.7381327033042908, correct: 7449, total: 10000, acc = 0.7448999881744385 +epoch: 38, train loss: 0.6365304406808348 +epoch: 39, train loss: 0.6372388047831399 +epoch: 39, eval loss: 0.7342826008796692, correct: 7453, total: 10000, acc = 0.7452999949455261 +epoch: 40, train loss: 0.6199644664112403 +epoch: 41, train loss: 0.6101092303894005 +epoch: 41, eval loss: 0.7353240340948105, correct: 7466, total: 10000, acc = 0.7465999722480774 +epoch: 42, train loss: 0.6093496211937496 +epoch: 43, train loss: 0.6019633388032719 +epoch: 43, eval loss: 0.7350291252136231, correct: 7479, total: 10000, acc = 0.7479000091552734 +epoch: 44, train loss: 0.5928211437196148 +epoch: 45, train loss: 0.5840530048827736 +epoch: 45, eval loss: 0.7301350146532058, correct: 7525, total: 10000, acc = 0.7524999976158142 +epoch: 46, train loss: 0.578370426078232 +epoch: 47, train loss: 0.5703256440405943 +epoch: 47, eval loss: 0.7226948082447052, correct: 7526, total: 10000, acc = 0.7525999546051025 +epoch: 48, train loss: 0.5622531275968162 +epoch: 49, train loss: 0.5543749076979501 +epoch: 49, eval loss: 0.7278151929378509, correct: 7536, total: 10000, acc = 0.753600001335144 +epoch: 50, train loss: 0.5494355583677486 +epoch: 51, train loss: 0.5427058047177841 +epoch: 51, eval loss: 0.7180711388587951, correct: 7608, total: 10000, acc = 0.7608000040054321 +epoch: 52, train loss: 0.5323820530760045 +epoch: 53, train loss: 0.5341374232452742 +epoch: 53, eval loss: 0.7136827558279037, correct: 7618, total: 10000, acc = 0.7617999911308289 +epoch: 54, train loss: 0.5295403867351766 +epoch: 55, train loss: 0.5226148692320804 +epoch: 55, eval loss: 0.7158426463603973, correct: 7624, total: 10000, acc = 0.7623999714851379 +epoch: 56, train loss: 0.5206544593888887 +epoch: 57, train loss: 0.5186455438331682 +epoch: 57, eval loss: 0.7141193479299546, correct: 7611, total: 10000, acc = 0.7610999941825867 +epoch: 58, train loss: 0.5130856335163116 +epoch: 59, train loss: 0.5103850683995655 +epoch: 59, eval loss: 0.7077989399433136, correct: 7628, total: 10000, acc = 0.7627999782562256 +finish training diff --git a/tests/test_models/test_vision_transformer/test_vit_2p5d/log/111log1e-4hxmodel.txt b/tests/test_models/test_vision_transformer/test_vit_2p5d/log/111log1e-4hxmodel.txt new file mode 100644 index 000000000..72889a455 --- /dev/null +++ b/tests/test_models/test_vision_transformer/test_vit_2p5d/log/111log1e-4hxmodel.txt @@ -0,0 +1,195 @@ +c196-012[rtx](1006)$ bash ./test.sh 1 1 1 0.0001 +TACC: Starting up job 3503177 +TACC: Starting parallel tasks... 
+warning: variables which starts with __, is a module or class declaration are omitted +process rank 0 is bound to device 0 +distributed environment is initialzied +USE_VANILLA model +model is created +Files already downloaded and verified +Files already downloaded and verified +training and testing dataloaders are created +loss is created +optimizer is created +start training +epoch: 0, train loss: 2.07912605757616 +epoch: 0, eval loss: 1.9337591707706452, correct: 2845, total: 10000, acc = 0.28450000286102295 +epoch time: 48.79993748664856 +epoch: 1, train loss: 1.8506990890113675 +epoch: 1, eval loss: 1.7832269430160523, correct: 3506, total: 10000, acc = 0.350600004196167 +epoch time: 39.10968255996704 +epoch: 2, train loss: 1.707400695401795 +epoch: 2, eval loss: 1.6983122050762176, correct: 3935, total: 10000, acc = 0.3935000002384186 +epoch time: 39.205119609832764 +epoch: 3, train loss: 1.5925798574272467 +epoch: 3, eval loss: 1.6361137092113496, correct: 4276, total: 10000, acc = 0.4275999963283539 +epoch time: 39.220152378082275 +epoch: 4, train loss: 1.4817699790000916 +epoch: 4, eval loss: 1.4869949519634247, correct: 4706, total: 10000, acc = 0.4705999791622162 +epoch time: 39.297648191452026 +epoch: 5, train loss: 1.3685331247290786 +epoch: 5, eval loss: 1.4110832333564758, correct: 5043, total: 10000, acc = 0.5042999982833862 +epoch time: 39.31484127044678 +epoch: 6, train loss: 1.283743022655954 +epoch: 6, eval loss: 1.317776972055435, correct: 5320, total: 10000, acc = 0.5320000052452087 +epoch time: 39.31891870498657 +epoch: 7, train loss: 1.2292176107971036 +epoch: 7, eval loss: 1.2397323846817017, correct: 5619, total: 10000, acc = 0.5618999600410461 +epoch time: 39.31014013290405 +epoch: 8, train loss: 1.1705418606193698 +epoch: 8, eval loss: 1.2041720151901245, correct: 5696, total: 10000, acc = 0.569599986076355 +epoch time: 39.29190945625305 +epoch: 9, train loss: 1.1253369718181843 +epoch: 9, eval loss: 1.1219275832176208, correct: 6039, total: 10000, acc = 0.6038999557495117 +epoch time: 39.314892053604126 +epoch: 10, train loss: 1.0875617825255102 +epoch: 10, eval loss: 1.1398449420928956, correct: 5921, total: 10000, acc = 0.5920999646186829 +epoch time: 39.29768466949463 +epoch: 11, train loss: 1.055325626110544 +epoch: 11, eval loss: 1.0739773243665696, correct: 6212, total: 10000, acc = 0.6211999654769897 +epoch time: 39.26834416389465 +epoch: 12, train loss: 1.0238730627663282 +epoch: 12, eval loss: 1.0526267528533935, correct: 6244, total: 10000, acc = 0.6243999600410461 +epoch time: 39.30522894859314 +epoch: 13, train loss: 0.9906492087305808 +epoch: 13, eval loss: 1.0342225402593612, correct: 6295, total: 10000, acc = 0.6294999718666077 +epoch time: 39.28985071182251 +epoch: 14, train loss: 0.968360669758855 +epoch: 14, eval loss: 0.9747557610273361, correct: 6498, total: 10000, acc = 0.6498000025749207 +epoch time: 39.33563685417175 +epoch: 15, train loss: 0.9413909072778663 +epoch: 15, eval loss: 0.9359912216663361, correct: 6659, total: 10000, acc = 0.6658999919891357 +epoch time: 39.332377672195435 +epoch: 16, train loss: 0.9215109226654987 +epoch: 16, eval loss: 0.9215879321098328, correct: 6693, total: 10000, acc = 0.6692999601364136 +epoch time: 39.35148882865906 +epoch: 17, train loss: 0.9036085179873875 +epoch: 17, eval loss: 0.8947311192750931, correct: 6787, total: 10000, acc = 0.6786999702453613 +epoch time: 39.31995511054993 +epoch: 18, train loss: 0.8774841433885147 +epoch: 18, eval loss: 0.8880111247301101, correct: 6844, total: 10000, acc 
= 0.6843999624252319 +epoch time: 39.32100558280945 +epoch: 19, train loss: 0.8607137598553483 +epoch: 19, eval loss: 0.8770220369100571, correct: 6883, total: 10000, acc = 0.6882999539375305 +epoch time: 39.3321533203125 +epoch: 20, train loss: 0.8482279163234088 +epoch: 20, eval loss: 0.8661656975746155, correct: 6926, total: 10000, acc = 0.6926000118255615 +epoch time: 39.319167613983154 +epoch: 21, train loss: 0.8280732814146547 +epoch: 21, eval loss: 0.8369802534580231, correct: 7041, total: 10000, acc = 0.7040999531745911 +epoch time: 39.32543706893921 +epoch: 22, train loss: 0.8162973212952517 +epoch: 22, eval loss: 0.8281545102596283, correct: 7096, total: 10000, acc = 0.7095999717712402 +epoch time: 39.344929695129395 +epoch: 23, train loss: 0.8043988426120914 +epoch: 23, eval loss: 0.8369941651821137, correct: 7070, total: 10000, acc = 0.7069999575614929 +epoch time: 39.342397928237915 +epoch: 24, train loss: 0.788704516328111 +epoch: 24, eval loss: 0.8305304765701294, correct: 7040, total: 10000, acc = 0.7039999961853027 +epoch time: 39.349589347839355 +epoch: 25, train loss: 0.7747861517935383 +epoch: 25, eval loss: 0.8025588423013688, correct: 7164, total: 10000, acc = 0.7163999676704407 +epoch time: 39.35692596435547 +epoch: 26, train loss: 0.7557641073149077 +epoch: 26, eval loss: 0.7929455429315567, correct: 7204, total: 10000, acc = 0.7203999757766724 +epoch time: 39.36091661453247 +epoch: 27, train loss: 0.7422851062550837 +epoch: 27, eval loss: 0.7790816932916641, correct: 7249, total: 10000, acc = 0.7249000072479248 +epoch time: 39.355828046798706 +epoch: 28, train loss: 0.7305653861590794 +epoch: 28, eval loss: 0.7937072366476059, correct: 7204, total: 10000, acc = 0.7203999757766724 +epoch time: 39.3598473072052 +epoch: 29, train loss: 0.719313730998915 +epoch: 29, eval loss: 0.7657937437295914, correct: 7320, total: 10000, acc = 0.7319999933242798 +epoch time: 39.353551626205444 +epoch: 30, train loss: 0.7127084263733455 +epoch: 30, eval loss: 0.7556168884038925, correct: 7341, total: 10000, acc = 0.7340999841690063 +epoch time: 39.37097501754761 +epoch: 31, train loss: 0.7044506967067719 +epoch: 31, eval loss: 0.7438590109348298, correct: 7359, total: 10000, acc = 0.7358999848365784 +epoch time: 39.37364745140076 +epoch: 32, train loss: 0.6920064693810989 +epoch: 32, eval loss: 0.7408553540706635, correct: 7419, total: 10000, acc = 0.7418999671936035 +epoch time: 39.372353076934814 +epoch: 33, train loss: 0.6790882920732304 +epoch: 33, eval loss: 0.7541307628154754, correct: 7332, total: 10000, acc = 0.733199954032898 +epoch time: 39.310251235961914 +epoch: 34, train loss: 0.6666433202977083 +epoch: 34, eval loss: 0.7413494348526001, correct: 7401, total: 10000, acc = 0.7400999665260315 +epoch time: 39.394805908203125 +epoch: 35, train loss: 0.6561720742254841 +epoch: 35, eval loss: 0.7245241671800613, correct: 7483, total: 10000, acc = 0.7482999563217163 +epoch time: 39.34455704689026 +epoch: 36, train loss: 0.6433814526820669 +epoch: 36, eval loss: 0.7294039458036423, correct: 7483, total: 10000, acc = 0.7482999563217163 +epoch time: 39.337549924850464 +epoch: 37, train loss: 0.6366085136423305 +epoch: 37, eval loss: 0.7336494833230972, correct: 7462, total: 10000, acc = 0.7461999654769897 +epoch time: 39.338196754455566 +epoch: 38, train loss: 0.6294400272320728 +epoch: 38, eval loss: 0.719609409570694, correct: 7532, total: 10000, acc = 0.7531999945640564 +epoch time: 39.33430027961731 +epoch: 39, train loss: 0.6179663903859197 +epoch: 39, eval loss: 
0.7210630685091018, correct: 7507, total: 10000, acc = 0.7506999969482422 +epoch time: 39.33643341064453 +epoch: 40, train loss: 0.6102935781284254 +epoch: 40, eval loss: 0.6994094282388688, correct: 7569, total: 10000, acc = 0.7568999528884888 +epoch time: 39.38672637939453 +epoch: 41, train loss: 0.5990810029360712 +epoch: 41, eval loss: 0.7133035778999328, correct: 7550, total: 10000, acc = 0.7549999952316284 +epoch time: 39.374757528305054 +epoch: 42, train loss: 0.5964441865074391 +epoch: 42, eval loss: 0.7060712993144989, correct: 7577, total: 10000, acc = 0.7576999664306641 +epoch time: 39.4019033908844 +epoch: 43, train loss: 0.5878602710305428 +epoch: 43, eval loss: 0.7106044471263886, correct: 7580, total: 10000, acc = 0.7579999566078186 +epoch time: 39.408252477645874 +epoch: 44, train loss: 0.5797601254010687 +epoch: 44, eval loss: 0.7093768745660782, correct: 7568, total: 10000, acc = 0.7567999958992004 +epoch time: 39.40289378166199 +epoch: 45, train loss: 0.5684604742089097 +epoch: 45, eval loss: 0.7075642883777619, correct: 7612, total: 10000, acc = 0.761199951171875 +epoch time: 39.35792422294617 +epoch: 46, train loss: 0.5617077308041709 +epoch: 46, eval loss: 0.707081851363182, correct: 7576, total: 10000, acc = 0.7576000094413757 +epoch time: 39.37784481048584 +epoch: 47, train loss: 0.5572127462649832 +epoch: 47, eval loss: 0.7069586098194123, correct: 7606, total: 10000, acc = 0.7605999708175659 +epoch time: 39.33794188499451 +epoch: 48, train loss: 0.5519619742218329 +epoch: 48, eval loss: 0.6923990368843078, correct: 7679, total: 10000, acc = 0.7678999900817871 +epoch time: 39.39500594139099 +epoch: 49, train loss: 0.5454421751961416 +epoch: 49, eval loss: 0.7032370567321777, correct: 7626, total: 10000, acc = 0.7626000046730042 +epoch time: 39.38570594787598 +epoch: 50, train loss: 0.5419908360559114 +epoch: 50, eval loss: 0.6949253618717194, correct: 7669, total: 10000, acc = 0.7669000029563904 +epoch time: 39.334325551986694 +epoch: 51, train loss: 0.5299993215166793 +epoch: 51, eval loss: 0.6966427147388459, correct: 7654, total: 10000, acc = 0.7653999924659729 +epoch time: 39.337984561920166 +epoch: 52, train loss: 0.5282451452649369 +epoch: 52, eval loss: 0.6932955116033555, correct: 7664, total: 10000, acc = 0.7663999795913696 +epoch time: 39.34237813949585 +epoch: 53, train loss: 0.5234840703862054 +epoch: 53, eval loss: 0.6988086104393005, correct: 7654, total: 10000, acc = 0.7653999924659729 +epoch time: 39.364726066589355 +epoch: 54, train loss: 0.5139317989957576 +epoch: 54, eval loss: 0.6950253814458847, correct: 7643, total: 10000, acc = 0.7642999887466431 +epoch time: 39.40451097488403 +epoch: 55, train loss: 0.5158528734226616 +epoch: 55, eval loss: 0.6978882610797882, correct: 7672, total: 10000, acc = 0.7671999931335449 +epoch time: 39.38926696777344 +epoch: 56, train loss: 0.5082419429506574 +epoch: 56, eval loss: 0.6909049898386002, correct: 7692, total: 10000, acc = 0.7691999673843384 +epoch time: 39.42493271827698 +epoch: 57, train loss: 0.5027476120360044 +epoch: 57, eval loss: 0.6897687911987305, correct: 7695, total: 10000, acc = 0.7694999575614929 +epoch time: 39.35954570770264 +epoch: 58, train loss: 0.5053188776483342 +epoch: 58, eval loss: 0.6899506479501725, correct: 7667, total: 10000, acc = 0.7666999697685242 +epoch time: 39.44884634017944 +epoch: 59, train loss: 0.4997740634241883 +epoch: 59, eval loss: 0.687486720085144, correct: 7678, total: 10000, acc = 0.767799973487854 +epoch time: 39.391881465911865 +finish training diff --git 
a/tests/test_models/test_vision_transformer/test_vit_2p5d/log/421log1e-3.txt b/tests/test_models/test_vision_transformer/test_vit_2p5d/log/421log1e-3.txt new file mode 100644 index 000000000..213cc80fe --- /dev/null +++ b/tests/test_models/test_vision_transformer/test_vit_2p5d/log/421log1e-3.txt @@ -0,0 +1,115 @@ +TACC: Starting up job 3497142 +TACC: Starting parallel tasks... +warning: variables which starts with __, is a module or class declaration are omitted +process rank 0 is bound to device 0 +distributed environment is initialzied +model is created +warning: variables which starts with __, is a module or class declaration are omitted +process rank 2 is bound to device 2 +Files already downloaded and verified +Files already downloaded and verified +warning: variables which starts with __, is a module or class declaration are omitted +process rank 3 is bound to device 3 +Files already downloaded and verified +Files already downloaded and verified +Files already downloaded and verified +Files already downloaded and verified +training and testing dataloaders are created +loss is created +optimizer is created +start training +warning: variables which starts with __, is a module or class declaration are omitted +process rank 1 is bound to device 1 +Files already downloaded and verified +Files already downloaded and verified +epoch: 0, train loss: 1.9320369898056498 +epoch: 1, train loss: 1.6352128605453335 +epoch: 1, eval loss: 1.5123237550258637, correct: 4542, total: 10000, acc = 0.45419999957084656 +epoch: 2, train loss: 1.4457968728882926 +epoch: 3, train loss: 1.3382204977833494 +epoch: 3, eval loss: 1.2539702713489533, correct: 5451, total: 10000, acc = 0.5450999736785889 +epoch: 4, train loss: 1.2739947474732691 +epoch: 5, train loss: 1.2285400483073021 +epoch: 5, eval loss: 1.1386113047599793, correct: 5908, total: 10000, acc = 0.5907999873161316 +epoch: 6, train loss: 1.1903334296479517 +epoch: 7, train loss: 1.1711674235305007 +epoch: 7, eval loss: 1.1258068561553956, correct: 5967, total: 10000, acc = 0.5967000126838684 +epoch: 8, train loss: 1.1419668745021432 +epoch: 9, train loss: 1.1143895728247506 +epoch: 9, eval loss: 1.040754759311676, correct: 6224, total: 10000, acc = 0.6223999857902527 +epoch: 10, train loss: 1.1041023871120141 +epoch: 11, train loss: 1.089750115968743 +epoch: 11, eval loss: 1.0472844064235687, correct: 6265, total: 10000, acc = 0.6265000104904175 +epoch: 12, train loss: 1.064698440687997 +epoch: 13, train loss: 1.038266262229608 +epoch: 13, eval loss: 1.0117274671792984, correct: 6415, total: 10000, acc = 0.6414999961853027 +epoch: 14, train loss: 1.029945282303557 +epoch: 15, train loss: 1.0171620620756734 +epoch: 15, eval loss: 0.9712629705667496, correct: 6519, total: 10000, acc = 0.6518999934196472 +epoch: 16, train loss: 0.9928132119227429 +epoch: 17, train loss: 0.9921575498824217 +epoch: 17, eval loss: 0.9429782271385193, correct: 6641, total: 10000, acc = 0.6640999913215637 +epoch: 18, train loss: 0.9607366293060536 +epoch: 19, train loss: 0.9427766927650997 +epoch: 19, eval loss: 0.9346068739891052, correct: 6623, total: 10000, acc = 0.6622999906539917 +epoch: 20, train loss: 0.9219280481338501 +epoch: 21, train loss: 0.8945026689646195 +epoch: 21, eval loss: 0.8710516095161438, correct: 6909, total: 10000, acc = 0.6908999681472778 +epoch: 22, train loss: 0.8807675826306246 +epoch: 23, train loss: 0.851514169756247 +epoch: 23, eval loss: 0.8239740908145905, correct: 7052, total: 10000, acc = 0.7051999568939209 +epoch: 24, train loss: 
0.8388774534877466 +epoch: 25, train loss: 0.8265813291072845 +epoch: 25, eval loss: 0.8102335959672928, correct: 7137, total: 10000, acc = 0.713699996471405 +epoch: 26, train loss: 0.8057564490911912 +epoch: 27, train loss: 0.7816558753957554 +epoch: 27, eval loss: 0.7648743063211441, correct: 7292, total: 10000, acc = 0.729200005531311 +epoch: 28, train loss: 0.766656969883004 +epoch: 29, train loss: 0.7515677390049915 +epoch: 29, eval loss: 0.7517296761274338, correct: 7360, total: 10000, acc = 0.7360000014305115 +epoch: 30, train loss: 0.7300611174836451 +epoch: 31, train loss: 0.7038229193006244 +epoch: 31, eval loss: 0.7385401755571366, correct: 7375, total: 10000, acc = 0.7374999523162842 +epoch: 32, train loss: 0.6928578931458143 +epoch: 33, train loss: 0.672958068093475 +epoch: 33, eval loss: 0.6915913820266724, correct: 7596, total: 10000, acc = 0.7595999836921692 +epoch: 34, train loss: 0.6505378533382805 +epoch: 35, train loss: 0.6292881539889744 +epoch: 35, eval loss: 0.7068031072616577, correct: 7567, total: 10000, acc = 0.7566999793052673 +epoch: 36, train loss: 0.6092992303322773 +epoch: 37, train loss: 0.5922880838720166 +epoch: 37, eval loss: 0.6735526144504547, correct: 7662, total: 10000, acc = 0.7662000060081482 +epoch: 38, train loss: 0.5777627850065425 +epoch: 39, train loss: 0.562178050376931 +epoch: 39, eval loss: 0.6323211371898652, correct: 7799, total: 10000, acc = 0.7798999547958374 +epoch: 40, train loss: 0.5385949274106901 +epoch: 41, train loss: 0.5233490755971597 +epoch: 41, eval loss: 0.6360922038555146, correct: 7806, total: 10000, acc = 0.7805999517440796 +epoch: 42, train loss: 0.50960702373057 +epoch: 43, train loss: 0.48859657985823496 +epoch: 43, eval loss: 0.607847985625267, correct: 7914, total: 10000, acc = 0.7913999557495117 +epoch: 44, train loss: 0.47382923291654006 +epoch: 45, train loss: 0.45052725380780745 +epoch: 45, eval loss: 0.5986941397190094, correct: 8012, total: 10000, acc = 0.8011999726295471 +epoch: 46, train loss: 0.43711013392526277 +epoch: 47, train loss: 0.42507915229213483 +epoch: 47, eval loss: 0.5871582478284836, correct: 8002, total: 10000, acc = 0.8001999855041504 +epoch: 48, train loss: 0.40591827947266246 +epoch: 49, train loss: 0.3911267008100237 +epoch: 49, eval loss: 0.5832945287227631, correct: 8047, total: 10000, acc = 0.8046999573707581 +epoch: 50, train loss: 0.3770884950550235 +epoch: 51, train loss: 0.3587312725733738 +epoch: 51, eval loss: 0.5942261666059494, correct: 8073, total: 10000, acc = 0.8072999715805054 +epoch: 52, train loss: 0.34132662324272856 +epoch: 53, train loss: 0.3267737687850485 +epoch: 53, eval loss: 0.5920912757515907, correct: 8118, total: 10000, acc = 0.8118000030517578 +epoch: 54, train loss: 0.3116904997399875 +epoch: 55, train loss: 0.30321489380938665 +epoch: 55, eval loss: 0.5957943320274353, correct: 8082, total: 10000, acc = 0.8082000017166138 +epoch: 56, train loss: 0.2874147834218278 +epoch: 57, train loss: 0.27991348140093747 +epoch: 57, eval loss: 0.5895262002944947, correct: 8165, total: 10000, acc = 0.8165000081062317 +epoch: 58, train loss: 0.274563160173747 +epoch: 59, train loss: 0.2600744918596988 +epoch: 59, eval loss: 0.5934095367789268, correct: 8150, total: 10000, acc = 0.8149999976158142 +finish training diff --git a/tests/test_models/test_vision_transformer/test_vit_2p5d/log/421log1e-4.txt b/tests/test_models/test_vision_transformer/test_vit_2p5d/log/421log1e-4.txt new file mode 100644 index 000000000..513037271 --- /dev/null +++ 
b/tests/test_models/test_vision_transformer/test_vit_2p5d/log/421log1e-4.txt @@ -0,0 +1,115 @@ +TACC: Starting up job 3498509 +TACC: Starting parallel tasks... +warning: variables which starts with __, is a module or class declaration are omitted +process rank 0 is bound to device 0 +distributed environment is initialzied +model is created +Files already downloaded and verified +Files already downloaded and verified +training and testing dataloaders are created +loss is created +optimizer is created +start training +warning: variables which starts with __, is a module or class declaration are omitted +process rank 2 is bound to device 2 +Files already downloaded and verified +Files already downloaded and verified +warning: variables which starts with __, is a module or class declaration are omitted +process rank 3 is bound to device 3 +Files already downloaded and verified +Files already downloaded and verified +warning: variables which starts with __, is a module or class declaration are omitted +process rank 1 is bound to device 1 +Files already downloaded and verified +Files already downloaded and verified +epoch: 0, train loss: 2.107759721425115 +epoch: 1, train loss: 1.8388929500871776 +epoch: 1, eval loss: 1.7622965753078461, correct: 3535, total: 10000, acc = 0.35349997878074646 +epoch: 2, train loss: 1.7141443588295762 +epoch: 3, train loss: 1.6003259931291853 +epoch: 3, eval loss: 1.608506625890732, correct: 4263, total: 10000, acc = 0.4262999892234802 +epoch: 4, train loss: 1.5016733225511045 +epoch: 5, train loss: 1.4050611877927974 +epoch: 5, eval loss: 1.386299443244934, correct: 4984, total: 10000, acc = 0.4983999729156494 +epoch: 6, train loss: 1.3264902623332278 +epoch: 7, train loss: 1.2681689250225923 +epoch: 7, eval loss: 1.3251740992069245, correct: 5295, total: 10000, acc = 0.5295000076293945 +epoch: 8, train loss: 1.2236176984650748 +epoch: 9, train loss: 1.172800781775494 +epoch: 9, eval loss: 1.1429427027702332, correct: 5966, total: 10000, acc = 0.5965999960899353 +epoch: 10, train loss: 1.1335287532027887 +epoch: 11, train loss: 1.0974334563527788 +epoch: 11, eval loss: 1.1024536848068238, correct: 6107, total: 10000, acc = 0.6107000112533569 +epoch: 12, train loss: 1.0638826300903244 +epoch: 13, train loss: 1.0406859383291127 +epoch: 13, eval loss: 1.0324654281139374, correct: 6282, total: 10000, acc = 0.6281999945640564 +epoch: 14, train loss: 1.0157714376644211 +epoch: 15, train loss: 0.990898135365272 +epoch: 15, eval loss: 0.9790050059556961, correct: 6539, total: 10000, acc = 0.6538999676704407 +epoch: 16, train loss: 0.963820260398242 +epoch: 17, train loss: 0.9404383374720203 +epoch: 17, eval loss: 0.9367435872554779, correct: 6691, total: 10000, acc = 0.6690999865531921 +epoch: 18, train loss: 0.9299906589546982 +epoch: 19, train loss: 0.9038882474510037 +epoch: 19, eval loss: 0.9210823565721512, correct: 6709, total: 10000, acc = 0.6708999872207642 +epoch: 20, train loss: 0.8825302799137271 +epoch: 21, train loss: 0.8686576388320144 +epoch: 21, eval loss: 0.8791542768478393, correct: 6913, total: 10000, acc = 0.6912999749183655 +epoch: 22, train loss: 0.8509396040926174 +epoch: 23, train loss: 0.8375457452268017 +epoch: 23, eval loss: 0.8651147484779358, correct: 6948, total: 10000, acc = 0.6947999596595764 +epoch: 24, train loss: 0.8163802222329744 +epoch: 25, train loss: 0.8068491317787949 +epoch: 25, eval loss: 0.8353333532810211, correct: 7089, total: 10000, acc = 0.708899974822998 +epoch: 26, train loss: 0.7894753631280393 +epoch: 27, train loss: 
0.7779296344640304 +epoch: 27, eval loss: 0.8161472469568253, correct: 7143, total: 10000, acc = 0.7142999768257141 +epoch: 28, train loss: 0.763744876092794 +epoch: 29, train loss: 0.7521962505214068 +epoch: 29, eval loss: 0.7903082758188248, correct: 7219, total: 10000, acc = 0.7218999862670898 +epoch: 30, train loss: 0.7443178624522929 +epoch: 31, train loss: 0.7280340212948468 +epoch: 31, eval loss: 0.7877005040645599, correct: 7233, total: 10000, acc = 0.7232999801635742 +epoch: 32, train loss: 0.7196985489251663 +epoch: 33, train loss: 0.7108793039711154 +epoch: 33, eval loss: 0.7838329076766968, correct: 7292, total: 10000, acc = 0.729200005531311 +epoch: 34, train loss: 0.6965019471791326 +epoch: 35, train loss: 0.6875918537986522 +epoch: 35, eval loss: 0.7513678789138794, correct: 7392, total: 10000, acc = 0.7391999959945679 +epoch: 36, train loss: 0.6793362346230721 +epoch: 37, train loss: 0.6741023343436572 +epoch: 37, eval loss: 0.7752945452928544, correct: 7316, total: 10000, acc = 0.7315999865531921 +epoch: 38, train loss: 0.6629589072295597 +epoch: 39, train loss: 0.6507086388918818 +epoch: 39, eval loss: 0.7758691757917404, correct: 7322, total: 10000, acc = 0.7321999669075012 +epoch: 40, train loss: 0.6381483582817778 +epoch: 41, train loss: 0.6374095179596726 +epoch: 41, eval loss: 0.7589699536561966, correct: 7386, total: 10000, acc = 0.738599956035614 +epoch: 42, train loss: 0.6251792050137812 +epoch: 43, train loss: 0.6148473596086308 +epoch: 43, eval loss: 0.7495014071464539, correct: 7478, total: 10000, acc = 0.7477999925613403 +epoch: 44, train loss: 0.6119371378908351 +epoch: 45, train loss: 0.6012086509441843 +epoch: 45, eval loss: 0.725347763299942, correct: 7515, total: 10000, acc = 0.7515000104904175 +epoch: 46, train loss: 0.597867566103838 +epoch: 47, train loss: 0.5913592832429069 +epoch: 47, eval loss: 0.7254288077354432, correct: 7529, total: 10000, acc = 0.7529000043869019 +epoch: 48, train loss: 0.5801522807807339 +epoch: 49, train loss: 0.575563525666996 +epoch: 49, eval loss: 0.7291093468666077, correct: 7533, total: 10000, acc = 0.7532999515533447 +epoch: 50, train loss: 0.573031121674849 +epoch: 51, train loss: 0.5667383588698446 +epoch: 51, eval loss: 0.7240727603435516, correct: 7570, total: 10000, acc = 0.7569999694824219 +epoch: 52, train loss: 0.5578772419569443 +epoch: 53, train loss: 0.5526659309255834 +epoch: 53, eval loss: 0.7226850330829621, correct: 7576, total: 10000, acc = 0.7576000094413757 +epoch: 54, train loss: 0.5473246245968099 +epoch: 55, train loss: 0.5443006860358375 +epoch: 55, eval loss: 0.720612645149231, correct: 7596, total: 10000, acc = 0.7595999836921692 +epoch: 56, train loss: 0.5361242987671677 +epoch: 57, train loss: 0.5323515981435776 +epoch: 57, eval loss: 0.7203025311231613, correct: 7580, total: 10000, acc = 0.7579999566078186 +epoch: 58, train loss: 0.5297852404871766 +epoch: 59, train loss: 0.5288004583241989 +epoch: 59, eval loss: 0.7189624041318894, correct: 7605, total: 10000, acc = 0.7604999542236328 +finish training diff --git a/tests/test_models/test_vision_transformer/test_vit_2p5d/log/822log1e-3.txt b/tests/test_models/test_vision_transformer/test_vit_2p5d/log/822log1e-3.txt new file mode 100644 index 000000000..cda0d59ef --- /dev/null +++ b/tests/test_models/test_vision_transformer/test_vit_2p5d/log/822log1e-3.txt @@ -0,0 +1,131 @@ +TACC: Starting up job 3496458 +TACC: Starting parallel tasks... 
+warning: variables which starts with __, is a module or class declaration are omitted +process rank 0 is bound to device 0 +distributed environment is initialzied +model is created +warning: variables which starts with __, is a module or class declaration are omitted +process rank 3 is bound to device 3 +Files already downloaded and verified +Files already downloaded and verified +warning: variables which starts with __, is a module or class declaration are omitted +process rank 2 is bound to device 2 +Files already downloaded and verified +Files already downloaded and verified +Files already downloaded and verified +Files already downloaded and verified +training and testing dataloaders are created +loss is created +warning: variables which starts with __, is a module or class declaration are omitted +process rank 7 is bound to device 3 +Files already downloaded and verified +Files already downloaded and verified +warning: variables which starts with __, is a module or class declaration are omitted +process rank 6 is bound to device 2 +Files already downloaded and verified +Files already downloaded and verified +optimizer is created +start training +warning: variables which starts with __, is a module or class declaration are omitted +process rank 4 is bound to device 0 +Files already downloaded and verified +Files already downloaded and verified +warning: variables which starts with __, is a module or class declaration are omitted +process rank 5 is bound to device 1 +Files already downloaded and verified +Files already downloaded and verified +warning: variables which starts with __, is a module or class declaration are omitted +process rank 1 is bound to device 1 +Files already downloaded and verified +Files already downloaded and verified +epoch: 0, train loss: 1.936693473738067 +epoch: 1, train loss: 1.627108974116189 +epoch: 1, eval loss: 1.5279120564460755, correct: 4576, total: 10000, acc = 0.4575999975204468 +epoch: 2, train loss: 1.438910031805233 +epoch: 3, train loss: 1.3184991053172521 +epoch: 3, eval loss: 1.3557079970836639, correct: 5129, total: 10000, acc = 0.5128999948501587 +epoch: 4, train loss: 1.271946340191121 +epoch: 5, train loss: 1.2340542175331894 +epoch: 5, eval loss: 1.207822185754776, correct: 5703, total: 10000, acc = 0.5702999830245972 +epoch: 6, train loss: 1.187913371592152 +epoch: 7, train loss: 1.154962458172623 +epoch: 7, eval loss: 1.0685692846775054, correct: 6100, total: 10000, acc = 0.6100000143051147 +epoch: 8, train loss: 1.1158924905621275 +epoch: 9, train loss: 1.0909727805731249 +epoch: 9, eval loss: 1.0345157146453858, correct: 6328, total: 10000, acc = 0.6327999830245972 +epoch: 10, train loss: 1.0725988399009316 +epoch: 11, train loss: 1.0453423085261364 +epoch: 11, eval loss: 0.9778846323490142, correct: 6543, total: 10000, acc = 0.6542999744415283 +epoch: 12, train loss: 1.0397504823548454 +epoch: 13, train loss: 1.011059400986652 +epoch: 13, eval loss: 0.9668682873249054, correct: 6446, total: 10000, acc = 0.644599974155426 +epoch: 14, train loss: 0.9938353963044225 +epoch: 15, train loss: 0.9691349967401854 +epoch: 15, eval loss: 0.9465512812137604, correct: 6657, total: 10000, acc = 0.6656999588012695 +epoch: 16, train loss: 0.9470896617490419 +epoch: 17, train loss: 0.927201622602891 +epoch: 17, eval loss: 0.8875106543302536, correct: 6837, total: 10000, acc = 0.6836999654769897 +epoch: 18, train loss: 0.8975223132542202 +epoch: 19, train loss: 0.8810242603019792 +epoch: 19, eval loss: 0.8688296616077423, correct: 6832, total: 10000, 
acc = 0.6832000017166138 +epoch: 20, train loss: 0.8482622784011218 +epoch: 21, train loss: 0.8266285700457436 +epoch: 21, eval loss: 0.7801274597644806, correct: 7205, total: 10000, acc = 0.7204999923706055 +epoch: 22, train loss: 0.8038581859092323 +epoch: 23, train loss: 0.7879118153027126 +epoch: 23, eval loss: 0.7779350578784943, correct: 7203, total: 10000, acc = 0.7202999591827393 +epoch: 24, train loss: 0.7542270896386127 +epoch: 25, train loss: 0.7369782894241567 +epoch: 25, eval loss: 0.7534965008497239, correct: 7362, total: 10000, acc = 0.7361999750137329 +epoch: 26, train loss: 0.7095995545387268 +epoch: 27, train loss: 0.6873777825005201 +epoch: 27, eval loss: 0.7344318777322769, correct: 7381, total: 10000, acc = 0.738099992275238 +epoch: 28, train loss: 0.6713967414534822 +epoch: 29, train loss: 0.650338428969286 +epoch: 29, eval loss: 0.677948921918869, correct: 7653, total: 10000, acc = 0.7652999758720398 +epoch: 30, train loss: 0.6301205882004329 +epoch: 31, train loss: 0.5990057824825754 +epoch: 31, eval loss: 0.6719370454549789, correct: 7643, total: 10000, acc = 0.7642999887466431 +epoch: 32, train loss: 0.590088236696866 +epoch: 33, train loss: 0.5689327443132595 +epoch: 33, eval loss: 0.6191721886396409, correct: 7807, total: 10000, acc = 0.7806999683380127 +epoch: 34, train loss: 0.5426055670392756 +epoch: 35, train loss: 0.5270413601276825 +epoch: 35, eval loss: 0.6150132775306701, correct: 7879, total: 10000, acc = 0.7878999710083008 +epoch: 36, train loss: 0.5215025428606539 +epoch: 37, train loss: 0.4952395400222467 +epoch: 37, eval loss: 0.628344652056694, correct: 7868, total: 10000, acc = 0.786799967288971 +epoch: 38, train loss: 0.47989121687655545 +epoch: 39, train loss: 0.46510300618045186 +epoch: 39, eval loss: 0.5977057978510857, correct: 7944, total: 10000, acc = 0.7943999767303467 +epoch: 40, train loss: 0.4441945254802704 +epoch: 41, train loss: 0.4285763985648447 +epoch: 41, eval loss: 0.5695438250899315, correct: 8023, total: 10000, acc = 0.802299976348877 +epoch: 42, train loss: 0.41337763776584546 +epoch: 43, train loss: 0.3940146170100387 +epoch: 43, eval loss: 0.5688270673155784, correct: 8091, total: 10000, acc = 0.8090999722480774 +epoch: 44, train loss: 0.37741332303504554 +epoch: 45, train loss: 0.36565779605690313 +epoch: 45, eval loss: 0.5831407308578491, correct: 8104, total: 10000, acc = 0.8104000091552734 +epoch: 46, train loss: 0.3468657017362361 +epoch: 47, train loss: 0.32949359198005834 +epoch: 47, eval loss: 0.5751512110233307, correct: 8097, total: 10000, acc = 0.8096999526023865 +epoch: 48, train loss: 0.3140165246262842 +epoch: 49, train loss: 0.29480520498995877 +epoch: 49, eval loss: 0.5712087765336037, correct: 8184, total: 10000, acc = 0.818399965763092 +epoch: 50, train loss: 0.2766021394303867 +epoch: 51, train loss: 0.26527753776433516 +epoch: 51, eval loss: 0.5643855139613152, correct: 8218, total: 10000, acc = 0.8217999935150146 +epoch: 52, train loss: 0.2525861115784061 +epoch: 53, train loss: 0.23714738658496312 +epoch: 53, eval loss: 0.5732526823878288, correct: 8249, total: 10000, acc = 0.8248999714851379 +epoch: 54, train loss: 0.2238179413335664 +epoch: 55, train loss: 0.2119908875652722 +epoch: 55, eval loss: 0.5957901775836945, correct: 8261, total: 10000, acc = 0.8260999917984009 +epoch: 56, train loss: 0.19989302222217833 +epoch: 57, train loss: 0.1875186789096618 +epoch: 57, eval loss: 0.5905491337180138, correct: 8290, total: 10000, acc = 0.8289999961853027 +epoch: 58, train loss: 0.18436841180129926 
+epoch: 59, train loss: 0.17459663231762088 +epoch: 59, eval loss: 0.589044263958931, correct: 8313, total: 10000, acc = 0.8312999606132507 +finish training diff --git a/tests/test_models/test_vision_transformer/test_vit_2p5d/log/822log1e-4.txt b/tests/test_models/test_vision_transformer/test_vit_2p5d/log/822log1e-4.txt new file mode 100644 index 000000000..6f69c17cd --- /dev/null +++ b/tests/test_models/test_vision_transformer/test_vit_2p5d/log/822log1e-4.txt @@ -0,0 +1,131 @@ +TACC: Starting up job 3498327 +TACC: Starting parallel tasks... +warning: variables which starts with __, is a module or class declaration are omitted +process rank 0 is bound to device 0 +distributed environment is initialzied +model is created +Files already downloaded and verified +Files already downloaded and verified +training and testing dataloaders are created +loss is created +optimizer is created +start training +warning: variables which starts with __, is a module or class declaration are omitted +process rank 2 is bound to device 2 +Files already downloaded and verified +Files already downloaded and verified +warning: variables which starts with __, is a module or class declaration are omitted +process rank 3 is bound to device 3 +Files already downloaded and verified +Files already downloaded and verified +warning: variables which starts with __, is a module or class declaration are omitted +process rank 4 is bound to device 0 +Files already downloaded and verified +Files already downloaded and verified +warning: variables which starts with __, is a module or class declaration are omitted +process rank 5 is bound to device 1 +Files already downloaded and verified +Files already downloaded and verified +warning: variables which starts with __, is a module or class declaration are omitted +process rank 7 is bound to device 3 +Files already downloaded and verified +Files already downloaded and verified +warning: variables which starts with __, is a module or class declaration are omitted +process rank 6 is bound to device 2 +Files already downloaded and verified +Files already downloaded and verified +warning: variables which starts with __, is a module or class declaration are omitted +process rank 1 is bound to device 1 +Files already downloaded and verified +Files already downloaded and verified +epoch: 0, train loss: 2.1005014667705613 +epoch: 1, train loss: 1.8539113086097094 +epoch: 1, eval loss: 1.7973519027233125, correct: 3362, total: 10000, acc = 0.3361999988555908 +epoch: 2, train loss: 1.7149482040989155 +epoch: 3, train loss: 1.5927067617980801 +epoch: 3, eval loss: 1.5848429083824158, correct: 4344, total: 10000, acc = 0.4343999922275543 +epoch: 4, train loss: 1.4912729798531046 +epoch: 5, train loss: 1.3957378158763962 +epoch: 5, eval loss: 1.4951884388923644, correct: 4841, total: 10000, acc = 0.48409998416900635 +epoch: 6, train loss: 1.3090402642074896 +epoch: 7, train loss: 1.2566283296565621 +epoch: 7, eval loss: 1.2464738070964814, correct: 5562, total: 10000, acc = 0.5561999678611755 +epoch: 8, train loss: 1.2084139476017075 +epoch: 9, train loss: 1.1706127719003327 +epoch: 9, eval loss: 1.162048089504242, correct: 5876, total: 10000, acc = 0.5875999927520752 +epoch: 10, train loss: 1.120817175933293 +epoch: 11, train loss: 1.084984731309268 +epoch: 11, eval loss: 1.0764922022819519, correct: 6155, total: 10000, acc = 0.6154999732971191 +epoch: 12, train loss: 1.0559214432628787 +epoch: 13, train loss: 1.0261321286765896 +epoch: 13, eval loss: 1.0338306188583375, correct: 6334, total: 
10000, acc = 0.6333999633789062 +epoch: 14, train loss: 0.992842432187528 +epoch: 15, train loss: 0.9660871296512837 +epoch: 15, eval loss: 1.0059030145406722, correct: 6458, total: 10000, acc = 0.645799994468689 +epoch: 16, train loss: 0.9467733100968965 +epoch: 17, train loss: 0.9243187673237859 +epoch: 17, eval loss: 0.9469569176435471, correct: 6610, total: 10000, acc = 0.6609999537467957 +epoch: 18, train loss: 0.9059403721167116 +epoch: 19, train loss: 0.8819177935318071 +epoch: 19, eval loss: 0.9196836709976196, correct: 6727, total: 10000, acc = 0.6726999878883362 +epoch: 20, train loss: 0.8721987532109631 +epoch: 21, train loss: 0.8469706013494608 +epoch: 21, eval loss: 0.8634845405817032, correct: 6976, total: 10000, acc = 0.6976000070571899 +epoch: 22, train loss: 0.8352831839298716 +epoch: 23, train loss: 0.8124590455269327 +epoch: 23, eval loss: 0.8418784946203232, correct: 7034, total: 10000, acc = 0.7033999562263489 +epoch: 24, train loss: 0.7961219853284408 +epoch: 25, train loss: 0.7883704268202489 +epoch: 25, eval loss: 0.8191130340099335, correct: 7116, total: 10000, acc = 0.7116000056266785 +epoch: 26, train loss: 0.7733409623710477 +epoch: 27, train loss: 0.7561956893424598 +epoch: 27, eval loss: 0.8028618812561035, correct: 7200, total: 10000, acc = 0.7199999690055847 +epoch: 28, train loss: 0.7479740460308231 +epoch: 29, train loss: 0.7343520899208225 +epoch: 29, eval loss: 0.7829996794462204, correct: 7256, total: 10000, acc = 0.725600004196167 +epoch: 30, train loss: 0.7244430549290716 +epoch: 31, train loss: 0.7121965617549663 +epoch: 31, eval loss: 0.765428164601326, correct: 7299, total: 10000, acc = 0.7299000024795532 +epoch: 32, train loss: 0.6988190838268825 +epoch: 33, train loss: 0.6908610359746583 +epoch: 33, eval loss: 0.7602580636739731, correct: 7395, total: 10000, acc = 0.7394999861717224 +epoch: 34, train loss: 0.6785666395206841 +epoch: 35, train loss: 0.6664504153387887 +epoch: 35, eval loss: 0.7671193510293961, correct: 7345, total: 10000, acc = 0.734499990940094 +epoch: 36, train loss: 0.6639333245705585 +epoch: 37, train loss: 0.6509425913800999 +epoch: 37, eval loss: 0.7612941324710846, correct: 7382, total: 10000, acc = 0.7382000088691711 +epoch: 38, train loss: 0.6416311720196082 +epoch: 39, train loss: 0.6312643265237614 +epoch: 39, eval loss: 0.7380059510469437, correct: 7496, total: 10000, acc = 0.7495999932289124 +epoch: 40, train loss: 0.620578939209179 +epoch: 41, train loss: 0.6195461816933691 +epoch: 41, eval loss: 0.7172901630401611, correct: 7550, total: 10000, acc = 0.7549999952316284 +epoch: 42, train loss: 0.6013389248020795 +epoch: 43, train loss: 0.6049416010477104 +epoch: 43, eval loss: 0.7145429253578186, correct: 7569, total: 10000, acc = 0.7568999528884888 +epoch: 44, train loss: 0.5950779300563189 +epoch: 45, train loss: 0.5786038743598121 +epoch: 45, eval loss: 0.7171747118234635, correct: 7569, total: 10000, acc = 0.7568999528884888 +epoch: 46, train loss: 0.5752052083915594 +epoch: 47, train loss: 0.5669339743195748 +epoch: 47, eval loss: 0.7040806382894516, correct: 7601, total: 10000, acc = 0.7601000070571899 +epoch: 48, train loss: 0.5596802952338238 +epoch: 49, train loss: 0.5521421706189915 +epoch: 49, eval loss: 0.7221358746290207, correct: 7592, total: 10000, acc = 0.7591999769210815 +epoch: 50, train loss: 0.5504364164508119 +epoch: 51, train loss: 0.5363630725412952 +epoch: 51, eval loss: 0.710089972615242, correct: 7650, total: 10000, acc = 0.7649999856948853 +epoch: 52, train loss: 0.5382009008709265 +epoch: 
53, train loss: 0.5292040118757559 +epoch: 53, eval loss: 0.7044323921203614, correct: 7672, total: 10000, acc = 0.7671999931335449 +epoch: 54, train loss: 0.5289747638970005 +epoch: 55, train loss: 0.5239191630056926 +epoch: 55, eval loss: 0.6983724802732467, correct: 7694, total: 10000, acc = 0.7694000005722046 +epoch: 56, train loss: 0.5177402243930467 +epoch: 57, train loss: 0.5132759012738053 +epoch: 57, eval loss: 0.7066506981849671, correct: 7671, total: 10000, acc = 0.7670999765396118 +epoch: 58, train loss: 0.5119742675095188 +epoch: 59, train loss: 0.5074386891661858 +epoch: 59, eval loss: 0.7012903690338135, correct: 7693, total: 10000, acc = 0.7692999839782715 +finish training
diff --git a/tests/test_models/test_vision_transformer/test_vit_2p5d/test_vit_2p5d.py b/tests/test_models/test_vision_transformer/test_vit_2p5d/test_vit_2p5d.py
new file mode 100644
index 000000000..1a576d039
--- /dev/null
+++ b/tests/test_models/test_vision_transformer/test_vit_2p5d/test_vit_2p5d.py
@@ -0,0 +1,88 @@
+from pathlib import Path
+
+import pytest
+import torch.autograd
+
+import colossalai
+from colossalai.context.parallel_mode import ParallelMode
+from colossalai.core import global_context as gpc
+from colossalai.engine import Engine
+from colossalai.logging import get_global_dist_logger
+from colossalai.nn.layer._parallel_utilities import _gather
+
+CONFIG_PATH = Path(__file__).parent.parent.joinpath('configs/vit_2p5d.py')
+
+def eval(engine):
+    engine.eval()
+    accumulated_loss = 0
+    correct_sum = 0
+    total_sum = 0
+
+    for i in range(engine.schedule.num_steps):
+        output, label, loss = engine.step()
+        accumulated_loss += loss.detach().cpu().numpy()
+
+        output = _gather(
+            output[0],
+            ParallelMode.PARALLEL_2P5D_ROW,
+            1
+        )
+        output = _gather(
+            output,
+            ParallelMode.PARALLEL_2P5D_COL,
+            0,
+        )
+        output = _gather(
+            output,
+            ParallelMode.PARALLEL_2P5D_DEP,
+            0,
+        )
+        output = torch.argmax(output, dim=-1)
+        correct = torch.sum(label[0] == output)
+        correct_sum += correct
+        total_sum += label[0].size(0)
+    avg_loss = accumulated_loss / engine.schedule.num_steps
+    return correct_sum, total_sum, avg_loss
+
+
+def train(engine):
+    engine.train()
+    accumulated_loss = 0
+
+    for i in range(engine.schedule.num_steps):
+        output, label, loss = engine.step()
+        accumulated_loss += loss.detach().cpu().numpy()
+    avg_loss = accumulated_loss / engine.schedule.num_steps
+    return avg_loss
+
+
+@pytest.mark.dist
+@pytest.mark.skip("This test should be invoked by test.sh in the same folder as it runs on multiple gpus")
+def test_2p5d_parallel_vision_transformer():
+    # init dist
+    model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = colossalai.initialize(
+        CONFIG_PATH)
+    logger = get_global_dist_logger()
+
+    engine = Engine(model=model,
+                    train_dataloader=train_dataloader,
+                    test_dataloader=test_dataloader,
+                    criterion=criterion,
+                    optimizer=optimizer,
+                    lr_scheduler=lr_scheduler,
+                    schedule=schedule)
+
+    logger.info('start training')
+    for epoch in range(gpc.config.num_epochs):
+        train_loss = train(engine)
+        logger.info(f'epoch {epoch} - train loss: {train_loss}')
+
+        if epoch % 2 == 0:
+            correct_sum, total_sum, eval_loss = eval(engine)
+            logger.info(
+                f'epoch {epoch} - eval loss: {eval_loss}, total: {total_sum}, '
+                f'correct: {correct_sum}, acc: {correct_sum / total_sum}')
+
+
+if __name__ == '__main__':
+    test_2p5d_parallel_vision_transformer()
\ No newline at end of file
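For reference, the eval() routine above reconstructs the full [batch, num_classes] logits from the 2.5D-partitioned output by gathering along the ROW, COL, and DEP process groups before taking the argmax. The sketch below illustrates only that reassembly order on a single process: it substitutes plain torch.cat for the collective _gather and assumes a 2x2 row/column split with tensor-parallel depth 1 (so the DEP gather is a no-op); the assumed mapping of batch/class dimensions to the COL/ROW groups is for illustration, not a statement about the actual 2.5D layout.

import torch

# Minimal local sketch (not part of the patch): emulate the gather order used
# in eval() above with ordinary tensor ops.
batch, num_classes = 8, 10
full_logits = torch.randn(batch, num_classes)

# Assumed layout: batch split across the COL group (index i), classes split
# across the ROW group (index j); depth is 1, so the DEP gather adds nothing.
shards = [list(part.chunk(2, dim=1)) for part in full_logits.chunk(2, dim=0)]

# Step 1: gather over the ROW group along dim 1 (restores the class dimension).
row_gathered = [torch.cat(shards[i], dim=1) for i in range(2)]
# Step 2: gather over the COL group along dim 0 (restores the batch dimension).
reassembled = torch.cat(row_gathered, dim=0)

assert torch.equal(reassembled, full_logits)
predictions = torch.argmax(reassembled, dim=-1)  # same argmax step as the test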
b/tests/test_models/test_vision_transformer/test_vit_3d/test_vit_3d.py new file mode 100644 index 000000000..db78e9967 --- /dev/null +++ b/tests/test_models/test_vision_transformer/test_vit_3d/test_vit_3d.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import time +from pathlib import Path + +import torch +from tqdm import tqdm + +from colossalai import initialize +from colossalai.context import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.engine import Engine +from colossalai.logging import get_global_dist_logger +from colossalai.trainer import Trainer +from colossalai.trainer.metric import Accuracy3D +from colossalai.utils import print_rank_0 + +CONFIG_PATH = Path(__file__).parent.parent.joinpath('configs/vit_3d.py') + + +def _train_epoch(epoch, engine): + logger = get_global_dist_logger() + print_rank_0('[Epoch %d] training start' % (epoch), logger) + engine.train() + + train_loss = 0 + batch_cnt = 0 + num_samples = 0 + now = time.time() + epoch_start = now + progress = range(engine.schedule.num_steps) + if gpc.get_global_rank() == 0: + progress = tqdm(progress, desc='[Epoch %d]' % epoch, miniters=1) + for step in progress: + cur_lr = engine.get_lr() + + _, targets, loss = engine.step() + + batch_size = targets[0].size(0) + train_loss += loss.item() + num_samples += batch_size + batch_cnt += 1 + + batch_time = time.time() - now + now = time.time() + if gpc.get_global_rank() == 0: + print_features = dict(lr='%g' % cur_lr, + loss='%.3f' % (train_loss / (step + 1)), + throughput='%.3f (images/sec)' % + (batch_size / (batch_time + 1e-12))) + progress.set_postfix(**print_features) + + epoch_end = time.time() + epoch_loss = train_loss / batch_cnt + epoch_throughput = num_samples / (epoch_end - epoch_start + 1e-12) + print_rank_0( + '[Epoch %d] Loss: %.3f | Throughput: %.3f (samples/sec)' % + (epoch, epoch_loss, epoch_throughput), logger) + + +def _eval(epoch, engine): + logger = get_global_dist_logger() + engine.eval() + + eval_loss = 0 + acc = Accuracy3D(True, ParallelMode.PARALLEL_3D_OUTPUT, + ParallelMode.PARALLEL_3D_WEIGHT) + total = 0 + with torch.no_grad(): + for _ in range(engine.schedule.num_steps): + outputs, targets, loss = engine.step() + if isinstance(outputs, (list, tuple)): + outputs = outputs[0] + if isinstance(targets, (list, tuple)): + targets = targets[0] + eval_loss += loss.item() + acc.update(outputs, targets) + total += targets.size(0) + + print_rank_0( + '[Epoch %d] Evaluation loss: %.3f | Acc: %.3f%%' % + (epoch, eval_loss / engine.schedule.num_steps, + acc.get_accumulated_value() * 100), logger) + + +def train(): + model, train_dataloader, test_dataloader, criterion, \ + optimizer, schedule, lr_scheduler = initialize(CONFIG_PATH) + + logger = get_global_dist_logger() + + engine = Engine(model=model, + train_dataloader=train_dataloader, + test_dataloader=test_dataloader, + criterion=criterion, + optimizer=optimizer, + lr_scheduler=lr_scheduler, + schedule=schedule) + logger.info("Engine is built", ranks=[0]) + + trainer = Trainer(engine=engine, hooks_cfg=gpc.config.hooks, verbose=True) + logger.info("Trainer is built", ranks=[0]) + + logger.info("Train start", ranks=[0]) + trainer.fit(train_dataloader=train_dataloader, + test_dataloader=test_dataloader, + max_epochs=gpc.config.num_epochs, + display_progress=True, + test_interval=1) + + +if __name__ == '__main__': + train() diff --git a/tests/test_models/test_vision_transformer/test_vit_vanilla.py b/tests/test_models/test_vision_transformer/test_vit_vanilla.py 
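The `_train_epoch` loop in `test_vit_3d.py` above reports two different rates: a per-step throughput (`batch_size / batch_time`) for the tqdm postfix and a per-epoch throughput (`num_samples / (epoch_end - epoch_start)`) for the summary line. A minimal sketch of that bookkeeping is shown below; the helper name `ThroughputMeter` is illustrative only and not part of the library.

import time


class ThroughputMeter:
    """Tracks the per-step and per-epoch rates printed by _train_epoch above."""

    def __init__(self):
        self.epoch_start = time.time()
        self.last = self.epoch_start
        self.num_samples = 0

    def step(self, batch_size):
        now = time.time()
        # images/sec for this step; the 1e-12 epsilon mirrors the test code
        step_rate = batch_size / (now - self.last + 1e-12)
        self.last = now
        self.num_samples += batch_size
        return step_rate

    def epoch(self):
        # samples/sec over the whole epoch
        return self.num_samples / (time.time() - self.epoch_start + 1e-12)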
new file mode 100644 index 000000000..f52161748 --- /dev/null +++ b/tests/test_models/test_vision_transformer/test_vit_vanilla.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from pathlib import Path + +import pytest +import torch + +from colossalai.builder import build_model +from colossalai.context import Config + +CONFIG_PATH = Path(__file__).parent.joinpath('configs/vanilla_vit.py') + + +@pytest.mark.cpu +def test_with_vanilla_vit_config(): + config = Config.from_file(CONFIG_PATH) + model = build_model(config.model) + model.build_from_cfg() + + img = torch.randn(1, 3, config.IMG_SIZE, config.IMG_SIZE) + out = model(img) + loss = out.mean() + loss.backward() + + +if __name__ == '__main__': + test_with_vanilla_vit_config() diff --git a/tests/test_trainer/configs/test_trainer_resnet.py b/tests/test_trainer/configs/test_trainer_resnet.py new file mode 100644 index 000000000..8979f4b09 --- /dev/null +++ b/tests/test_trainer/configs/test_trainer_resnet.py @@ -0,0 +1,94 @@ +import os +from pathlib import Path + +BATCH_SIZE = 128 +IMG_SIZE = 32 + +# resnet 50 +model = dict( + type='VanillaResNet', + block_type='ResNetBottleneck', + layers=[3, 4, 6, 3], + num_cls=10 +) + +train_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + transform_pipeline=[ + dict(type='Resize', size=IMG_SIZE), + dict(type='RandomCrop', size=IMG_SIZE, padding=4), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010]), + ] + ), + dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, + num_workers=4, + shuffle=True + ) +) + +test_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + train=False, + transform_pipeline=[ + dict(type='Resize', size=IMG_SIZE), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010] + ), + ] + ), + dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, + num_workers=4, + shuffle=True + ) +) + +optimizer = dict( + type='SGD', + lr=0.2, + momentum=0.9, + weight_decay=5e-4 +) + +loss = dict( + type='CrossEntropyLoss', +) + +parallel = dict( + pipeline=dict(size=1), + tensor=dict(size=1, mode=None), +) + +hooks = [ + dict(type='LogMetricByEpochHook'), + dict(type='AccuracyHook'), + dict(type='LossHook'), + dict(type='TensorboardHook', log_dir='./tfb_logs'), + dict(type='SaveCheckpointHook', interval=5, checkpoint_dir='./ckpt'), + # dict(type='LoadCheckpointHook', epoch=20, checkpoint_dir='./ckpt') +] + +# fp16 = dict( +# mode=AMP_TYPE.PARALLEL, +# initial_scale=1 +# ) + +lr_scheduler = dict( + type='CosineAnnealingLR', + T_max=200 +) + +num_epochs = 200 diff --git a/tests/test_trainer/configs/test_trainer_vit_2d.py b/tests/test_trainer/configs/test_trainer_vit_2d.py new file mode 100644 index 000000000..15c799039 --- /dev/null +++ b/tests/test_trainer/configs/test_trainer_vit_2d.py @@ -0,0 +1,135 @@ +import os +from pathlib import Path + +from colossalai.engine import AMP_TYPE + +BATCH_SIZE = 512 +IMG_SIZE = 32 +PATCH_SIZE = 4 +DIM = 512 +NUM_ATTENTION_HEADS = 8 +SUMMA_DIM = 2 +NUM_CLASSES = 10 +DEPTH = 6 + +train_data = dict( + dataset=dict(type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + transform_pipeline=[ + dict(type='Resize', size=IMG_SIZE), + dict(type='RandomCrop', size=IMG_SIZE, padding=4), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + 
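+                              # standard CIFAR-10 per-channel statistics (mean above, std below)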
std=[0.2023, 0.1994, 0.2010]), + ]), + dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, + # num_workers=1, + shuffle=True, + )) + +test_data = dict( + dataset=dict(type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + train=False, + transform_pipeline=[ + dict(type='Resize', size=IMG_SIZE), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010]), + ]), + dataloader=dict( + batch_size=400, + pin_memory=True, + # num_workers=1, + )) + +optimizer = dict(type='Adam', lr=0.001, weight_decay=0) + +loss = dict(type='CrossEntropyLoss2D', ) + +# model = dict( +# type='VanillaResNet', +# block_type='ResNetBasicBlock', +# layers=[2, 2, 2, 2], +# num_cls=10 +# ) + +model = dict( + type='VisionTransformerFromConfig', + tensor_splitting_cfg=dict(type='ViTInputSplitter2D', ), + embedding_cfg=dict( + type='ViTPatchEmbedding2D', + img_size=IMG_SIZE, + patch_size=PATCH_SIZE, + embed_dim=DIM, + ), + token_fusion_cfg=dict(type='ViTTokenFuser2D', + img_size=IMG_SIZE, + patch_size=PATCH_SIZE, + embed_dim=DIM, + drop_rate=0.1), + norm_cfg=dict( + type='LayerNorm2D', + normalized_shape=DIM, + eps=1e-6, + ), + block_cfg=dict( + type='ViTBlock', + attention_cfg=dict( + type='ViTSelfAttention2D', + hidden_size=DIM, + num_attention_heads=NUM_ATTENTION_HEADS, + attention_dropout_prob=0., + hidden_dropout_prob=0.1, + ), + droppath_cfg=dict(type='VanillaViTDropPath', ), + mlp_cfg=dict(type='ViTMLP2D', + in_features=DIM, + dropout_prob=0.1, + mlp_ratio=1), + norm_cfg=dict( + type='LayerNorm2D', + normalized_shape=DIM, + eps=1e-6, + ), + ), + head_cfg=dict( + type='ViTHead2D', + hidden_size=DIM, + num_classes=NUM_CLASSES, + ), + embed_dim=DIM, + depth=DEPTH, + drop_path_rate=0., +) + +hooks = [ + dict(type='LogMetricByEpochHook'), + dict(type='LogTimingByEpochHook'), + dict(type='Accuracy2DHook'), + dict(type='LossHook'), + dict(type='TensorboardHook', log_dir='./tfb_logs'), + dict(type='SaveCheckpointHook', interval=5, checkpoint_dir='./ckpt'), + # dict(type='LoadCheckpointHook', epoch=20, checkpoint_dir='./ckpt') +] + +parallel = dict( + pipeline=dict(size=1), + tensor=dict(size=4, mode='2d'), +) + +fp16 = dict(mode=AMP_TYPE.PARALLEL, initial_scale=2 ** 8) + +lr_scheduler = dict(type='LinearWarmupLR', warmup_epochs=5) + +schedule = dict(num_microbatches=1) + +num_epochs = 60 +num_microbatches = 1 + +logging = dict(root_path='./logs') diff --git a/tests/test_trainer/test.sh b/tests/test_trainer/test.sh new file mode 100644 index 000000000..65c4fc4bd --- /dev/null +++ b/tests/test_trainer/test.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env sh +test_file=$1 +config_file=$2 + +python $test_file --local_rank $SLURM_PROCID --world_size $SLURM_NPROCS --host $HOST --port 29500 --config $config_file diff --git a/tests/test_trainer/test_trainer.py b/tests/test_trainer/test_trainer.py new file mode 100644 index 000000000..0c0a458b3 --- /dev/null +++ b/tests/test_trainer/test_trainer.py @@ -0,0 +1,37 @@ +import colossalai +from colossalai.core import global_context as gpc +from colossalai.engine import Engine +from colossalai.logging import get_global_dist_logger +from colossalai.trainer import Trainer + + +def test_trainer(): + model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = colossalai.initialize() + logger = get_global_dist_logger() + + engine = Engine( + model=model, + criterion=criterion, + optimizer=optimizer, + lr_scheduler=lr_scheduler, + schedule=schedule + ) + logger.info("engine is built", ranks=[0]) + + trainer = 
Trainer(engine=engine, + hooks_cfg=gpc.config.hooks, + verbose=True) + logger.info("trainer is built", ranks=[0]) + + logger.info("start training", ranks=[0]) + trainer.fit( + train_dataloader=train_dataloader, + test_dataloader=test_dataloader, + max_epochs=gpc.config.num_epochs, + display_progress=False, + test_interval=5 + ) + + +if __name__ == '__main__': + test_trainer() diff --git a/tests/test_utils/test_activation_checkpointing.py b/tests/test_utils/test_activation_checkpointing.py new file mode 100644 index 000000000..667b7c337 --- /dev/null +++ b/tests/test_utils/test_activation_checkpointing.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import pytest +import torch +import torch.nn.functional as F +from torch.utils.checkpoint import checkpoint + +from colossalai.context.parallel_mode import ParallelMode +from colossalai.context.random import add_seed, seed, set_mode +from colossalai.utils import checkpoint + + +def forward(x, weight): + out = torch.matmul(x, weight) + with seed(ParallelMode.DATA): + out_ = F.dropout(out, p=0.4, training=True) + return out_ + + +@pytest.mark.gpu +def test_activation_checkpointing(): + add_seed(ParallelMode.GLOBAL, 1024) + set_mode(ParallelMode.GLOBAL) + global_cuda_rng_state = torch.cuda.get_rng_state() + add_seed(ParallelMode.DATA, 1026) + set_mode(ParallelMode.DATA) + data_parallel_cuda_rng_state = torch.cuda.get_rng_state() + set_mode(ParallelMode.GLOBAL) + + # normal + data = torch.rand(2, 2, requires_grad=True).cuda() + data.retain_grad() + weight = torch.rand(2, 4, requires_grad=True).cuda() + + data_ = data.clone().detach() + data_.requires_grad = True + data_.retain_grad() + weight_ = weight.clone().detach() + weight_.requires_grad = True + + out = forward(data, weight) + loss = out.sum() + loss.backward() + + # checkpoint + set_mode(ParallelMode.GLOBAL) + torch.cuda.set_rng_state(global_cuda_rng_state) + set_mode(ParallelMode.DATA) + torch.cuda.set_rng_state(data_parallel_cuda_rng_state) + set_mode(ParallelMode.GLOBAL) + out = checkpoint(forward, data_, weight_) + loss = out.sum() + loss.backward() + + assert torch.all(data.grad == data_.grad), 'Gradient of the input does not match' + + +if __name__ == '__main__': + test_activation_checkpointing() diff --git a/tests/test_zero_data_parallel/config.py b/tests/test_zero_data_parallel/config.py new file mode 100644 index 000000000..3e9d081d1 --- /dev/null +++ b/tests/test_zero_data_parallel/config.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- +import os +from pathlib import Path + +BATCH_SIZE = 128 +IMG_SIZE = 224 +NUM_CLS = 1000 + +# resnet 18 +model = dict( + type='VanillaResNet', + block_type='ResNetBottleneck', + layers=[3, 4, 6, 3], + num_cls=NUM_CLS +) + +train_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + transform_pipeline=[ + dict(type='RandomResizedCrop', size=IMG_SIZE), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) + ] + ), + dataloader=dict( + batch_size=64, + pin_memory=True, + num_workers=4, + sampler=dict( + type='DataParallelSampler', + shuffle=True, + ) + ) +) + +test_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + train=False, + transform_pipeline=[ + dict(type='Resize', size=(IMG_SIZE, IMG_SIZE)), + dict(type='ToTensor'), + dict(type='Normalize', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) + ] + ), + dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, 
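+        # these dataloader kwargs are unpacked verbatim into torch.utils.data.DataLoader in test_zero.py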
+ num_workers=4, + ) +) + +dist_initializer = [ + dict(type='DataParallelInitializer'), +] + +parallelization = dict( + pipeline=1, + tensor=1, + sequence=-1 +) + +optimizer = dict( + type='Adam', + lr=0.01 +) + +loss = dict( + type='CrossEntropyLoss' +) + +trainer = dict( + max_epochs=5, + max_iters=1000 +) + +amp = dict( + fp16=None, +) + +level = 2 + +parallel = dict( + pipeline=dict(size=1), + tensor=dict(size=1, mode=None) +) diff --git a/tests/test_zero_data_parallel/test_zero.py b/tests/test_zero_data_parallel/test_zero.py new file mode 100644 index 000000000..e47ca61a5 --- /dev/null +++ b/tests/test_zero_data_parallel/test_zero.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import os.path as osp + +import pytest +import torch +from torch.utils.data import DataLoader + +import colossalai +from colossalai.builder import build_dataset, build_loss, build_data_sampler, build_model +from colossalai.core import global_context +from colossalai.engine.gradient_handler import DataParallelGradientHandler +from colossalai.nn.optimizer import ZeroRedundancyOptimizer_Level_1, ZeroRedundancyOptimizer_Level_3, \ + ZeroRedundancyOptimizer_Level_2 +from colossalai.utils import print_rank_0 + +DIR_PATH = osp.dirname(osp.abspath(__file__)) +CONFIG_PATH = osp.join(DIR_PATH, 'config.py') + + +def run_dist(): + colossalai.init_dist(CONFIG_PATH) + + # build resnet model + model = build_model(global_context.config.model) + model.build_from_cfg() + model = model.cuda() + + level = global_context.config.level + + if level > 1: + model = model.half() + + # test init cuda memory + _ = torch.rand(1).cuda() + torch.cuda.synchronize() + max_alloc = torch.cuda.max_memory_allocated() + max_reserved = torch.cuda.max_memory_reserved() + print(f'before run: max_allocation = {max_alloc}, max_reserved = {max_reserved}') + + # build dataloader + train_dataset = build_dataset(global_context.config.train_data.dataset) + + sampler_cfg = global_context.config.train_data.dataloader.pop('sampler', None) + if sampler_cfg is None: + train_dataloader = DataLoader(dataset=train_dataset, **global_context.config.train_data.dataloader) + else: + sampler = build_data_sampler(sampler_cfg, train_dataset) + train_dataloader = DataLoader(dataset=train_dataset, sampler=sampler, + **global_context.config.train_data.dataloader) + + test_dataset = build_dataset(global_context.config.test_data.dataset) + test_dataloader = DataLoader(dataset=test_dataset, **global_context.config.test_data.dataloader) + + # build optimizer and loss + # optimizer = build_optimizer(global_context.config.optimizer, model) + optimizer = torch.optim.Adam(model.parameters(), lr=0.001) + if level == 1: + zero_optim = ZeroRedundancyOptimizer_Level_1(init_optimizer=optimizer, verbose=False) + elif level == 2: + zero_optim = ZeroRedundancyOptimizer_Level_2(init_optimizer=optimizer, cpu_offload=True, verbose=False) + elif level == 3: + zero_optim = ZeroRedundancyOptimizer_Level_3(init_optimizer=optimizer, + module=model, + verbose=False, + offload_optimizer_config=dict( + device='cpu', + pin_memory=True, + buffer_count=5, + fast_init=False + ), + offload_param_config=dict( + device='cpu', + pin_memory=True, + buffer_count=5, + buffer_size=1e8, + max_in_cpu=1e9 + ) + ) + + loss_fn = build_loss(global_context.config.loss) + gradient_handler = DataParallelGradientHandler(model, zero_optim) + + # train + for epoch in range(100): + model.train() + + # train + avg_train_loss = 0 + train_iter = 0 + + for idx, (data, label) in 
enumerate(train_dataloader): + # model = model.half() + data = data[0].cuda() + label = label[0].cuda() + + if level > 1: + data = data.half() + + output = model(data) + loss = loss_fn(output[0], label) + + if level > 1: + zero_optim.backward(loss) + zero_optim.overlapping_partition_gradients_reduce_epilogue() + else: + loss.backward() + gradient_handler.handle_gradient() + + zero_optim.step() + zero_optim.zero_grad() + + avg_train_loss += loss.detach().cpu().numpy() + train_iter += 1 + + print_rank_0(f'epoch: {epoch}, train loss: {avg_train_loss / train_iter}') + + if epoch % 2 == 0: + model.eval() + avg_eval_loss = 0 + correct = 0 + total = 0 + eval_iters = 0 + + for idx, (data, label) in enumerate(test_dataloader): + with torch.no_grad(): + data = data[0].cuda() + label = label[0].cuda() + + if level > 1: + data = data.half() + + output = model(data) + loss = loss_fn(output[0], label) + + avg_eval_loss += loss.detach().cpu().numpy() + preds = torch.argmax(output[0], dim=1) + total += data.size(0) + correct += sum(preds == label) + eval_iters += 1 + + print_rank_0(f'epoch: {epoch}, eval loss: {avg_eval_loss / eval_iters}, acc: {correct / total}') + + +@pytest.mark.skip("This test should be invoked manually using the script provided") +@pytest.mark.dist +def test_zero(): + run_dist() + + +if __name__ == '__main__': + test_zero() diff --git a/tests/test_zero_data_parallel/test_zero.sh b/tests/test_zero_data_parallel/test_zero.sh new file mode 100644 index 000000000..b725f52aa --- /dev/null +++ b/tests/test_zero_data_parallel/test_zero.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +test_file="test_zero.py" +python $test_file --local_rank $SLURM_PROCID --world_size $SLURM_NPROCS --host $HOST --port 29500 \ No newline at end of file diff --git a/tests/test_zero_tensor_parallel/configs/vit_2d_zero1.py b/tests/test_zero_tensor_parallel/configs/vit_2d_zero1.py new file mode 100644 index 000000000..61efa61ed --- /dev/null +++ b/tests/test_zero_tensor_parallel/configs/vit_2d_zero1.py @@ -0,0 +1,159 @@ +import os +from pathlib import Path + +import torch + +BATCH_SIZE = 512 +IMG_SIZE = 32 +PATCH_SIZE = 4 +DIM = 512 +NUM_ATTENTION_HEADS = 8 +SUMMA_DIM = 2 +NUM_CLASSES = 10 +DEPTH = 6 + +train_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + transform_pipeline=[ + dict(type='RandomCrop', size=IMG_SIZE, padding=4), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010]), + ] + ), + dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, + num_workers=4, + shuffle=True + ) +) + +test_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + train=False, + transform_pipeline=[ + dict(type='Resize', size=IMG_SIZE), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010] + ), + ] + ), + dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, + num_workers=4, + shuffle=True + ) +) + +optimizer = dict( + type='ZeroRedundancyOptimizer', + optimizer_class=torch.optim.Adam, + lr=0.001, + weight_decay=0 +) + +optimizer = dict( + type='Adam', + lr=0.001, + weight_decay=0 +) + +loss = dict( + type='CrossEntropyLoss2D', +) + +model = dict( + type='VisionTransformerFromConfig', + tensor_splitting_cfg=dict( + type='ViTInputSplitter2D', + ), + embedding_cfg=dict( + type='ViTPatchEmbedding2D', + img_size=IMG_SIZE, + patch_size=PATCH_SIZE, + embed_dim=DIM, + ), + token_fusion_cfg=dict( + 
type='ViTTokenFuser2D', + img_size=IMG_SIZE, + patch_size=PATCH_SIZE, + embed_dim=DIM, + drop_rate=0.1 + ), + norm_cfg=dict( + type='LayerNorm2D', + normalized_shape=DIM, + eps=1e-6, + ), + block_cfg=dict( + type='ViTBlock', + attention_cfg=dict( + type='ViTSelfAttention2D', + hidden_size=DIM, + num_attention_heads=NUM_ATTENTION_HEADS, + attention_dropout_prob=0., + hidden_dropout_prob=0.1, + ), + droppath_cfg=dict( + type='VanillaViTDropPath', + ), + mlp_cfg=dict( + type='ViTMLP2D', + in_features=DIM, + dropout_prob=0.1, + mlp_ratio=1 + ), + norm_cfg=dict( + type='LayerNorm2D', + normalized_shape=DIM, + eps=1e-6, + ), + ), + head_cfg=dict( + type='ViTHead2D', + hidden_size=DIM, + num_classes=NUM_CLASSES, + ), + embed_dim=DIM, + depth=DEPTH, + drop_path_rate=0., +) + +parallel = dict( + pipeline=dict(size=1), + tensor=dict(size=4, mode='2d'), +) + +from colossalai.engine import AMP_TYPE + +fp16 = dict( + mode=AMP_TYPE.PARALLEL, + initial_scale=2 ** 4 +) + +# +# fp16 = dict( +# mode=None, +# ) + +# both level 2 and 3 work +# zero = dict( +# type='ZeroRedundancyOptimizer_Level_1', +# ) + +lr_scheduler = dict( + type='LinearWarmupLR', + warmup_epochs=5 +) + +num_epochs = 60 diff --git a/tests/test_zero_tensor_parallel/configs/vit_2d_zero2.py b/tests/test_zero_tensor_parallel/configs/vit_2d_zero2.py new file mode 100644 index 000000000..2ce42a88c --- /dev/null +++ b/tests/test_zero_tensor_parallel/configs/vit_2d_zero2.py @@ -0,0 +1,149 @@ +import os +from pathlib import Path + +BATCH_SIZE = 512 +IMG_SIZE = 32 +PATCH_SIZE = 4 +DIM = 512 +NUM_ATTENTION_HEADS = 8 +SUMMA_DIM = 2 +NUM_CLASSES = 10 +DEPTH = 6 + +train_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + transform_pipeline=[ + dict(type='RandomCrop', size=IMG_SIZE, padding=4), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010]), + ] + ), + dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, + num_workers=4, + shuffle=True + ) +) + +test_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + train=False, + transform_pipeline=[ + dict(type='Resize', size=IMG_SIZE), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010] + ), + ] + ), + dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, + num_workers=4, + shuffle=True + ) +) + +optimizer = dict( + type='Adam', + lr=0.001, + weight_decay=0 +) + +loss = dict( + type='CrossEntropyLoss2D', +) + +model = dict( + type='VisionTransformerFromConfig', + tensor_splitting_cfg=dict( + type='ViTInputSplitter2D', + ), + embedding_cfg=dict( + type='ViTPatchEmbedding2D', + img_size=IMG_SIZE, + patch_size=PATCH_SIZE, + embed_dim=DIM, + ), + token_fusion_cfg=dict( + type='ViTTokenFuser2D', + img_size=IMG_SIZE, + patch_size=PATCH_SIZE, + embed_dim=DIM, + drop_rate=0.1 + ), + norm_cfg=dict( + type='LayerNorm2D', + normalized_shape=DIM, + eps=1e-6, + ), + block_cfg=dict( + type='ViTBlock', + attention_cfg=dict( + type='ViTSelfAttention2D', + hidden_size=DIM, + num_attention_heads=NUM_ATTENTION_HEADS, + attention_dropout_prob=0., + hidden_dropout_prob=0.1, + ), + droppath_cfg=dict( + type='VanillaViTDropPath', + ), + mlp_cfg=dict( + type='ViTMLP2D', + in_features=DIM, + dropout_prob=0.1, + mlp_ratio=1 + ), + norm_cfg=dict( + type='LayerNorm2D', + normalized_shape=DIM, + eps=1e-6, + ), + ), + head_cfg=dict( + type='ViTHead2D', + hidden_size=DIM, + 
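+        # NUM_CLASSES = 10, matching the CIFAR10Dataset configured above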
num_classes=NUM_CLASSES, + ), + embed_dim=DIM, + depth=DEPTH, + drop_path_rate=0., +) + +parallel = dict( + pipeline=dict(size=1), + tensor=dict(size=4, mode='2d'), +) + +# from colossalai.engine import AMP_TYPE +# +# fp16 = dict( +# mode=AMP_TYPE.PARALLEL, +# initial_scale=2 ** 4 +# ) + +fp16 = dict( + mode=None, +) + +# both level 2 and 3 work +zero = dict( + type='ZeroRedundancyOptimizer_Level_2' +) + +lr_scheduler = dict( + type='LinearWarmupLR', + warmup_epochs=5 +) + +num_epochs = 60 diff --git a/tests/test_zero_tensor_parallel/configs/vit_2d_zero3.py b/tests/test_zero_tensor_parallel/configs/vit_2d_zero3.py new file mode 100644 index 000000000..61f2a46f3 --- /dev/null +++ b/tests/test_zero_tensor_parallel/configs/vit_2d_zero3.py @@ -0,0 +1,149 @@ +import os +from pathlib import Path + +BATCH_SIZE = 512 +IMG_SIZE = 32 +PATCH_SIZE = 4 +DIM = 512 +NUM_ATTENTION_HEADS = 8 +SUMMA_DIM = 2 +NUM_CLASSES = 10 +DEPTH = 6 + +train_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + transform_pipeline=[ + dict(type='RandomCrop', size=IMG_SIZE, padding=4), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010]), + ] + ), + dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, + num_workers=4, + shuffle=True + ) +) + +test_data = dict( + dataset=dict( + type='CIFAR10Dataset', + root=Path(os.environ['DATA']), + train=False, + transform_pipeline=[ + dict(type='Resize', size=IMG_SIZE), + dict(type='ToTensor'), + dict(type='Normalize', + mean=[0.4914, 0.4822, 0.4465], + std=[0.2023, 0.1994, 0.2010] + ), + ] + ), + dataloader=dict( + batch_size=BATCH_SIZE, + pin_memory=True, + num_workers=4, + shuffle=True + ) +) + +optimizer = dict( + type='Adam', + lr=0.001, + weight_decay=0 +) + +loss = dict( + type='CrossEntropyLoss2D', +) + +model = dict( + type='VisionTransformerFromConfig', + tensor_splitting_cfg=dict( + type='ViTInputSplitter2D', + ), + embedding_cfg=dict( + type='ViTPatchEmbedding2D', + img_size=IMG_SIZE, + patch_size=PATCH_SIZE, + embed_dim=DIM, + ), + token_fusion_cfg=dict( + type='ViTTokenFuser2D', + img_size=IMG_SIZE, + patch_size=PATCH_SIZE, + embed_dim=DIM, + drop_rate=0.1 + ), + norm_cfg=dict( + type='LayerNorm2D', + normalized_shape=DIM, + eps=1e-6, + ), + block_cfg=dict( + type='ViTBlock', + attention_cfg=dict( + type='ViTSelfAttention2D', + hidden_size=DIM, + num_attention_heads=NUM_ATTENTION_HEADS, + attention_dropout_prob=0., + hidden_dropout_prob=0.1, + ), + droppath_cfg=dict( + type='VanillaViTDropPath', + ), + mlp_cfg=dict( + type='ViTMLP2D', + in_features=DIM, + dropout_prob=0.1, + mlp_ratio=1 + ), + norm_cfg=dict( + type='LayerNorm2D', + normalized_shape=DIM, + eps=1e-6, + ), + ), + head_cfg=dict( + type='ViTHead2D', + hidden_size=DIM, + num_classes=NUM_CLASSES, + ), + embed_dim=DIM, + depth=DEPTH, + drop_path_rate=0., +) + +parallel = dict( + pipeline=dict(size=1), + tensor=dict(size=4, mode='2d'), +) + +# from colossalai.engine import AMP_TYPE + +# fp16 = dict( +# mode=AMP_TYPE.PARALLEL, +# initial_scale=2 ** 4 +# ) + +fp16 = dict( + mode=None, +) + +# both level 2 and 3 work +zero = dict( + type='ZeroRedundancyOptimizer_Level_3' +) + +lr_scheduler = dict( + type='LinearWarmupLR', + warmup_epochs=5 +) + +num_epochs = 60 diff --git a/tests/test_zero_tensor_parallel/test.sh b/tests/test_zero_tensor_parallel/test.sh new file mode 100644 index 000000000..24d0c5423 --- /dev/null +++ b/tests/test_zero_tensor_parallel/test.sh @@ -0,0 +1,4 @@ 
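The `vit_2d_zero1/2/3.py` configs above differ only in their `zero`, `fp16`, and `optimizer` sections, and the constructor calls those sections ultimately correspond to are spelled out explicitly in `test_zero.py` earlier in this patch. The sketch below shows one rough way such a `zero` section could be resolved into a wrapper; `build_zero_optimizer` and `_ZERO_WRAPPERS` are illustrative names, not part of the library.

import torch

from colossalai.nn.optimizer import (ZeroRedundancyOptimizer_Level_1,
                                     ZeroRedundancyOptimizer_Level_2,
                                     ZeroRedundancyOptimizer_Level_3)

_ZERO_WRAPPERS = {
    'ZeroRedundancyOptimizer_Level_1': ZeroRedundancyOptimizer_Level_1,
    'ZeroRedundancyOptimizer_Level_2': ZeroRedundancyOptimizer_Level_2,
    'ZeroRedundancyOptimizer_Level_3': ZeroRedundancyOptimizer_Level_3,
}


def build_zero_optimizer(zero_cfg, model, base_optimizer):
    """Illustrative helper: wrap base_optimizer according to a `zero` config dict."""
    cfg = dict(zero_cfg)                       # copy so the config is not mutated
    wrapper = _ZERO_WRAPPERS[cfg.pop('type')]
    if wrapper is ZeroRedundancyOptimizer_Level_3:
        # level 3 also partitions parameters, so it needs the module itself
        return wrapper(init_optimizer=base_optimizer, module=model, **cfg)
    return wrapper(init_optimizer=base_optimizer, **cfg)

For example, `build_zero_optimizer(dict(type='ZeroRedundancyOptimizer_Level_2'), model, torch.optim.Adam(model.parameters(), lr=0.001))` mirrors the level-2 branch exercised in `test_zero.py`.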
+#!/usr/bin/env sh +test_file=$1 + +python $test_file --local_rank $SLURM_PROCID --world_size $SLURM_NPROCS --host $HOST --port 29500 \ No newline at end of file diff --git a/tests/test_zero_tensor_parallel/test_vit_2d/test_vit_2d.py b/tests/test_zero_tensor_parallel/test_vit_2d/test_vit_2d.py new file mode 100644 index 000000000..6533b3a6d --- /dev/null +++ b/tests/test_zero_tensor_parallel/test_vit_2d/test_vit_2d.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import os +from pathlib import Path + +import pytest +import torch.autograd + +import colossalai +from colossalai.context.parallel_mode import ParallelMode +from colossalai.core import global_context as gpc +from colossalai.engine import Engine +from colossalai.logging import get_global_dist_logger +from colossalai.nn.layer._parallel_utilities import _gather + +level = os.environ['LEVEL'] +CONFIG_PATH = Path(__file__).parent.parent.joinpath(f'configs/vit_2d_zero{level}.py') + + +def eval(engine): + engine.eval() + accumulated_loss = 0 + correct_sum = 0 + total_sum = 0 + + for i in range(engine.schedule.num_steps): + output, label, loss = engine.step() + accumulated_loss += loss.detach().cpu().numpy() + + output = _gather( + output[0], + ParallelMode.PARALLEL_2D_ROW, + 1 + ) + output = _gather( + output, + ParallelMode.PARALLEL_2D_COL, + 0, + ) + output = torch.argmax(output, dim=-1) + correct = torch.sum(label[0] == output) + correct_sum += correct + total_sum += label[0].size(0) + avg_loss = accumulated_loss / engine.schedule.num_steps + return correct_sum, total_sum, avg_loss + + +def train(engine): + engine.train() + accumulated_loss = 0 + + for i in range(engine.schedule.num_steps): + output, label, loss = engine.step() + accumulated_loss += loss.detach().cpu().numpy() + avg_loss = accumulated_loss / engine.schedule.num_steps + return avg_loss + + +@pytest.mark.dist +@pytest.mark.skip("This test should be invoked by test.sh in the same folder as it runs on multiple gpus") +def test_2d_parallel_vision_transformer(): + # init dist + model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = colossalai.initialize( + CONFIG_PATH) + logger = get_global_dist_logger() + + engine = Engine(model=model, + train_dataloader=train_dataloader, + test_dataloader=test_dataloader, + criterion=criterion, + optimizer=optimizer, + lr_scheduler=lr_scheduler, + schedule=schedule) + + # for param in model.parameters(): + # if isinstance(param, torch.HalfTensor): + # print(param.shape) + + logger.info('start training') + for epoch in range(gpc.config.num_epochs): + train_loss = train(engine) + + logger.info(f'epoch {epoch} - train loss: {train_loss}') + + if epoch % 2 == 0: + correct_sum, total_sum, eval_loss = eval(engine) + logger.info( + f'epoch {epoch} - eval loss: {eval_loss}, total: {total_sum}, ' + f'correct: {correct_sum}, acc: {correct_sum / total_sum}') + + +if __name__ == '__main__': + test_2d_parallel_vision_transformer()
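Both the 2-D test above and the 2.5-D test earlier in this patch evaluate the same way: each rank holds only a shard of the logits, so the output is re-assembled with `_gather` along every tensor-parallel mode before `argmax` and the accuracy count. A minimal sketch of that shared pattern follows; `score_shard` and the caller-supplied `gather_dims` mapping are illustrative names, not library APIs.

import torch

from colossalai.context.parallel_mode import ParallelMode
from colossalai.nn.layer._parallel_utilities import _gather


def score_shard(logits_shard, labels, gather_dims):
    """Re-assemble sharded logits and count correct predictions.

    gather_dims maps each tensor-parallel mode to the dimension split along it,
    e.g. for the 2-D test: {ParallelMode.PARALLEL_2D_ROW: 1,
                            ParallelMode.PARALLEL_2D_COL: 0}.
    """
    logits = logits_shard
    for mode, dim in gather_dims.items():
        logits = _gather(logits, mode, dim)        # all-gather this mode's shards
    preds = torch.argmax(logits, dim=-1)           # predicted class per sample
    correct = torch.sum(preds == labels).item()    # labels is the full label tensor
    return correct, labels.size(0)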