From 8c62e50dbb0720e5b51984d94d0a28e1bb71fe7f Mon Sep 17 00:00:00 2001 From: Mingyan Jiang <1829166702@qq.com> Date: Tue, 23 May 2023 13:11:03 +0800 Subject: [PATCH 1/7] [doc] update amp document --- docs/sidebars.json | 11 +++------- docs/source/en/basics/define_your_config.md | 20 ++++++++----------- .../en/features/mixed_precision_training.md | 3 ++- .../zh-Hans/basics/define_your_config.md | 19 +++++++++--------- .../features/mixed_precision_training.md | 3 ++- 5 files changed, 24 insertions(+), 32 deletions(-) diff --git a/docs/sidebars.json b/docs/sidebars.json index 1b3ddedd1..dc8317222 100644 --- a/docs/sidebars.json +++ b/docs/sidebars.json @@ -26,11 +26,8 @@ "collapsed": true, "items": [ "basics/command_line_tool", - "basics/launch_colossalai", - "basics/booster_api", - "basics/booster_plugins", - "basics/booster_checkpoint", "basics/define_your_config", + "basics/launch_colossalai", "basics/initialize_features", "basics/engine_trainer", "basics/configure_parallelization", @@ -43,10 +40,9 @@ "label": "Features", "collapsed": true, "items": [ + "features/mixed_precision_training_with_booster", "features/mixed_precision_training", - "features/gradient_accumulation_with_booster", "features/gradient_accumulation", - "features/gradient_clipping_with_booster", "features/gradient_clipping", "features/gradient_handler", "features/zero_with_chunk", @@ -62,8 +58,7 @@ ] }, "features/pipeline_parallel", - "features/nvme_offload", - "features/cluster_utils" + "features/nvme_offload" ] }, { diff --git a/docs/source/en/basics/define_your_config.md b/docs/source/en/basics/define_your_config.md index 048ffcacb..223af44f8 100644 --- a/docs/source/en/basics/define_your_config.md +++ b/docs/source/en/basics/define_your_config.md @@ -2,9 +2,6 @@ Author: Guangyang Lu, Shenggui Li, Siqi Mai -> ⚠️ The information on this page is outdated and will be deprecated. Please check [Booster API](../basics/booster_api.md) for more information. - - **Prerequisite:** - [Distributed Training](../concepts/distributed_training.md) - [Colossal-AI Overview](../concepts/colossalai_overview.md) @@ -24,8 +21,7 @@ In this tutorial, we will cover how to define your configuration file. ## Configuration Definition In a configuration file, there are two types of variables. One serves as feature specification and the other serves -as hyper-parameters. All feature-related variables are reserved keywords. For example, if you want to use mixed precision -training, you need to use the variable name `fp16` in the config file and follow a pre-defined format. +as hyper-parameters. All feature-related variables are reserved keywords. For example, if you want to use 1D tensor parallelism, you need to use the variable name `parallel` in the config file and follow a pre-defined format. ### Feature Specification @@ -37,14 +33,13 @@ To illustrate the use of config file, we use mixed precision training as an exam follow the steps below. 1. create a configuration file (e.g. `config.py`, the file name can be anything) -2. define the mixed precision configuration in the config file. For example, in order to use mixed precision training -natively provided by PyTorch, you can just write these lines of code below into your config file. +2. define the hybrid parallelism configuration in the config file. For example, in order to use 1D tensor parallel, you can just write these lines of code below into your config file. 
```python - from colossalai.amp import AMP_TYPE - - fp16 = dict( - mode=AMP_TYPE.TORCH + parallel = dict( + data=1, + pipeline=1, + tensor=dict(size=2, mode='1d'), ) ``` @@ -57,7 +52,7 @@ the current directory. colossalai.launch(config='./config.py', ...) ``` -In this way, Colossal-AI knows what features you want to use and will inject this feature during `colossalai.initialize`. +In this way, Colossal-AI knows what features you want to use and will inject this feature. ### Global Hyper-parameters @@ -83,3 +78,4 @@ colossalai.launch(config='./config.py', ...) print(gpc.config.BATCH_SIZE) ``` + diff --git a/docs/source/en/features/mixed_precision_training.md b/docs/source/en/features/mixed_precision_training.md index 11aa52353..04f0bc6de 100644 --- a/docs/source/en/features/mixed_precision_training.md +++ b/docs/source/en/features/mixed_precision_training.md @@ -1,4 +1,4 @@ -# Auto Mixed Precision Training +# Auto Mixed Precision Training (Outdated) Author: Chuanrui Wang, Shenggui Li, Yongbin Li @@ -365,3 +365,4 @@ Use the following command to start the training scripts. You can change `--nproc ```python python -m torch.distributed.launch --nproc_per_node 4 --master_addr localhost --master_port 29500 train_with_engine.py --config config/config_AMP_torch.py ``` + diff --git a/docs/source/zh-Hans/basics/define_your_config.md b/docs/source/zh-Hans/basics/define_your_config.md index 720e75805..cf099f038 100644 --- a/docs/source/zh-Hans/basics/define_your_config.md +++ b/docs/source/zh-Hans/basics/define_your_config.md @@ -2,8 +2,6 @@ 作者: Guangyang Lu, Shenggui Li, Siqi Mai -> ⚠️ 此页面上的信息已经过时并将被废弃。请在[Booster API](../basics/booster_api.md)页面查阅更新。 - **预备知识:** - [分布式训练](../concepts/distributed_training.md) - [Colossal-AI 总览](../concepts/colossalai_overview.md) @@ -20,7 +18,7 @@ ## 配置定义 -在一个配置文件中,有两种类型的变量。一种是作为特征说明,另一种是作为超参数。所有与特征相关的变量都是保留关键字。例如,如果您想使用混合精度训练,需要在 config 文件中使用变量名`fp16`,并遵循预先定义的格式。 +在一个配置文件中,有两种类型的变量。一种是作为特征说明,另一种是作为超参数。所有与特征相关的变量都是保留关键字。例如,如果您想使用`1D`张量并行,需要在 config 文件中使用变量名`fp16`,并遵循预先定义的格式。 ### 功能配置 @@ -29,13 +27,13 @@ Colossal-AI 提供了一系列的功能来加快训练速度。每个功能都 为了说明配置文件的使用,我们在这里使用混合精度训练作为例子。您需要遵循以下步骤。 1. 创建一个配置文件(例如 `config.py`,您可以指定任意的文件名)。 -2. 在配置文件中定义混合精度的配置。例如,为了使用 PyTorch 提供的原始混合精度训练,您只需将下面这几行代码写入您的配置文件中。 +2. 在配置文件中定义混合并行的配置。例如,为了使用`1D`张量并行,您只需将下面这几行代码写入您的配置文件中。 - ```python - from colossalai.amp import AMP_TYPE - - fp16 = dict( - mode=AMP_TYPE.TORCH + ```python + parallel = dict( + data=1, + pipeline=1, + tensor=dict(size=2, mode='1d'), ) ``` @@ -47,7 +45,7 @@ Colossal-AI 提供了一系列的功能来加快训练速度。每个功能都 colossalai.launch(config='./config.py', ...) ``` -这样,Colossal-AI 便知道您想使用什么功能,并会在 `colossalai.initialize` 期间注入您所需要的功能。 +这样,Colossal-AI 便知道您想使用什么功能,并注入您所需要的功能。 ### 全局超参数 @@ -71,3 +69,4 @@ colossalai.launch(config='./config.py', ...) 
print(gpc.config.BATCH_SIZE) ``` + diff --git a/docs/source/zh-Hans/features/mixed_precision_training.md b/docs/source/zh-Hans/features/mixed_precision_training.md index c9db3a59c..b3b9a7ebc 100644 --- a/docs/source/zh-Hans/features/mixed_precision_training.md +++ b/docs/source/zh-Hans/features/mixed_precision_training.md @@ -1,4 +1,4 @@ -# 自动混合精度训练 (AMP) +# 自动混合精度训练 (旧版本) 作者: Chuanrui Wang, Shenggui Li, Yongbin Li @@ -342,3 +342,4 @@ for epoch in range(gpc.config.NUM_EPOCHS): ```python python -m torch.distributed.launch --nproc_per_node 4 --master_addr localhost --master_port 29500 train_with_engine.py --config config/config_AMP_torch.py ``` + From 1167bf5b102e5109499dd8dd9022cc4e77a63504 Mon Sep 17 00:00:00 2001 From: Mingyan Jiang <1829166702@qq.com> Date: Tue, 23 May 2023 13:14:36 +0800 Subject: [PATCH 2/7] [doc] update amp document --- docs/sidebars.json | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/docs/sidebars.json b/docs/sidebars.json index dc8317222..8be40e451 100644 --- a/docs/sidebars.json +++ b/docs/sidebars.json @@ -26,8 +26,11 @@ "collapsed": true, "items": [ "basics/command_line_tool", - "basics/define_your_config", "basics/launch_colossalai", + "basics/booster_api", + "basics/booster_plugins", + "basics/booster_checkpoint", + "basics/define_your_config", "basics/initialize_features", "basics/engine_trainer", "basics/configure_parallelization", @@ -42,7 +45,9 @@ "items": [ "features/mixed_precision_training_with_booster", "features/mixed_precision_training", + "features/gradient_accumulation_with_booster", "features/gradient_accumulation", + "features/gradient_clipping_with_booster", "features/gradient_clipping", "features/gradient_handler", "features/zero_with_chunk", @@ -58,7 +63,8 @@ ] }, "features/pipeline_parallel", - "features/nvme_offload" + "features/nvme_offload", + "features/cluster_utils" ] }, { From a520610bd9eaaf6374779756fb6b2c15c390a303 Mon Sep 17 00:00:00 2001 From: Mingyan Jiang <1829166702@qq.com> Date: Tue, 23 May 2023 13:15:56 +0800 Subject: [PATCH 3/7] [doc] update amp document --- .../mixed_precision_training_with_booster.md | 251 ++++++++++++++++++ .../mixed_precision_training_with_booster.md | 235 ++++++++++++++++ 2 files changed, 486 insertions(+) create mode 100644 docs/source/en/features/mixed_precision_training_with_booster.md create mode 100644 docs/source/zh-Hans/features/mixed_precision_training_with_booster.md diff --git a/docs/source/en/features/mixed_precision_training_with_booster.md b/docs/source/en/features/mixed_precision_training_with_booster.md new file mode 100644 index 000000000..2ae88b071 --- /dev/null +++ b/docs/source/en/features/mixed_precision_training_with_booster.md @@ -0,0 +1,251 @@ +# Auto Mixed Precision Training (Latest) + +Author: [Mingyan Jiang](https://github.com/jiangmingyan) + +**Prerequisite** +- [Define Your Configuration](../basics/define_your_config.md) +- [Training Booster](../basics/booster_api.md) + +**Related Paper** +- [Accelerating Scientific Computations with Mixed Precision Algorithms](https://arxiv.org/abs/0808.2794) + + +## Introduction + +AMP stands for automatic mixed precision training. +In Colossal-AI, we have incorporated different implementations of mixed precision training: + +1. torch.cuda.amp +2. apex.amp +3. 
naive amp

| Colossal-AI | Supports Tensor Parallelism | Supports Pipeline Parallelism | FP16 Scope |
| ----------- | --------------------------- | ----------------------------- | ---------- |
| AMP_TYPE.TORCH | ✅ | ❌ | Model parameters, activations and gradients are downcast to fp16 during forward and backward propagation |
| AMP_TYPE.APEX | ❌ | ❌ | More fine-grained; we can choose opt_level O0, O1, O2, O3 |
| AMP_TYPE.NAIVE | ✅ | ✅ | Model parameters and forward and backward operations are all downcast to fp16 |

The first two rely on the original implementations in PyTorch (version 1.6 and above) and NVIDIA Apex.
The last method is similar to Apex O2 level.
Among these methods, Apex AMP is not compatible with tensor parallelism.
This is because tensors are split across devices in tensor parallelism, so communication among different processes is required to check whether inf or nan occurs anywhere in the model weights.
We modified the torch AMP implementation so that it is now compatible with tensor parallelism.

> ❌️ fp16 and ZeRO are not compatible
>
> ⚠️ Pipeline parallelism currently supports only naive AMP

We recommend using torch AMP, as it generally gives better accuracy than naive AMP when no pipeline parallelism is used.

## Table of Contents

In this tutorial we will cover:

1. [AMP introduction](#amp-introduction)
2. [AMP in Colossal-AI](#amp-in-colossal-ai)
3. [Hands-on Practice](#hands-on-practice)

## AMP Introduction

Automatic Mixed Precision training is a mixture of FP16 and FP32 training.

The half-precision floating point format (FP16) has lower arithmetic complexity and higher compute efficiency. Besides, fp16 requires only half of the storage needed by fp32 and saves memory and network bandwidth, which leaves more memory for larger batch sizes and models.

However, other operations, like reductions, require the dynamic range of fp32 to avoid numeric overflow/underflow. That is why we introduce automatic mixed precision, which attempts to match each operation to its appropriate data type, reducing the memory footprint and improving training efficiency.

<figure style={{textAlign: "center"}}>
<figcaption>Illustration of an ordinary AMP (figure from PatrickStar paper)</figcaption>
</figure>
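To make the dynamic-range issue concrete, the short PyTorch snippet below (added here purely as an illustration, not part of the original tutorial) shows how a very small gradient value underflows to zero in fp16 and how loss scaling works around it.

```python
import torch

# fp16 can only represent magnitudes down to roughly 6e-8;
# anything smaller is flushed to zero.
normal_grad = torch.tensor(1e-5)
tiny_grad = torch.tensor(1e-8)

print(normal_grad.half())   # still representable in fp16
print(tiny_grad.half())     # underflows to 0 in fp16

# Loss scaling multiplies the loss (and therefore every gradient) by a large
# factor before the backward pass so tiny gradients stay representable;
# the scale is divided out again before the optimizer update.
scale = 2.0 ** 16
print((tiny_grad * scale).half())   # representable again after scaling
```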

## AMP in Colossal-AI

We support three AMP training methods and allow users to train with AMP without changing their code. To train with AMP, simply pass `mixed_precision='fp16'` when you instantiate the `Booster`. The booster currently supports torch AMP; the other two modes (Apex AMP and naive AMP) are still started through `colossalai.initialize`. If you need them, please refer to [this page](./mixed_precision_training.md). Support for `bf16` and `fp8` will be added next.

### Start with Booster

Instantiate `Booster` with `mixed_precision="fp16"`, and you can train with torch AMP.

```python
"""
    Mapping:
    'fp16': torch amp
    'fp16_apex': apex amp,
    'bf16': bf16,
    'fp8': fp8,
    'fp16_naive': naive amp
"""
from colossalai.booster import Booster
booster = Booster(mixed_precision='fp16',...)
```

Alternatively, you can create a `FP16TorchMixedPrecision` object, such as:

```python
from colossalai.booster.mixed_precision import FP16TorchMixedPrecision
mixed_precision = FP16TorchMixedPrecision(
    init_scale=2.**16,
    growth_factor=2.0,
    backoff_factor=0.5,
    growth_interval=2000)
booster = Booster(mixed_precision=mixed_precision,...)
```

The same goes for the other AMP types.

### Torch AMP Configuration

{{ autodoc:colossalai.booster.mixed_precision.FP16TorchMixedPrecision }}

### Apex AMP Configuration

For this mode, we rely on the Apex implementation of mixed precision training.
We support this plugin because it allows finer-grained control over mixed precision.
For example, the O2 level (optimization level 2) keeps batch normalization in fp32.

If you are looking for more details, please refer to the [Apex Documentation](https://nvidia.github.io/apex/).

{{ autodoc:colossalai.booster.mixed_precision.FP16ApexMixedPrecision }}

### Naive AMP Configuration

Naive AMP mode achieves mixed precision training while maintaining compatibility with complex tensor and pipeline parallelism.
This AMP mode casts all operations into fp16.
The following shows the mixed precision API for this mode.

{{ autodoc:colossalai.booster.mixed_precision.FP16NaiveMixedPrecision }}

When using `colossalai.booster`, you first need to instantiate a model, an optimizer and a criterion.
The output model is converted to an AMP model with smaller memory consumption.
If your input model is already too large to fit in a GPU, please instantiate your model weights with `dtype=torch.float16`.
Otherwise, try smaller models or check out more parallel training techniques!

## Hands-on Practice

Now we will introduce how to use AMP with Colossal-AI. In this practice, we will use torch AMP as an example.

### Step 1. Import libraries in train.py

Create a `train.py` and import the necessary dependencies. Remember to install `scipy` and `timm` by running
`pip install timm scipy`.

```python
import os
from pathlib import Path

import torch
from timm.models import vit_base_patch16_224
from titans.utils import barrier_context
from torchvision import datasets, transforms

import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import TorchDDPPlugin
from colossalai.logging import get_dist_logger
from colossalai.nn.lr_scheduler import LinearWarmupLR
```

### Step 2. Initialize Distributed Environment

We then need to initialize the distributed environment. For demo purposes, we use `launch_from_torch`. You can refer to [Launch Colossal-AI](../basics/launch_colossalai.md)
for other initialization methods.
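If you are not launching the script through `torchrun`/`torch.distributed.launch`, the lower-level `colossalai.launch` call is a possible alternative. The snippet below is only a sketch: it assumes the rank, world size, master address and port are supplied by your own scheduler or environment variables.

```python
# Hypothetical manual launch: the rank/world size/host/port values below are
# assumptions and must come from your cluster scheduler or environment.
import os
import colossalai

colossalai.launch(config=dict(),
                  rank=int(os.environ.get('RANK', 0)),
                  world_size=int(os.environ.get('WORLD_SIZE', 1)),
                  host=os.environ.get('MASTER_ADDR', 'localhost'),
                  port=int(os.environ.get('MASTER_PORT', '29500')))
```

For this demo we stick with `launch_from_torch`: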

```python
# initialize distributed setting
parser = colossalai.get_default_parser()
args = parser.parse_args()

# launch from torch
colossalai.launch_from_torch(config=dict())
```

### Step 3. Create training components

Build your model, optimizer, loss function, lr scheduler and dataloaders. Note that the root path of the dataset is
obtained from the environment variable `DATA`. You may `export DATA=/path/to/data` or change `Path(os.environ['DATA'])`
to a path on your machine. Data will be automatically downloaded to the root path.

```python
# define the constants
NUM_EPOCHS = 2
BATCH_SIZE = 128

# build model
model = vit_base_patch16_224(drop_rate=0.1)

# build dataloader
train_dataset = datasets.Caltech101(
    root=Path(os.environ['DATA']),
    download=True,
    transform=transforms.Compose([
        transforms.Resize(256),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        Gray2RGB(),
        transforms.Normalize([0.5, 0.5, 0.5],
                             [0.5, 0.5, 0.5])
    ]))

# build optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, weight_decay=0.1)

# build loss
criterion = torch.nn.CrossEntropyLoss()

# lr_scheduler
lr_scheduler = LinearWarmupLR(optimizer, warmup_steps=50, total_steps=NUM_EPOCHS)
```

### Step 4. Inject AMP Feature

Create a `MixedPrecision` object (only if you need to customize the configuration) and a `TorchDDPPlugin` object, then call `booster.boost` to convert the training components to run with FP16.

```python
plugin = TorchDDPPlugin()
train_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
booster = Booster(mixed_precision='fp16', plugin=plugin)

# if you need to customize the config, do it like this
# >>> from colossalai.booster.mixed_precision import FP16TorchMixedPrecision
# >>> mixed_precision = FP16TorchMixedPrecision(
# >>>     init_scale=2.**16,
# >>>     growth_factor=2.0,
# >>>     backoff_factor=0.5,
# >>>     growth_interval=2000)
# >>> plugin = TorchDDPPlugin()
# >>> booster = Booster(mixed_precision=mixed_precision, plugin=plugin)

# boost model, optimizer, criterion, train_dataloader, lr_scheduler
model, optimizer, criterion, train_dataloader, lr_scheduler = booster.boost(model, optimizer, criterion, train_dataloader, lr_scheduler)
```

### Step 5. Train with Booster

Use the booster in a normal training loop.

```python
model.train()
for epoch in range(NUM_EPOCHS):
    for img, label in train_dataloader:
        img = img.cuda()
        label = label.cuda()
        optimizer.zero_grad()
        output = model(img)
        loss = criterion(output, label)
        booster.backward(loss, optimizer)
        optimizer.step()
        lr_scheduler.step()
```

### Step 6. Invoke Training Scripts

Use the following command to start the training script. You can change `--nproc_per_node` to use a different number of GPUs.
+ +```shell +colossalai run --nproc_per_node 1 train.py --config config/config.py +``` + diff --git a/docs/source/zh-Hans/features/mixed_precision_training_with_booster.md b/docs/source/zh-Hans/features/mixed_precision_training_with_booster.md new file mode 100644 index 000000000..689e28fcc --- /dev/null +++ b/docs/source/zh-Hans/features/mixed_precision_training_with_booster.md @@ -0,0 +1,235 @@ +# 自动混合精度训练 (最新版本) + +作者: [Mingyan Jiang](https://github.com/jiangmingyan) + +**前置教程** +- [定义配置文件](../basics/define_your_config.md) +- [booster使用](../basics/booster_api.md) + +**相关论文** +- [Accelerating Scientific Computations with Mixed Precision Algorithms](https://arxiv.org/abs/0808.2794) + + +## 引言 + +AMP 代表自动混合精度训练。 +在 Colossal-AI 中, 我们结合了混合精度训练的不同实现: + +1. torch.cuda.amp +2. apex.amp +3. naive amp + + +| Colossal-AI | 支持张量并行 | 支持流水并行 | fp16范围 | +| ----------- | ----------------------- | ------------------------- | ----------- | +| AMP_TYPE.TORCH | ✅ | ❌ | 在前向和反向传播期间,模型参数、激活和梯度向下转换至fp16 | +| AMP_TYPE.APEX | ❌ | ❌ | 更细粒度,我们可以选择 opt_level O0, O1, O2, O3 | +| AMP_TYPE.NAIVE | ✅ | ✅ | 模型参数、前向和反向操作,全都向下转换至fp16 | + +前两个依赖于 PyTorch (1.6及以上) 和 NVIDIA Apex 的原始实现。最后一种方法类似 Apex O2。在这些方法中,Apex-AMP 与张量并行不兼容。这是因为张量是以张量并行的方式在设备之间拆分的,因此,需要在不同的进程之间进行通信,以检查整个模型权重中是否出现inf或nan。我们修改了torch amp实现,使其现在与张量并行兼容。 + +> ❌️ fp16与ZeRO不兼容 +> +> ⚠️ 流水并行目前仅支持naive amp + +我们建议使用 torch AMP,因为在不使用流水并行时,它通常比 NVIDIA AMP 提供更好的准确性。 + +## 目录 + +在本教程中,我们将介绍: + +1. [AMP 介绍](#amp-介绍) +2. [Colossal-AI 中的 AMP](#colossal-ai-中的-amp) +3. [练习实例](#实例) + +## AMP 介绍 + +自动混合精度训练是混合 FP16 和 FP32 训练。 + +半精度浮点格式(FP16)具有较低的算法复杂度和较高的计算效率。此外,FP16 仅需要 FP32 所需的一半存储空间,并节省了内存和网络带宽,从而为大 batch size 和大模型提供了更多内存。 + +然而,还有其他操作,如缩减,需要 FP32 的动态范围,以避免数值溢出/下溢。因此,我们引入自动混合精度,尝试将每个操作与其相应的数据类型相匹配,这可以减少内存占用并提高训练效率。 + +
<figcaption>AMP 示意图 (图片来自 PatrickStar 论文)</figcaption>
</figure>
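为了更直观地说明动态范围问题,下面这段 PyTorch 小例子(此处仅作补充示意,并非原教程内容)展示了过小的梯度在 fp16 下会下溢为零,以及损失缩放如何缓解这一问题。

```python
import torch

# fp16 大约只能表示到 6e-8 量级,更小的数值会被下溢截断为 0
normal_grad = torch.tensor(1e-5)
tiny_grad = torch.tensor(1e-8)

print(normal_grad.half())   # 在 fp16 中仍可表示
print(tiny_grad.half())     # 在 fp16 中下溢为 0

# 损失缩放在反向传播前将损失(进而所有梯度)乘以一个较大的系数,
# 使微小梯度保持可表示,并在优化器更新前再除回去。
scale = 2.0 ** 16
print((tiny_grad * scale).half())   # 缩放后重新可以表示
```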
+ +## Colossal-AI 中的 AMP + +我们支持三种 AMP 训练方法,并允许用户在没有改变代码的情况下使用 AMP 进行训练。booster支持amp特性注入,如果您要使用混合精度训练,则在创建booster实例时指定`mixed_precision`参数,我们现已支持torch amp,apex amp, naive amp(现已移植torch amp至booster,apex amp, naive amp仍由`colossalai.initialize`方式启动,如您需使用,请[参考](./mixed_precision_training.md);后续将会拓展`bf16`,`pf8`的混合精度训练. + +#### booster启动方式 +您可以在创建booster实例时,指定`mixed_precision="fp16"`即使用torch amp。 + +```python +""" + 初始化映射关系如下: + 'fp16': torch amp + 'fp16_apex': apex amp, + 'bf16': bf16, + 'fp8': fp8, + 'fp16_naive': naive amp +""" +from colossalai import Booster +booster = Booster(mixed_precision='fp16',...) +``` + +或者您可以自定义一个`FP16TorchMixedPrecision`对象,如 + +```python +from colossalai.mixed_precision import FP16TorchMixedPrecision +mixed_precision = FP16TorchMixedPrecision( + init_scale=2.**16, + growth_factor=2.0, + backoff_factor=0.5, + growth_interval=2000) +booster = Booster(mixed_precision=mixed_precision,...) +``` + +其他类型的amp使用方式也是一样的。 + +### Torch AMP 配置 + +{{ autodoc:colossalai.booster.mixed_precision.FP16TorchMixedPrecision }} + +### Apex AMP 配置 + +对于这种模式,我们依靠 Apex 实现混合精度训练。我们支持这个插件,因为它允许对混合精度的粒度进行更精细的控制。 +例如, O2 水平 (优化器水平2) 将保持 batch normalization 为 FP32。 + +如果你想了解更多细节,请参考 [Apex Documentation](https://nvidia.github.io/apex/)。 + +{{ autodoc:colossalai.booster.mixed_precision.FP16ApexMixedPrecision }} + +### Naive AMP 配置 + +在 Naive AMP 模式中, 我们实现了混合精度训练,同时保持了与复杂张量和流水并行的兼容性。该 AMP 模式将所有操作转为 FP16 。下列代码块展示了该模式的booster启动方式。 + +{{ autodoc:colossalai.booster.mixed_precision.FP16NaiveMixedPrecision }} + +当使用`colossalai.booster`时, 首先需要实例化一个模型、一个优化器和一个标准。将输出模型转换为内存消耗较小的 AMP 模型。如果您的输入模型已经太大,无法放置在 GPU 中,请使用`dtype=torch.float16`实例化你的模型。或者请尝试更小的模型,或尝试更多的并行化训练技术! + +## 实例 + +下面我们将展现如何在 Colossal-AI 使用 AMP。在该例程中,我们使用 Torch AMP. + +### 步骤 1. 在 train.py 导入相关库 + +创建`train.py`并导入必要依赖. 请记得通过命令`pip install timm scipy`安装`scipy`和`timm`。 + +```python +import os +from pathlib import Path + +import torch +from timm.models import vit_base_patch16_224 +from titans.utils import barrier_context +from torchvision import datasets, transforms + +import colossalai +from colossalai.booster import Booster +from colossalai.booster.plugin import TorchDDPPlugin +from colossalai.logging import get_dist_logger +from colossalai.nn.lr_scheduler import LinearWarmupLR +``` + +### 步骤 2. 初始化分布式环境 + +我们需要初始化分布式环境。为了快速演示,我们使用`launch_from_torch`。你可以参考 [Launch Colossal-AI](../basics/launch_colossalai.md) +使用其他初始化方法。 + +```python +# 初始化分布式设置 +parser = colossalai.get_default_parser() +args = parser.parse_args() + +# launch from torch +colossalai.launch_from_torch(config=dict()) + +``` + +### 步骤 3. 创建训练组件 + +构建你的模型、优化器、损失函数、学习率调整器和数据加载器。注意数据集的路径从环境变量`DATA`获得。你可以通过 `export DATA=/path/to/data` 或 `Path(os.environ['DATA'])` +在你的机器上设置路径。数据将会被自动下载到该路径。 + +```python +# define the constants +NUM_EPOCHS = 2 +BATCH_SIZE = 128 +# build model +model = vit_base_patch16_224(drop_rate=0.1) + +# build dataloader +train_dataset = datasets.Caltech101( + root=Path(os.environ['DATA']), + download=True, + transform=transforms.Compose([ + transforms.Resize(256), + transforms.RandomResizedCrop(224), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + Gray2RGB(), + transforms.Normalize([0.5, 0.5, 0.5], + [0.5, 0.5, 0.5]) + ])) + +# build optimizer +optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, weight_decay=0.1) + +# build loss +criterion = torch.nn.CrossEntropyLoss() + +# lr_scheduelr +lr_scheduler = LinearWarmupLR(optimizer, warmup_steps=50, total_steps=NUM_EPOCHS) +``` + +### 步骤 4. 
插入 AMP +创建一个MixedPrecision对象(如果需要)及torchDDPPlugin对象,调用 `colossalai.boost` 将所有训练组件转为为FP16模式. + +```python +plugin = TorchDDPPlugin() +train_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, drop_last=True) +booster = Booster(mixed_precision='fp16', plugin=plugin) + +# if you need to customize the config, do like this +# >>> from colossalai.mixed_precision import FP16TorchMixedPrecision +# >>> mixed_precision = FP16TorchMixedPrecision( +# >>> init_scale=2.**16, +# >>> growth_factor=2.0, +# >>> backoff_factor=0.5, +# >>> growth_interval=2000) +# >>> plugin = TorchDDPPlugin() +# >>> booster = Booster(mixed_precision=mixed_precision, plugin=plugin) + +# boost model, optimizer, criterion, dataloader, lr_scheduler +model, optimizer, criterion, dataloader, lr_scheduler = booster.boost(model, optimizer, criterion, dataloader, lr_scheduler) +``` + +### 步骤 5. 使用 booster 训练 + +使用booster构建一个普通的训练循环。 + +```python +model.train() +for epoch in range(NUM_EPOCHS): + for img, label in enumerate(train_dataloader): + img = img.cuda() + label = label.cuda() + optimizer.zero_grad() + output = model(img) + loss = criterion(output, label) + booster.backward(loss, optimizer) + optimizer.step() + lr_scheduler.step() +``` + +### 步骤 6. 启动训练脚本 + +使用下列命令启动训练脚本,你可以改变 `--nproc_per_node` 以使用不同数量的 GPU。 + +```shell +colossalai run --nproc_per_node 1 train.py --config config/config.py +``` + From 75272ef37ba7f128adc40c36276bc444c146064d Mon Sep 17 00:00:00 2001 From: jiangmingyan <1829166702@qq.com> Date: Tue, 23 May 2023 16:34:30 +0800 Subject: [PATCH 4/7] [doc] add removed warning --- docs/source/en/basics/define_your_config.md | 2 ++ docs/source/en/features/mixed_precision_training.md | 2 +- docs/source/zh-Hans/basics/define_your_config.md | 2 ++ docs/source/zh-Hans/features/mixed_precision_training.md | 2 +- 4 files changed, 6 insertions(+), 2 deletions(-) diff --git a/docs/source/en/basics/define_your_config.md b/docs/source/en/basics/define_your_config.md index 223af44f8..46b7112b7 100644 --- a/docs/source/en/basics/define_your_config.md +++ b/docs/source/en/basics/define_your_config.md @@ -2,6 +2,8 @@ Author: Guangyang Lu, Shenggui Li, Siqi Mai +- > ⚠️ The information on this page is outdated and will be deprecated. Please check [Booster API](../basics/booster_api.md) for more information. + **Prerequisite:** - [Distributed Training](../concepts/distributed_training.md) - [Colossal-AI Overview](../concepts/colossalai_overview.md) diff --git a/docs/source/en/features/mixed_precision_training.md b/docs/source/en/features/mixed_precision_training.md index 04f0bc6de..8579d586e 100644 --- a/docs/source/en/features/mixed_precision_training.md +++ b/docs/source/en/features/mixed_precision_training.md @@ -362,7 +362,7 @@ for epoch in range(gpc.config.NUM_EPOCHS): Use the following command to start the training scripts. You can change `--nproc_per_node` to use a different number of GPUs. 
-```python +```shell python -m torch.distributed.launch --nproc_per_node 4 --master_addr localhost --master_port 29500 train_with_engine.py --config config/config_AMP_torch.py ``` diff --git a/docs/source/zh-Hans/basics/define_your_config.md b/docs/source/zh-Hans/basics/define_your_config.md index cf099f038..d1de085e3 100644 --- a/docs/source/zh-Hans/basics/define_your_config.md +++ b/docs/source/zh-Hans/basics/define_your_config.md @@ -2,6 +2,8 @@ 作者: Guangyang Lu, Shenggui Li, Siqi Mai +- > ⚠️ 此页面上的信息已经过时并将被废弃。请在[Booster API](../basics/booster_api.md)页面查阅更新。 + **预备知识:** - [分布式训练](../concepts/distributed_training.md) - [Colossal-AI 总览](../concepts/colossalai_overview.md) diff --git a/docs/source/zh-Hans/features/mixed_precision_training.md b/docs/source/zh-Hans/features/mixed_precision_training.md index b3b9a7ebc..c4df6271b 100644 --- a/docs/source/zh-Hans/features/mixed_precision_training.md +++ b/docs/source/zh-Hans/features/mixed_precision_training.md @@ -339,7 +339,7 @@ for epoch in range(gpc.config.NUM_EPOCHS): 使用下列命令启动训练脚本,你可以改变 `--nproc_per_node` 以使用不同数量的 GPU。 -```python +```shell python -m torch.distributed.launch --nproc_per_node 4 --master_addr localhost --master_port 29500 train_with_engine.py --config config/config_AMP_torch.py ``` From c425a69d52c714423bbc5a55f6f3c609723993d9 Mon Sep 17 00:00:00 2001 From: jiangmingyan <1829166702@qq.com> Date: Tue, 23 May 2023 16:42:36 +0800 Subject: [PATCH 5/7] [doc] add removed change of config.py --- docs/source/en/basics/define_your_config.md | 20 ++++++++++--------- .../zh-Hans/basics/define_your_config.md | 19 +++++++++--------- 2 files changed, 20 insertions(+), 19 deletions(-) diff --git a/docs/source/en/basics/define_your_config.md b/docs/source/en/basics/define_your_config.md index 46b7112b7..048ffcacb 100644 --- a/docs/source/en/basics/define_your_config.md +++ b/docs/source/en/basics/define_your_config.md @@ -2,7 +2,8 @@ Author: Guangyang Lu, Shenggui Li, Siqi Mai -- > ⚠️ The information on this page is outdated and will be deprecated. Please check [Booster API](../basics/booster_api.md) for more information. +> ⚠️ The information on this page is outdated and will be deprecated. Please check [Booster API](../basics/booster_api.md) for more information. + **Prerequisite:** - [Distributed Training](../concepts/distributed_training.md) @@ -23,7 +24,8 @@ In this tutorial, we will cover how to define your configuration file. ## Configuration Definition In a configuration file, there are two types of variables. One serves as feature specification and the other serves -as hyper-parameters. All feature-related variables are reserved keywords. For example, if you want to use 1D tensor parallelism, you need to use the variable name `parallel` in the config file and follow a pre-defined format. +as hyper-parameters. All feature-related variables are reserved keywords. For example, if you want to use mixed precision +training, you need to use the variable name `fp16` in the config file and follow a pre-defined format. ### Feature Specification @@ -35,13 +37,14 @@ To illustrate the use of config file, we use mixed precision training as an exam follow the steps below. 1. create a configuration file (e.g. `config.py`, the file name can be anything) -2. define the hybrid parallelism configuration in the config file. For example, in order to use 1D tensor parallel, you can just write these lines of code below into your config file. +2. define the mixed precision configuration in the config file. 
For example, in order to use mixed precision training +natively provided by PyTorch, you can just write these lines of code below into your config file. ```python - parallel = dict( - data=1, - pipeline=1, - tensor=dict(size=2, mode='1d'), + from colossalai.amp import AMP_TYPE + + fp16 = dict( + mode=AMP_TYPE.TORCH ) ``` @@ -54,7 +57,7 @@ the current directory. colossalai.launch(config='./config.py', ...) ``` -In this way, Colossal-AI knows what features you want to use and will inject this feature. +In this way, Colossal-AI knows what features you want to use and will inject this feature during `colossalai.initialize`. ### Global Hyper-parameters @@ -80,4 +83,3 @@ colossalai.launch(config='./config.py', ...) print(gpc.config.BATCH_SIZE) ``` - diff --git a/docs/source/zh-Hans/basics/define_your_config.md b/docs/source/zh-Hans/basics/define_your_config.md index d1de085e3..720e75805 100644 --- a/docs/source/zh-Hans/basics/define_your_config.md +++ b/docs/source/zh-Hans/basics/define_your_config.md @@ -2,7 +2,7 @@ 作者: Guangyang Lu, Shenggui Li, Siqi Mai -- > ⚠️ 此页面上的信息已经过时并将被废弃。请在[Booster API](../basics/booster_api.md)页面查阅更新。 +> ⚠️ 此页面上的信息已经过时并将被废弃。请在[Booster API](../basics/booster_api.md)页面查阅更新。 **预备知识:** - [分布式训练](../concepts/distributed_training.md) @@ -20,7 +20,7 @@ ## 配置定义 -在一个配置文件中,有两种类型的变量。一种是作为特征说明,另一种是作为超参数。所有与特征相关的变量都是保留关键字。例如,如果您想使用`1D`张量并行,需要在 config 文件中使用变量名`fp16`,并遵循预先定义的格式。 +在一个配置文件中,有两种类型的变量。一种是作为特征说明,另一种是作为超参数。所有与特征相关的变量都是保留关键字。例如,如果您想使用混合精度训练,需要在 config 文件中使用变量名`fp16`,并遵循预先定义的格式。 ### 功能配置 @@ -29,13 +29,13 @@ Colossal-AI 提供了一系列的功能来加快训练速度。每个功能都 为了说明配置文件的使用,我们在这里使用混合精度训练作为例子。您需要遵循以下步骤。 1. 创建一个配置文件(例如 `config.py`,您可以指定任意的文件名)。 -2. 在配置文件中定义混合并行的配置。例如,为了使用`1D`张量并行,您只需将下面这几行代码写入您的配置文件中。 +2. 在配置文件中定义混合精度的配置。例如,为了使用 PyTorch 提供的原始混合精度训练,您只需将下面这几行代码写入您的配置文件中。 - ```python - parallel = dict( - data=1, - pipeline=1, - tensor=dict(size=2, mode='1d'), + ```python + from colossalai.amp import AMP_TYPE + + fp16 = dict( + mode=AMP_TYPE.TORCH ) ``` @@ -47,7 +47,7 @@ Colossal-AI 提供了一系列的功能来加快训练速度。每个功能都 colossalai.launch(config='./config.py', ...) ``` -这样,Colossal-AI 便知道您想使用什么功能,并注入您所需要的功能。 +这样,Colossal-AI 便知道您想使用什么功能,并会在 `colossalai.initialize` 期间注入您所需要的功能。 ### 全局超参数 @@ -71,4 +71,3 @@ colossalai.launch(config='./config.py', ...) print(gpc.config.BATCH_SIZE) ``` - From 8aa1fb2c7ffe9bd14aafdcae6d4fcf615dddfade Mon Sep 17 00:00:00 2001 From: jiangmingyan <1829166702@qq.com> Date: Tue, 23 May 2023 17:50:30 +0800 Subject: [PATCH 6/7] [doc]fix --- .../source/en/features/mixed_precision_training_with_booster.md | 2 +- .../zh-Hans/features/mixed_precision_training_with_booster.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/en/features/mixed_precision_training_with_booster.md b/docs/source/en/features/mixed_precision_training_with_booster.md index 2ae88b071..e9b6f684f 100644 --- a/docs/source/en/features/mixed_precision_training_with_booster.md +++ b/docs/source/en/features/mixed_precision_training_with_booster.md @@ -246,6 +246,6 @@ for epoch in range(NUM_EPOCHS): Use the following command to start the training scripts. You can change `--nproc_per_node` to use a different number of GPUs. 
```shell -colossalai run --nproc_per_node 1 train.py --config config/config.py +colossalai run --nproc_per_node 1 train.py ``` diff --git a/docs/source/zh-Hans/features/mixed_precision_training_with_booster.md b/docs/source/zh-Hans/features/mixed_precision_training_with_booster.md index 689e28fcc..6954556a8 100644 --- a/docs/source/zh-Hans/features/mixed_precision_training_with_booster.md +++ b/docs/source/zh-Hans/features/mixed_precision_training_with_booster.md @@ -230,6 +230,6 @@ for epoch in range(NUM_EPOCHS): 使用下列命令启动训练脚本,你可以改变 `--nproc_per_node` 以使用不同数量的 GPU。 ```shell -colossalai run --nproc_per_node 1 train.py --config config/config.py +colossalai run --nproc_per_node 1 train.py ``` From 278fcbc4444f15be8c155d382f4e1eaacdc89456 Mon Sep 17 00:00:00 2001 From: jiangmingyan <1829166702@qq.com> Date: Tue, 23 May 2023 17:53:11 +0800 Subject: [PATCH 7/7] [doc]fix --- docs/source/en/features/gradient_accumulation_with_booster.md | 2 +- docs/source/en/features/gradient_clipping_with_booster.md | 2 +- .../zh-Hans/features/gradient_accumulation_with_booster.md | 2 +- docs/source/zh-Hans/features/gradient_clipping_with_booster.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/source/en/features/gradient_accumulation_with_booster.md b/docs/source/en/features/gradient_accumulation_with_booster.md index f319ef5b2..201e3bc2b 100644 --- a/docs/source/en/features/gradient_accumulation_with_booster.md +++ b/docs/source/en/features/gradient_accumulation_with_booster.md @@ -128,7 +128,7 @@ for idx, (img, label) in enumerate(train_dataloader): ### Step 6. Invoke Training Scripts To verify gradient accumulation, we can just check the change of parameter values. When gradient accumulation is set, parameters are only updated in the last step. You can run the script using this command: ```shell -colossalai run --nproc_per_node 1 train.py --config config.py +colossalai run --nproc_per_node 1 train.py ``` You will see output similar to the text below. This shows gradient is indeed accumulated as the parameter is not updated diff --git a/docs/source/en/features/gradient_clipping_with_booster.md b/docs/source/en/features/gradient_clipping_with_booster.md index b9c7bb206..8686eb06f 100644 --- a/docs/source/en/features/gradient_clipping_with_booster.md +++ b/docs/source/en/features/gradient_clipping_with_booster.md @@ -136,7 +136,7 @@ for idx, (img, label) in enumerate(train_dataloader): You can run the script using this command: ```shell -colossalai run --nproc_per_node 1 train.py --config config/config.py +colossalai run --nproc_per_node 1 train.py ``` diff --git a/docs/source/zh-Hans/features/gradient_accumulation_with_booster.md b/docs/source/zh-Hans/features/gradient_accumulation_with_booster.md index 4dc0b3db4..ab86f34f2 100644 --- a/docs/source/zh-Hans/features/gradient_accumulation_with_booster.md +++ b/docs/source/zh-Hans/features/gradient_accumulation_with_booster.md @@ -131,7 +131,7 @@ for idx, (img, label) in enumerate(train_dataloader): ### 步骤 6. 
启动训练脚本 为了验证梯度累积,我们可以只检查参数值的变化。当设置梯度累加时,仅在最后一步更新参数。您可以使用以下命令运行脚本: ```shell -colossalai run --nproc_per_node 1 train.py --config config.py +colossalai run --nproc_per_node 1 train.py ``` 你将会看到类似下方的文本输出。这展现了梯度虽然在前3个迭代中被计算,但直到最后一次迭代,参数才被更新。 diff --git a/docs/source/zh-Hans/features/gradient_clipping_with_booster.md b/docs/source/zh-Hans/features/gradient_clipping_with_booster.md index 2f023cefe..3c61356dd 100644 --- a/docs/source/zh-Hans/features/gradient_clipping_with_booster.md +++ b/docs/source/zh-Hans/features/gradient_clipping_with_booster.md @@ -135,6 +135,6 @@ for idx, (img, label) in enumerate(train_dataloader): 你可以使用以下命令运行脚本: ```shell -colossalai run --nproc_per_node 1 train.py --config config/config.py +colossalai run --nproc_per_node 1 train.py ```