From 5da03c936d0c823ee271879166b5ba23194980ff Mon Sep 17 00:00:00 2001
From: Ziyue Jiang
Date: Tue, 8 Nov 2022 16:17:11 +0800
Subject: [PATCH] [NFC] polish colossalai/amp/torch_amp/_grad_scaler.py code
 style (#1823)

Co-authored-by: Ziyue Jiang
---
 colossalai/amp/torch_amp/_grad_scaler.py | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/colossalai/amp/torch_amp/_grad_scaler.py b/colossalai/amp/torch_amp/_grad_scaler.py
index de39b3e16..7b78998fb 100644
--- a/colossalai/amp/torch_amp/_grad_scaler.py
+++ b/colossalai/amp/torch_amp/_grad_scaler.py
@@ -3,16 +3,18 @@
 # modified from https://github.com/pytorch/pytorch/blob/master/torch/cuda/amp/grad_scaler.py
 # to support tensor parallel
 
-import torch
-from collections import defaultdict, abc
 import warnings
+from collections import abc, defaultdict
 from enum import Enum
 from typing import Any, Dict, List, Optional, Tuple
-from colossalai.context import ParallelMode
+
+import torch
 import torch.distributed as dist
-from colossalai.core import global_context as gpc
-from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
 from packaging import version
+from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
+
+from colossalai.context import ParallelMode
+from colossalai.core import global_context as gpc
 
 
 class _MultiDeviceReplicator(object):
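
[Reviewer note, not part of the patch] The reordering above applies the
standard PEP 8 / isort import grouping: standard-library modules first, then
third-party packages, then first-party (colossalai) modules, with each group
alphabetized and the groups separated by a single blank line. A minimal sketch
of the resulting layout, with group labels added here for illustration (the
labels are not in the patched file):

    # group 1: standard library
    import warnings
    from collections import abc, defaultdict
    from enum import Enum
    from typing import Any, Dict, List, Optional, Tuple

    # group 2: third-party packages
    import torch
    import torch.distributed as dist
    from packaging import version
    from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors

    # group 3: first-party (colossalai)
    from colossalai.context import ParallelMode
    from colossalai.core import global_context as gpc

Assuming isort is the formatter in use (the patch itself does not name a
tool), the same grouping can be reproduced with:

    isort colossalai/amp/torch_amp/_grad_scaler.py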