mirror of https://github.com/hpcaitech/ColossalAI
[NFC] polish colossalai/amp/torch_amp/_grad_scaler.py code style (#1823)
Co-authored-by: Ziyue Jiang <ziyue.jiang@gmail.com>
parent 90833b45dd
commit 5da03c936d
@@ -3,16 +3,18 @@
 # modified from https://github.com/pytorch/pytorch/blob/master/torch/cuda/amp/grad_scaler.py
 # to support tensor parallel
 
-import torch
-from collections import defaultdict, abc
 import warnings
+from collections import abc, defaultdict
 from enum import Enum
 from typing import Any, Dict, List, Optional, Tuple
-from colossalai.context import ParallelMode
+
+import torch
 import torch.distributed as dist
-from colossalai.core import global_context as gpc
-from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
 from packaging import version
+from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
+
+from colossalai.context import ParallelMode
+from colossalai.core import global_context as gpc
 
 
 class _MultiDeviceReplicator(object):
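For context, the reordering follows the common isort convention: imports grouped into standard-library, third-party, and first-party (`colossalai`) sections separated by blank lines, with plain `import` statements ahead of `from` imports and imported names alphabetized. A minimal sketch of reproducing the same grouping programmatically, assuming the `isort` package is available (the abbreviated `before` block is copied from the diff above; whether this commit was produced by isort or by hand is not stated in the source):

import isort

# Abbreviated copy of the import block as it stood before this commit.
before = (
    "import torch\n"
    "from collections import defaultdict, abc\n"
    "import warnings\n"
    "from colossalai.context import ParallelMode\n"
    "import torch.distributed as dist\n"
)

# isort.code() sorts a source string: stdlib, then third-party, then
# first-party sections. `known_first_party` tells isort to treat
# `colossalai` as project code so it lands in its own trailing section,
# matching the new ordering in the diff.
after = isort.code(before, known_first_party=["colossalai"])
print(after)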