From e83b2ce8531aa950d15d32670dd1cfc1d35519e3 Mon Sep 17 00:00:00 2001
From: XYE <92607131+Itok2000u@users.noreply.github.com>
Date: Wed, 13 Jul 2022 11:33:16 +0800
Subject: [PATCH] [NFC] polish colossalai/nn/layer/vanilla/layers.py code
 style (#1295)

---
 colossalai/nn/layer/vanilla/layers.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/colossalai/nn/layer/vanilla/layers.py b/colossalai/nn/layer/vanilla/layers.py
index dfc37af13..a90871236 100644
--- a/colossalai/nn/layer/vanilla/layers.py
+++ b/colossalai/nn/layer/vanilla/layers.py
@@ -29,9 +29,9 @@ def drop_path(x, drop_prob: float = 0., training: bool = False):
     if drop_prob == 0. or not training:
         return x
     keep_prob = 1 - drop_prob
-    shape = (x.shape[0], ) + (1, ) * (x.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
+    shape = (x.shape[0],) + (1,) * (x.ndim - 1)    # work with diff dim tensors, not just 2D ConvNets
     random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
-    random_tensor.floor_()  # binarize
+    random_tensor.floor_()    # binarize
     output = x.div(keep_prob) * random_tensor
     return output
 
@@ -190,7 +190,7 @@ class VanillaPatchEmbedding(nn.Module):
             f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
         output = F.conv2d(input_, self.weight, self.bias, stride=self.patch_size)
         if self.flatten:
-            output = output.flatten(2).transpose(1, 2)  # BCHW -> BNC
+            output = output.flatten(2).transpose(1, 2)    # BCHW -> BNC
         cls_token = self.cls_token.expand(output.shape[0], -1, -1)
         output = torch.cat((cls_token, output), dim=1)
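
For context, the `drop_path` helper touched in the first hunk implements
stochastic depth: each sample in the batch is either zeroed out or rescaled
by `1/keep_prob`, so the expected value of the output matches the input.
The sketch below is a self-contained reproduction of the logic visible in
the hunk, followed by an illustrative call; the tensor shape and drop
probability in the usage lines are assumptions for demonstration only, not
part of the patch.

    import torch

    def drop_path(x, drop_prob: float = 0., training: bool = False):
        """Stochastic depth per sample, as in the hunk above."""
        if drop_prob == 0. or not training:
            return x
        keep_prob = 1 - drop_prob
        # Broadcastable mask of shape (batch, 1, 1, ...) works for tensors
        # of any rank, not just the BCHW tensors of 2D ConvNets.
        shape = (x.shape[0],) + (1,) * (x.ndim - 1)
        random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
        random_tensor.floor_()  # binarize: 1 with probability keep_prob, else 0
        # Rescale the surviving samples by 1/keep_prob so the expectation is unchanged.
        return x.div(keep_prob) * random_tensor

    # Illustrative usage (shape and probability are assumptions):
    x = torch.ones(4, 3, 8, 8)
    y = drop_path(x, drop_prob=0.5, training=True)
    # Each of the 4 samples is now either all zeros or scaled by 1/0.5 == 2.0.

Because the mask is drawn per sample (one Bernoulli trial per batch entry,
broadcast over the remaining dimensions), entire residual paths are dropped
rather than individual activations, which is what distinguishes stochastic
depth from ordinary dropout.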