[NFC] polish colossalai/kernel/cuda_native/csrc/layer_norm_cuda.cpp code style (#973)

pull/997/head
Ofey Chan 2022-05-16 11:30:41 +08:00 committed by binmakeswell
parent 598cde4a0f
commit 136946422b
1 changed file with 12 additions and 12 deletions

View File

@ -2,11 +2,13 @@
* https://github.com/NVIDIA/apex * https://github.com/NVIDIA/apex
* with minor changes. */ * with minor changes. */
#include "compat.h"
#include <cassert>
#include <torch/extension.h> #include <torch/extension.h>
#include <cassert>
#include <vector> #include <vector>
#include "compat.h"
namespace { namespace {
void compute_n1_n2(at::Tensor input, at::IntArrayRef normalized_shape, int &n1, void compute_n1_n2(at::Tensor input, at::IntArrayRef normalized_shape, int &n1,
@ -83,7 +85,6 @@ std::vector<at::Tensor> layer_norm_affine(at::Tensor input,
at::IntArrayRef normalized_shape, at::IntArrayRef normalized_shape,
at::Tensor gamma, at::Tensor beta, at::Tensor gamma, at::Tensor beta,
double epsilon) { double epsilon) {
CHECK_INPUT(input); CHECK_INPUT(input);
CHECK_INPUT(gamma); CHECK_INPUT(gamma);
CHECK_INPUT(beta); CHECK_INPUT(beta);
@ -109,11 +110,10 @@ void cuda_layer_norm_gradient(at::Tensor *dout, at::Tensor *mean,
double epsilon, at::Tensor *grad_input, double epsilon, at::Tensor *grad_input,
at::Tensor *grad_gamma, at::Tensor *grad_beta); at::Tensor *grad_gamma, at::Tensor *grad_beta);
std::vector<at::Tensor> std::vector<at::Tensor> layer_norm_gradient_affine(
layer_norm_gradient_affine(at::Tensor dout, at::Tensor mean, at::Tensor invvar, at::Tensor dout, at::Tensor mean, at::Tensor invvar, at::Tensor input,
at::Tensor input, at::IntArrayRef normalized_shape, at::IntArrayRef normalized_shape, at::Tensor gamma, at::Tensor beta,
at::Tensor gamma, at::Tensor beta, double epsilon) { double epsilon) {
CHECK_INPUT(dout); CHECK_INPUT(dout);
CHECK_INPUT(mean); CHECK_INPUT(mean);
CHECK_INPUT(invvar); CHECK_INPUT(invvar);