From ae7c33810574ef1c47538418bfd6bc07fa6d9c6b Mon Sep 17 00:00:00 2001
From: zhengzangw
Date: Fri, 20 May 2022 23:26:16 +0800
Subject: [PATCH] [NFC] polish
 colossalai/kernel/cuda_native/csrc/colossal_C_frontend.cpp code style

---
 .../kernel/cuda_native/csrc/colossal_C_frontend.cpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/colossalai/kernel/cuda_native/csrc/colossal_C_frontend.cpp b/colossalai/kernel/cuda_native/csrc/colossal_C_frontend.cpp
index 17ab1741e..a687adc7b 100644
--- a/colossalai/kernel/cuda_native/csrc/colossal_C_frontend.cpp
+++ b/colossalai/kernel/cuda_native/csrc/colossal_C_frontend.cpp
@@ -30,10 +30,10 @@ void multi_tensor_lamb_cuda(int chunk_size, at::Tensor noop_flag,
                             const float max_grad_norm,
                             at::optional<bool> use_nvlamb_python);
 
-std::tuple<at::Tensor, at::Tensor>
-multi_tensor_l2norm_cuda(int chunk_size, at::Tensor noop_flag,
-                         std::vector<std::vector<at::Tensor>> tensor_lists,
-                         at::optional<bool> per_tensor_python);
+std::tuple<at::Tensor, at::Tensor> multi_tensor_l2norm_cuda(
+    int chunk_size, at::Tensor noop_flag,
+    std::vector<std::vector<at::Tensor>> tensor_lists,
+    at::optional<bool> per_tensor_python);
 
 PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
   m.def("multi_tensor_scale", &multi_tensor_scale_cuda,