mirror of https://github.com/hpcaitech/ColossalAI
fix format (#568)
parent 9420d3ae31
commit 2a915a8b62
@@ -1,5 +1,3 @@
 from .cuda_native import LayerNorm, FusedScaleMaskSoftmax, MultiHeadAttention
 
-__all__ = [
-    "LayerNorm", "FusedScaleMaskSoftmax", "MultiHeadAttention"
-]
+__all__ = ["LayerNorm", "FusedScaleMaskSoftmax", "MultiHeadAttention"]
@@ -1,71 +1,49 @@
-// modified from https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_adam.cu
-
+// modified from
+// https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_adam.cu
 #include <torch/extension.h>
 
-void multi_tensor_scale_cuda(
-    int chunk_size,
-    at::Tensor noop_flag,
-    std::vector<std::vector<at::Tensor>> tensor_lists,
-    float scale);
+void multi_tensor_scale_cuda(int chunk_size, at::Tensor noop_flag,
+                             std::vector<std::vector<at::Tensor>> tensor_lists,
+                             float scale);
 
-void multi_tensor_sgd_cuda(
-    int chunk_size,
-    at::Tensor noop_flag,
-    std::vector<std::vector<at::Tensor>> tensor_lists,
-    float wd,
-    float momentum,
-    float dampening,
-    float lr,
-    bool nesterov,
-    bool first_run,
-    bool wd_after_momentum,
-    float scale);
+void multi_tensor_sgd_cuda(int chunk_size, at::Tensor noop_flag,
+                           std::vector<std::vector<at::Tensor>> tensor_lists,
+                           float wd, float momentum, float dampening, float lr,
+                           bool nesterov, bool first_run,
+                           bool wd_after_momentum, float scale);
 
-void multi_tensor_adam_cuda(
-    int chunk_size,
-    at::Tensor noop_flag,
-    std::vector<std::vector<at::Tensor>> tensor_lists,
-    const float lr,
-    const float beta1,
-    const float beta2,
-    const float epsilon,
-    const int step,
-    const int mode,
-    const int bias_correction,
-    const float weight_decay);
+void multi_tensor_adam_cuda(int chunk_size, at::Tensor noop_flag,
+                            std::vector<std::vector<at::Tensor>> tensor_lists,
+                            const float lr, const float beta1,
+                            const float beta2, const float epsilon,
+                            const int step, const int mode,
+                            const int bias_correction,
+                            const float weight_decay);
 
-void multi_tensor_lamb_cuda(
-    int chunk_size,
-    at::Tensor noop_flag,
-    std::vector<std::vector<at::Tensor>> tensor_lists,
-    const float lr,
-    const float beta1,
-    const float beta2,
-    const float epsilon,
-    const int step,
-    const int bias_correction,
-    const float weight_decay,
-    const int grad_averaging,
-    const int mode,
-    at::Tensor global_grad_norm,
-    const float max_grad_norm,
-    at::optional<bool> use_nvlamb_python);
+void multi_tensor_lamb_cuda(int chunk_size, at::Tensor noop_flag,
+                            std::vector<std::vector<at::Tensor>> tensor_lists,
+                            const float lr, const float beta1,
+                            const float beta2, const float epsilon,
+                            const int step, const int bias_correction,
+                            const float weight_decay, const int grad_averaging,
+                            const int mode, at::Tensor global_grad_norm,
+                            const float max_grad_norm,
+                            at::optional<bool> use_nvlamb_python);
 
-std::tuple<at::Tensor, at::Tensor> multi_tensor_l2norm_cuda(
-    int chunk_size,
-    at::Tensor noop_flag,
-    std::vector<std::vector<at::Tensor>> tensor_lists,
-    at::optional<bool> per_tensor_python);
+std::tuple<at::Tensor, at::Tensor>
+multi_tensor_l2norm_cuda(int chunk_size, at::Tensor noop_flag,
+                         std::vector<std::vector<at::Tensor>> tensor_lists,
+                         at::optional<bool> per_tensor_python);
 
-PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
-{
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
   m.def("multi_tensor_scale", &multi_tensor_scale_cuda,
         "Fused overflow check + scale for a list of contiguous tensors");
   m.def("multi_tensor_sgd", &multi_tensor_sgd_cuda,
         "Fused SGD optimizer for list of contiguous tensors");
   m.def("multi_tensor_adam", &multi_tensor_adam_cuda,
         "Compute and apply gradient update to parameters for Adam optimizer");
   m.def("multi_tensor_lamb", &multi_tensor_lamb_cuda,
         "Computes and apply update for LAMB optimizer");
   m.def("multi_tensor_l2norm", &multi_tensor_l2norm_cuda,
         "Computes L2 norm for a list of contiguous tensors");
 }
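For context, the PYBIND11_MODULE block above is what makes these multi-tensor kernels callable from Python once the extension is compiled. A minimal usage sketch follows; the module name colossal_C and the chunk size 65536 are assumptions for illustration (neither appears in this diff), so check the repository's setup.py for the actual extension name.

import torch
import colossal_C  # assumed name of the compiled extension

# Shared no-op flag: a kernel skips its work when the flag is nonzero,
# e.g. after an inf/nan was detected during loss scaling.
noop_flag = torch.zeros(1, dtype=torch.int, device="cuda")

# tensor_lists is a list of tensor lists; for scale it is [inputs, outputs].
xs = [torch.randn(1024, device="cuda") for _ in range(4)]
ys = [torch.empty_like(x) for x in xs]

# Fused overflow check + scale: writes ys[i] = xs[i] * 0.5, processing the
# tensors in chunks of 65536 elements (a typical, not mandated, value).
colossal_C.multi_tensor_scale(65536, noop_flag, [xs, ys], 0.5)

# Grouped L2 norm over one list of tensors; passing True requests the
# per-tensor norms alongside the global norm, matching the declared
# std::tuple<at::Tensor, at::Tensor> return type.
total_norm, per_tensor = colossal_C.multi_tensor_l2norm(65536, noop_flag, [xs], True)

The declarations fix one calling convention for every kernel (chunk_size, noop_flag, tensor_lists, then op-specific scalars), which is why all five bindings can be driven by the same chunked dispatch loop on the CUDA side.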