[NFC] polish colossalai/kernel/cuda_native/csrc/scaled_upper_triang_masked_softmax.h code style (#1270)

pull/1298/head
binmakeswell 2022-07-12 18:13:27 +08:00 committed by Frank Lee
parent 94bfd35184
commit c95e18cdb9
1 changed file with 500 additions and 400 deletions


@@ -4,11 +4,12 @@
#pragma once

#include <assert.h>
#include <c10/macros/Macros.h>
#include <cuda_fp16.h>
#include <stdint.h>

#include <cfloat>
#include <limits>

namespace {
@@ -16,53 +17,78 @@ template <typename Datatype, int ELEMENTS_PER_LDG>
__device__ __inline__ void copy_vector(Datatype *dst, const Datatype *src);

template <>
__device__ __inline__ void copy_vector<c10::BFloat16, 1>(
    c10::BFloat16 *dst, const c10::BFloat16 *src) {
  *dst = *src;
}

template <>
__device__ __inline__ void copy_vector<c10::BFloat16, 4>(
    c10::BFloat16 *dst, const c10::BFloat16 *src) {
  *((float2 *)dst) = *((float2 *)src);
}

template <>
__device__ __inline__ void copy_vector<c10::Half, 1>(c10::Half *dst,
                                                     const c10::Half *src) {
  *dst = *src;
}

template <>
__device__ __inline__ void copy_vector<c10::Half, 4>(c10::Half *dst,
                                                     const c10::Half *src) {
  *((float2 *)dst) = *((float2 *)src);
}

template <>
__device__ __inline__ void copy_vector<uint8_t, 1>(uint8_t *dst,
                                                   const uint8_t *src) {
  *dst = *src;
}

template <>
__device__ __inline__ void copy_vector<uint8_t, 4>(uint8_t *dst,
                                                   const uint8_t *src) {
  *((half2 *)dst) = *((half2 *)src);
}

template <typename Datatype, int ELEMENTS_PER_LDG>
__device__ __inline__ void copy_zero_vector(Datatype *dst);

template <>
__device__ __inline__ void copy_zero_vector<c10::BFloat16, 1>(
    c10::BFloat16 *dst) {
  *dst = 0.0;
}

template <>
__device__ __inline__ void copy_zero_vector<c10::BFloat16, 4>(
    c10::BFloat16 *dst) {
  *((float2 *)dst) = make_float2(0.0f, 0.0f);
}

template <>
__device__ __inline__ void copy_zero_vector<c10::Half, 1>(c10::Half *dst) {
  *dst = 0.0;
}

template <>
__device__ __inline__ void copy_zero_vector<c10::Half, 4>(c10::Half *dst) {
  *((float2 *)dst) = make_float2(0.0f, 0.0f);
}

int log2_ceil(int value) {
  int log2_value = 0;
  while ((1 << log2_value) < value) ++log2_value;
  return log2_value;
}

template <typename T>
struct Add {
  __device__ __forceinline__ T operator()(T a, T b) const { return a + b; }
};

template <typename T>
struct Max {
  __device__ __forceinline__ T operator()(T a, T b) const {
    return a < b ? b : a;
@@ -70,431 +96,505 @@ struct Max {
  }
};

template <typename T>
__device__ __forceinline__ T
WARP_SHFL_XOR_NATIVE(T value, int laneMask, int width = warpSize,
                     unsigned int mask = 0xffffffff) {
#if CUDA_VERSION >= 9000
  return __shfl_xor_sync(mask, value, laneMask, width);
#else
  return __shfl_xor(value, laneMask, width);
#endif
}

template <typename acc_t, int WARP_BATCH, int WARP_SIZE,
          template <typename> class ReduceOp>
__device__ __forceinline__ void warp_reduce(acc_t *sum) {
  ReduceOp<acc_t> r;
#pragma unroll
  for (int offset = WARP_SIZE / 2; offset > 0; offset /= 2) {
#pragma unroll
    for (int i = 0; i < WARP_BATCH; ++i) {
      acc_t b = WARP_SHFL_XOR_NATIVE(sum[i], offset, WARP_SIZE);
      sum[i] = r(sum[i], b);
    }
  }
}
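For orientation, here is a minimal, hypothetical kernel (not part of this commit) showing how warp_reduce is meant to be used: each lane contributes one partial value, and after the butterfly exchange every lane of the warp holds the full reduction. The kernel name and launch configuration are assumptions of this sketch.

// Sketch only: sums the lane ids 0..31; every lane ends up holding 496.
__global__ void warp_reduce_demo(float *result) {
  float sum[1] = {static_cast<float>(threadIdx.x)};  // WARP_BATCH == 1
  warp_reduce<float, 1, 32, Add>(sum);
  if (threadIdx.x == 0) *result = sum[0];
}
// Hypothetical launch: warp_reduce_demo<<<1, 32>>>(d_result);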
/*
 * Extended softmax (from native aten pytorch) with the following additional
 * features: 1) input scaling 2) implicit time (diagonal masking)
 */
template <typename input_t, typename output_t, typename acc_t,
          int log2_elements>
__global__ void scaled_upper_triang_masked_softmax_warp_forward(
    output_t *dst, const input_t *src, const acc_t scale, int micro_batch_size,
    int stride, int element_count) {
  // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and
  // warp_size of method warp_softmax_forward_kernel.
  constexpr int next_power_of_two = 1 << log2_elements;
  constexpr int WARP_SIZE =
      (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
  constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
  constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;
  constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 1 : 4;

  int first_batch =
      (blockDim.y * blockIdx.y + threadIdx.y) * gridDim.x * WARP_BATCH +
      blockIdx.x;
  int local_seq = blockIdx.x + 1;
  int warp_iteration_limit =
      (local_seq + ELEMENTS_PER_LDG_STG * WARP_SIZE - 1) / WARP_SIZE;

  // micro_batch_size might not be a multiple of WARP_BATCH. Check how
  // many batches have to be computed within this WARP.
  int local_batches = micro_batch_size - first_batch;
  if (local_batches > WARP_BATCH) local_batches = WARP_BATCH;

  // there might be multiple batches per warp. compute the index within the
  // batch
  int local_idx = threadIdx.x;

  src += first_batch * stride + ELEMENTS_PER_LDG_STG * local_idx;
  dst += first_batch * stride + ELEMENTS_PER_LDG_STG * local_idx;

  // load data from global memory
  acc_t elements[WARP_BATCH][WARP_ITERATIONS];
  input_t temp_data[ELEMENTS_PER_LDG_STG];
#pragma unroll
  for (int i = 0; i < WARP_BATCH; ++i) {
    int batch_element_count = (i >= local_batches) ? 0 : local_seq;

#pragma unroll
    for (int it = 0; it < WARP_ITERATIONS; it += ELEMENTS_PER_LDG_STG) {
      int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE;

      if (element_index < batch_element_count) {
        copy_vector<input_t, ELEMENTS_PER_LDG_STG>(
            temp_data, src + i * element_count * stride + it * WARP_SIZE);

#pragma unroll
        for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
          if ((element_index + element) < batch_element_count) {
            elements[i][it + element] = (acc_t)temp_data[element] * scale;
          } else {
            elements[i][it + element] = -std::numeric_limits<acc_t>::infinity();
          }
        }
      } else {
#pragma unroll
        for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
          elements[i][it + element] = -std::numeric_limits<acc_t>::infinity();
        }
      }
    }
  }

  // compute max_value
  acc_t max_value[WARP_BATCH];
#pragma unroll
  for (int i = 0; i < WARP_BATCH; ++i) {
    max_value[i] = elements[i][0];
#pragma unroll
    for (int it = 1; it < WARP_ITERATIONS; ++it) {
      max_value[i] =
          (max_value[i] > elements[i][it]) ? max_value[i] : elements[i][it];
    }
  }
  warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Max>(max_value);

  acc_t sum[WARP_BATCH]{0.0f};
#pragma unroll
  for (int i = 0; i < WARP_BATCH; ++i) {
#pragma unroll
    for (int it = 0; it < WARP_ITERATIONS; ++it) {
      if (it < warp_iteration_limit) {
        elements[i][it] = std::exp((elements[i][it] - max_value[i]));
        sum[i] += elements[i][it];
      }
    }
  }
  warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);

  // store result
  output_t out[ELEMENTS_PER_LDG_STG];
#pragma unroll
  for (int i = 0; i < WARP_BATCH; ++i) {
    if (i >= local_batches) break;
#pragma unroll
    for (int it = 0; it < WARP_ITERATIONS; it += ELEMENTS_PER_LDG_STG) {
      int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE;

      if (element_index < local_seq) {
#pragma unroll
        for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
          if (element_index + element < local_seq) {
            out[element] = elements[i][it + element] / sum[i];
          } else {
            out[element] = 0;
          }
        }
        copy_vector<output_t, ELEMENTS_PER_LDG_STG>(
            dst + i * element_count * stride + it * WARP_SIZE, out);
      } else if (element_index < element_count) {
        copy_zero_vector<output_t, ELEMENTS_PER_LDG_STG>(
            dst + i * element_count * stride + it * WARP_SIZE);
      } else {
        break;
      }
    }
  }
}
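To make the masking semantics concrete: for each row r of a seq_len x seq_len score tile, the kernel computes a numerically stabilized softmax over the first r + 1 (non-future) positions and writes zeros to the rest, which is what local_seq = blockIdx.x + 1 encodes. A minimal single-threaded reference sketch of that semantics (the function and variable names are illustrative, not part of this file):

#include <algorithm>
#include <cmath>
#include <limits>
#include <vector>

// Reference semantics for one [seq_len, seq_len] tile: scaled softmax over
// the causal prefix of each row, zeros for the masked upper-triangular part.
std::vector<float> ref_forward(const std::vector<float> &src, int seq_len,
                               float scale) {
  std::vector<float> dst(src.size(), 0.0f);
  for (int row = 0; row < seq_len; ++row) {
    int valid = row + 1;  // mirrors local_seq = blockIdx.x + 1
    float max_v = -std::numeric_limits<float>::infinity();
    for (int c = 0; c < valid; ++c)
      max_v = std::max(max_v, scale * src[row * seq_len + c]);
    float sum = 0.0f;
    for (int c = 0; c < valid; ++c)
      sum += std::exp(scale * src[row * seq_len + c] - max_v);
    for (int c = 0; c < valid; ++c)
      dst[row * seq_len + c] =
          std::exp(scale * src[row * seq_len + c] - max_v) / sum;
  }
  return dst;
}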
template <typename input_t, typename output_t, typename acc_t,
          int log2_elements>
__global__ void scaled_upper_triang_masked_softmax_warp_backward(
    output_t *gradInput, input_t *grad, const input_t *output, acc_t scale,
    int micro_batch_size, int stride, int element_count) {
  // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and
  // warp_size of method warp_softmax_backward_kernel.
  constexpr int next_power_of_two = 1 << log2_elements;
  constexpr int WARP_SIZE =
      (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;
  constexpr int WARP_ITERATIONS = next_power_of_two / WARP_SIZE;
  constexpr int WARP_BATCH = (next_power_of_two <= 128) ? 2 : 1;
  constexpr int ELEMENTS_PER_LDG_STG = (WARP_ITERATIONS < 4) ? 1 : 4;

  int first_batch =
      (blockDim.y * blockIdx.y + threadIdx.y) * gridDim.x * WARP_BATCH +
      blockIdx.x;
  int local_seq = blockIdx.x + 1;

  // micro_batch_size might not be a multiple of WARP_BATCH. Check how
  // many batches have to be computed within this WARP.
  int local_batches = micro_batch_size - first_batch;
  if (local_batches > WARP_BATCH) local_batches = WARP_BATCH;

  // there might be multiple batches per warp. compute the index within the
  // batch
  int local_idx = threadIdx.x;

  // the first element to process by the current thread
  int thread_offset = first_batch * stride + ELEMENTS_PER_LDG_STG * local_idx;
  grad += thread_offset;
  output += thread_offset;
  gradInput += thread_offset;

  // load data from global memory
  acc_t grad_reg[WARP_BATCH][WARP_ITERATIONS]{0.0f};
  acc_t output_reg[WARP_BATCH][WARP_ITERATIONS]{0.0f};
  input_t temp_grad[ELEMENTS_PER_LDG_STG];
  input_t temp_output[ELEMENTS_PER_LDG_STG];
#pragma unroll
  for (int i = 0; i < WARP_BATCH; ++i) {
    int batch_element_count = (i >= local_batches) ? 0 : local_seq;

#pragma unroll
    for (int it = 0; it < WARP_ITERATIONS; it += ELEMENTS_PER_LDG_STG) {
      int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE;
      if (element_index < batch_element_count) {
        copy_vector<input_t, ELEMENTS_PER_LDG_STG>(
            temp_grad, grad + i * element_count * stride + it * WARP_SIZE);
        copy_vector<input_t, ELEMENTS_PER_LDG_STG>(
            temp_output, output + i * element_count * stride + it * WARP_SIZE);

#pragma unroll
        for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
          if (element_index + element < batch_element_count) {
            output_reg[i][it + element] = (acc_t)temp_output[element];
          }
        }
#pragma unroll
        for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
          if (element_index + element < batch_element_count) {
            grad_reg[i][it + element] =
                (acc_t)temp_grad[element] * output_reg[i][it + element];
          }
        }
      }
    }
  }

  acc_t sum[WARP_BATCH];
#pragma unroll
  for (int i = 0; i < WARP_BATCH; ++i) {
    sum[i] = grad_reg[i][0];
#pragma unroll
    for (int it = 1; it < WARP_ITERATIONS; ++it) {
      sum[i] += grad_reg[i][it];
    }
  }
  warp_reduce<acc_t, WARP_BATCH, WARP_SIZE, Add>(sum);

  // store result
#pragma unroll
  for (int i = 0; i < WARP_BATCH; ++i) {
    if (i >= local_batches) break;
#pragma unroll
    for (int it = 0; it < WARP_ITERATIONS; it += ELEMENTS_PER_LDG_STG) {
      int element_index = ELEMENTS_PER_LDG_STG * local_idx + it * WARP_SIZE;
      if (element_index < element_count) {
        // compute gradients
        output_t out[ELEMENTS_PER_LDG_STG];
#pragma unroll
        for (int element = 0; element < ELEMENTS_PER_LDG_STG; ++element) {
          out[element] =
              (output_t)(scale * (grad_reg[i][it + element] -
                                  output_reg[i][it + element] * sum[i]));
        }
        copy_vector<output_t, ELEMENTS_PER_LDG_STG>(
            gradInput + i * element_count * stride + it * WARP_SIZE, out);
      }
    }
  }
}
}  // end of anonymous namespace
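The backward kernel's store step is the standard softmax Jacobian-vector product, scaled by the same factor as the forward pass. With y the forward output and dL/dy the incoming gradient, in LaTeX:

\frac{\partial L}{\partial x_i} = s \, y_i \left( \frac{\partial L}{\partial y_i} - \sum_j y_j \, \frac{\partial L}{\partial y_j} \right)

Here grad_reg already holds y_i * dL/dy_i, sum accumulates the inner product over j, so out = scale * (grad_reg - output_reg * sum) equals s * y_i * (dL/dy_i - sum_j y_j dL/dy_j), the leading y_i entering through grad_reg.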
template <typename input_t, typename output_t, typename acc_t>
void dispatch_scaled_upper_triang_masked_softmax_forward(
    output_t *dst, const input_t *src, const input_t scale,
    int softmax_elements, int softmax_elements_stride, int attn_batches) {
  TORCH_INTERNAL_ASSERT(softmax_elements >= 0 && softmax_elements <= 2048);
  if (softmax_elements == 0) {
    return;
  } else {
    int log2_elements = log2_ceil(softmax_elements);
    const int next_power_of_two = 1 << log2_elements;
    int seq_len = softmax_elements;
    int batch_count = attn_batches * seq_len;

    // This value must match the WARP_SIZE constexpr value computed inside
    // softmax_warp_forward.
    int warp_size =
        (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;

    // This value must match the WARP_BATCH constexpr value computed inside
    // softmax_warp_forward.
    int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;

    // use 128 threads per block to maximize gpu utilization
    constexpr int threads_per_block = 128;

    int warps_per_block = (threads_per_block / warp_size);
    int batches_per_block = warps_per_block * batches_per_warp;
    TORCH_INTERNAL_ASSERT(attn_batches % batches_per_block == 0);

    int blocks_per_seq = attn_batches / batches_per_block;
    dim3 blocks(seq_len, blocks_per_seq, 1);
    dim3 threads(warp_size, warps_per_block, 1);
    // Launch code would be more elegant if C++ supported FOR CONSTEXPR
    switch (log2_elements) {
      case 0:  // 1
        scaled_upper_triang_masked_softmax_warp_forward<input_t, output_t,
                                                        acc_t, 0>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                dst, src, scale, batch_count, softmax_elements_stride,
                softmax_elements);
        break;
      case 1:  // 2
        scaled_upper_triang_masked_softmax_warp_forward<input_t, output_t,
                                                        acc_t, 1>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                dst, src, scale, batch_count, softmax_elements_stride,
                softmax_elements);
        break;
      case 2:  // 4
        scaled_upper_triang_masked_softmax_warp_forward<input_t, output_t,
                                                        acc_t, 2>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                dst, src, scale, batch_count, softmax_elements_stride,
                softmax_elements);
        break;
      case 3:  // 8
        scaled_upper_triang_masked_softmax_warp_forward<input_t, output_t,
                                                        acc_t, 3>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                dst, src, scale, batch_count, softmax_elements_stride,
                softmax_elements);
        break;
      case 4:  // 16
        scaled_upper_triang_masked_softmax_warp_forward<input_t, output_t,
                                                        acc_t, 4>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                dst, src, scale, batch_count, softmax_elements_stride,
                softmax_elements);
        break;
      case 5:  // 32
        scaled_upper_triang_masked_softmax_warp_forward<input_t, output_t,
                                                        acc_t, 5>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                dst, src, scale, batch_count, softmax_elements_stride,
                softmax_elements);
        break;
      case 6:  // 64
        scaled_upper_triang_masked_softmax_warp_forward<input_t, output_t,
                                                        acc_t, 6>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                dst, src, scale, batch_count, softmax_elements_stride,
                softmax_elements);
        break;
      case 7:  // 128
        scaled_upper_triang_masked_softmax_warp_forward<input_t, output_t,
                                                        acc_t, 7>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                dst, src, scale, batch_count, softmax_elements_stride,
                softmax_elements);
        break;
      case 8:  // 256
        scaled_upper_triang_masked_softmax_warp_forward<input_t, output_t,
                                                        acc_t, 8>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                dst, src, scale, batch_count, softmax_elements_stride,
                softmax_elements);
        break;
      case 9:  // 512
        scaled_upper_triang_masked_softmax_warp_forward<input_t, output_t,
                                                        acc_t, 9>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                dst, src, scale, batch_count, softmax_elements_stride,
                softmax_elements);
        break;
      case 10:  // 1024
        scaled_upper_triang_masked_softmax_warp_forward<input_t, output_t,
                                                        acc_t, 10>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                dst, src, scale, batch_count, softmax_elements_stride,
                softmax_elements);
        break;
      case 11:  // 2048
        scaled_upper_triang_masked_softmax_warp_forward<input_t, output_t,
                                                        acc_t, 11>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                dst, src, scale, batch_count, softmax_elements_stride,
                softmax_elements);
        break;
      default:
        break;
    }
  }
}
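For orientation, a hypothetical host-side invocation (the real call sites live in the accompanying .cu bindings; the pointer names, the 0.125f scale, and the contiguous [attn_batches, seq_len, seq_len] layout are assumptions of this sketch). With that layout the row stride is simply seq_len, and attn_batches must be divisible by batches_per_block, as the TORCH_INTERNAL_ASSERT above enforces.

// Sketch only: d_dst and d_src are device pointers to
// attn_batches * seq_len * seq_len c10::Half values, contiguous.
dispatch_scaled_upper_triang_masked_softmax_forward<c10::Half, c10::Half,
                                                    float>(
    d_dst, d_src, /*scale=*/c10::Half(0.125f),
    /*softmax_elements=*/seq_len,
    /*softmax_elements_stride=*/seq_len,
    /*attn_batches=*/attn_batches);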
template <typename input_t, typename output_t, typename acc_t>
void dispatch_scaled_upper_triang_masked_softmax_backward(
    output_t *grad_input, input_t *grad, const input_t *output,
    const acc_t scale, int softmax_elements, int softmax_elements_stride,
    int attn_batches) {
  TORCH_INTERNAL_ASSERT(softmax_elements >= 0 && softmax_elements <= 2048);
  if (softmax_elements == 0) {
    return;
  } else {
    int log2_elements = log2_ceil(softmax_elements);
    const int next_power_of_two = 1 << log2_elements;
    int seq_len = softmax_elements;
    int batch_count = attn_batches * seq_len;

    // This value must match the WARP_SIZE constexpr value computed inside
    // softmax_warp_backward.
    int warp_size =
        (next_power_of_two < C10_WARP_SIZE) ? next_power_of_two : C10_WARP_SIZE;

    // This value must match the WARP_BATCH constexpr value computed inside
    // softmax_warp_backward.
    int batches_per_warp = (next_power_of_two <= 128) ? 2 : 1;

    // use 128 threads per block to maximize gpu utilization
    constexpr int threads_per_block = 128;

    int warps_per_block = (threads_per_block / warp_size);
    int batches_per_block = warps_per_block * batches_per_warp;
    TORCH_INTERNAL_ASSERT(attn_batches % batches_per_block == 0);

    int blocks_per_seq = attn_batches / batches_per_block;
    dim3 blocks(seq_len, blocks_per_seq, 1);
    dim3 threads(warp_size, warps_per_block, 1);
    // Launch code would be more elegant if C++ supported FOR CONSTEXPR
    switch (log2_elements) {
      case 0:  // 1
        scaled_upper_triang_masked_softmax_warp_backward<input_t, output_t,
                                                         acc_t, 0>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, batch_count,
                softmax_elements_stride, softmax_elements);
        break;
      case 1:  // 2
        scaled_upper_triang_masked_softmax_warp_backward<input_t, output_t,
                                                         acc_t, 1>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, batch_count,
                softmax_elements_stride, softmax_elements);
        break;
      case 2:  // 4
        scaled_upper_triang_masked_softmax_warp_backward<input_t, output_t,
                                                         acc_t, 2>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, batch_count,
                softmax_elements_stride, softmax_elements);
        break;
      case 3:  // 8
        scaled_upper_triang_masked_softmax_warp_backward<input_t, output_t,
                                                         acc_t, 3>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, batch_count,
                softmax_elements_stride, softmax_elements);
        break;
      case 4:  // 16
        scaled_upper_triang_masked_softmax_warp_backward<input_t, output_t,
                                                         acc_t, 4>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, batch_count,
                softmax_elements_stride, softmax_elements);
        break;
      case 5:  // 32
        scaled_upper_triang_masked_softmax_warp_backward<input_t, output_t,
                                                         acc_t, 5>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, batch_count,
                softmax_elements_stride, softmax_elements);
        break;
      case 6:  // 64
        scaled_upper_triang_masked_softmax_warp_backward<input_t, output_t,
                                                         acc_t, 6>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, batch_count,
                softmax_elements_stride, softmax_elements);
        break;
      case 7:  // 128
        scaled_upper_triang_masked_softmax_warp_backward<input_t, output_t,
                                                         acc_t, 7>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, batch_count,
                softmax_elements_stride, softmax_elements);
        break;
      case 8:  // 256
        scaled_upper_triang_masked_softmax_warp_backward<input_t, output_t,
                                                         acc_t, 8>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, batch_count,
                softmax_elements_stride, softmax_elements);
        break;
      case 9:  // 512
        scaled_upper_triang_masked_softmax_warp_backward<input_t, output_t,
                                                         acc_t, 9>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, batch_count,
                softmax_elements_stride, softmax_elements);
        break;
      case 10:  // 1024
        scaled_upper_triang_masked_softmax_warp_backward<input_t, output_t,
                                                         acc_t, 10>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, batch_count,
                softmax_elements_stride, softmax_elements);
        break;
      case 11:  // 2048
        scaled_upper_triang_masked_softmax_warp_backward<input_t, output_t,
                                                         acc_t, 11>
            <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
                grad_input, grad, output, scale, batch_count,
                softmax_elements_stride, softmax_elements);
        break;
      default:
        break;
    }
  }
}