Unverified commit 0cae5c7f authored by limingshu, committed by GitHub

Optimization for StackGradCUDAKernel for last dimension stack case. (#48992)

* add stack grad kernel optimization

* add basic optimization kernel for stack_grad_kernel

* optimization of stack_grad_kernel for last dim stack and change code format with pre-commit
Parent 05df6973
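Editor's note (not part of the patch): stack_grad is effectively an unstack of the incoming gradient dy along the stack axis. When that axis is the last dimension, dy can be viewed as a row-major [dy_pre, out_num] matrix whose column c belongs to x_grad[c], and the new StackGradKernelForLastDim below exploits exactly that layout. The following is a minimal host-side sketch of the index mapping, reusing the dy_pre/out_num names from the diff; the function name is hypothetical and only meant as a reference for what the last-dimension path computes.

// Host-side reference of the last-dimension unstack mapping (sketch only;
// "UnstackLastDimReference" is not part of the patch).
// dy is assumed row-major with shape [dy_pre, out_num]; x_grad[c] receives
// column c, i.e. x_grad[c][r] = dy[r * out_num + c].
#include <cstdint>
#include <vector>

template <typename T>
void UnstackLastDimReference(const T* dy,
                             int64_t dy_pre,   // rows: product of dims before the axis
                             int64_t out_num,  // cols: number of stacked inputs
                             const std::vector<T*>& x_grad) {
  for (int64_t r = 0; r < dy_pre; ++r) {
    for (int64_t c = 0; c < out_num; ++c) {
      if (x_grad[c] != nullptr) {  // skip gradients that are not required
        x_grad[c][r] = dy[r * out_num + c];
      }
    }
  }
}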
@@ -46,6 +46,9 @@ namespace phi {
 namespace backends {
 namespace gpu {
 
+// Limitation of the setting in one dimension of cuda grid.
+constexpr int kMultiDimslimit = 65536;
+
 template <typename T = int64_t>
 inline T DivUp(T a, T b) {
   return (a + b - 1) / b;
......
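This first hunk adds a grid-dimension cap next to the existing DivUp helper. Below is a minimal sketch of how the two are typically combined when sizing one grid dimension, with standalone re-definitions standing in for the real phi::backends::gpu helpers; "ClampedGridDim" is a hypothetical name, not part of the patch.

// Sketch only: local stand-ins for DivUp and kMultiDimslimit, showing how a
// tile count is clamped to the per-dimension grid cap. The kernel is then
// expected to grid-stride over any tiles beyond the clamped grid size.
#include <algorithm>
#include <cstdint>

constexpr int kMultiDimslimit = 65536;  // cap for one grid dimension

template <typename T = int64_t>
inline T DivUp(T a, T b) {
  return (a + b - 1) / b;
}

inline int ClampedGridDim(int64_t n, int threads) {
  int64_t tiles = DivUp<int64_t>(n, threads);
  return static_cast<int>(std::min<int64_t>(tiles, kMultiDimslimit));
}

This is the same pattern used further down in the diff, where bid_x = std::min(tile_x_num, backends::gpu::kMultiDimslimit) and the kernel loops over tile_x with a gridDim.x stride.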
@@ -13,15 +13,13 @@
 // limitations under the License.
 
 #include "paddle/phi/kernels/stack_grad_kernel.h"
-
 #include "paddle/fluid/memory/memory.h"
-#include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/backends/gpu/gpu_launch_config.h"
 #include "paddle/phi/core/kernel_registry.h"
 
 namespace phi {
 
-template <typename T, typename IntType>
+template <typename T, typename IndexT>
 __global__ void UnStackHelperCUDAKernel(const T* __restrict__ input,
                                         int pre_dim_size,
                                         int split_dim_size,
@@ -33,104 +31,155 @@ __global__ void UnStackHelperCUDAKernel(const T* __restrict__ input,
   // In this case they are equal
   assert(split_dim_size % num_split == 0);
-  IntType size = pre_dim_size * split_dim_size * suf_dim_size;
-  IntType each_dim_size = split_dim_size / num_split;
+  IndexT size = pre_dim_size * split_dim_size * suf_dim_size;
+  IndexT each_dim_size = split_dim_size / num_split;
 
-  for (IntType offset = blockIdx.x * blockDim.x + threadIdx.x; offset < size;
+  for (IndexT offset = blockIdx.x * blockDim.x + threadIdx.x; offset < size;
        offset += blockDim.x * gridDim.x) {
-    IntType i = offset / (split_dim_size * suf_dim_size);
-    IntType j = (offset % (split_dim_size * suf_dim_size)) / suf_dim_size;
-    IntType k = offset % suf_dim_size;
+    IndexT i = offset / (split_dim_size * suf_dim_size);
+    IndexT j = (offset % (split_dim_size * suf_dim_size)) / suf_dim_size;
+    IndexT k = offset % suf_dim_size;
 
     T* output = output_ptrs[j / each_dim_size];
     if (output == nullptr) {
       return;
     }
-    IntType output_ind = i * each_dim_size * suf_dim_size +
-                         (j % each_dim_size) * suf_dim_size + k;
+    IndexT output_ind = i * each_dim_size * suf_dim_size +
+                        (j % each_dim_size) * suf_dim_size + k;
     *(output + output_ind) = input[offset];
   }
 }
 
-template <typename T, typename Context>
-void StackGradKernel(const Context& dev_ctx,
-                     const DenseTensor& out,
-                     int axis,
-                     std::vector<DenseTensor*> x_grad) {
-  if (axis < 0) axis += out.dims().size();
-
-  int n = out.dims()[axis];
-  PADDLE_ENFORCE_EQ(n,
-                    x_grad.size(),
-                    phi::errors::InvalidArgument(
-                        "Output x_grad size should be equal to n, but"
-                        " received n is:%d x_grad size is:%d.",
-                        n,
-                        x_grad.size()));
-
-  // x_grad is output, so save each data address, then copy each dy into dx_data
-  std::vector<T*> outputs(n);
-  for (size_t j = 0; j < x_grad.size(); ++j) {
-    if (x_grad[j] == nullptr) {
-      outputs[j] = nullptr;
-      continue;
-    }
-    if (x_grad[j]->numel() != 0UL) {
-      T* ptr = dev_ctx.template Alloc<T>(x_grad[j]);
-      outputs[j] = ptr;
-    } else {
-      outputs[j] = nullptr;
-    }
-  }
-  auto dy_data = out.data<T>();
-  // each x_grad should have same shape
-  int dy_pre = 1, dy_suf = 1;
-  auto dy_dims = out.dims();
-  int split_dim = n;
-  for (int i = 0; i < axis; ++i) {
-    dy_pre *= dy_dims[i];
-  }
-  dy_suf = out.numel() / (split_dim * dy_pre);
-
-  auto tmp_out_data = paddle::memory::Alloc(
-      dev_ctx.GetPlace(),
-      outputs.size() * sizeof(T*),
-      phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
-  paddle::memory::Copy(dev_ctx.GetPlace(),
-                       tmp_out_data->ptr(),
-                       phi::CPUPlace(),
-                       reinterpret_cast<void*>(outputs.data()),
-                       outputs.size() * sizeof(T*),
-                       dev_ctx.stream());
-
-  auto config = phi::backends::gpu::GetGpuLaunchConfig1D(
-      dev_ctx, dy_pre * split_dim * dy_suf);
-
-  if (out.numel() < std::numeric_limits<int32_t>::max()) {
-    UnStackHelperCUDAKernel<T, int32_t>
-        <<<config.block_per_grid.x,
-           config.thread_per_block.x,
-           0,
-           dev_ctx.stream()>>>(dy_data,
-                               dy_pre,
-                               split_dim,
-                               dy_suf,
-                               split_dim,
-                               reinterpret_cast<T**>(tmp_out_data->ptr()));
-  } else {
-    UnStackHelperCUDAKernel<T, int64_t>
-        <<<config.block_per_grid.x,
-           config.thread_per_block.x,
-           0,
-           dev_ctx.stream()>>>(dy_data,
-                               dy_pre,
-                               split_dim,
-                               dy_suf,
-                               split_dim,
-                               reinterpret_cast<T**>(tmp_out_data->ptr()));
-  }
-}
+template <typename T, typename IndexT>
+__global__ void StackGradKernelForLastDim(const T* __restrict__ in_data,
+                                          const IndexT cols,
+                                          const IndexT rows,
+                                          const IndexT tile_x_num,
+                                          T** out_datas) {
+  constexpr int buffer_size = 512;
+  __shared__ T s_buf[buffer_size];
+
+  for (IndexT tile_x = blockIdx.x; tile_x < tile_x_num; tile_x += gridDim.x) {
+    IndexT row_idx = tile_x * blockDim.x + threadIdx.x;
+    IndexT col_idx = blockIdx.y * blockDim.y + threadIdx.y;
+    int s_idx = threadIdx.y * blockDim.x + threadIdx.x;
+    bool is_valid = (col_idx < cols && row_idx < rows);
+
+    if (is_valid) {
+      T data = in_data[row_idx * cols + col_idx];
+      s_buf[s_idx] = data;
+    }
+    __syncthreads();
+    if (is_valid) {
+      if (out_datas[col_idx] != nullptr) {
+        out_datas[col_idx][row_idx] = s_buf[s_idx];
+      }
+    }
+  }
+}
+
+template <typename Context, typename T, typename IndexT>
+void LaunchStackGradCUDAKernel(const Context& ctx,
+                               const DenseTensor& out,
+                               std::vector<DenseTensor*>* x_grad_ptr,
+                               const int axis,
+                               const int64_t dy_pre) {
+  auto x_grad = *x_grad_ptr;
+  int out_num = out.dims()[axis];
+  PADDLE_ENFORCE_EQ(
+      out_num,
+      x_grad.size(),
+      phi::errors::InvalidArgument(
+          "Output x_grad size shall be equal to output num, but output num "
+          "received in stack_grad op is:%d, and x_grad size is:%d.",
+          out_num,
+          x_grad.size()));
+
+  std::vector<T*> outputs(out_num);
+  for (size_t j = 0; j < out_num; ++j) {
+    if (x_grad[j] == nullptr || x_grad[j]->numel() == 0UL) {
+      outputs[j] = nullptr;
+    } else {
+      outputs[j] = ctx.template Alloc<T>(x_grad[j]);
+    }
+  }
+
+  auto tmp_out_data = paddle::memory::Alloc(
+      ctx.GetPlace(),
+      out_num * sizeof(T*),
+      phi::Stream(reinterpret_cast<phi::StreamId>(ctx.stream())));
+  paddle::memory::Copy(ctx.GetPlace(),
+                       tmp_out_data->ptr(),
+                       phi::CPUPlace(),
+                       reinterpret_cast<void*>(outputs.data()),
+                       out_num * sizeof(T*),
+                       ctx.stream());
+
+  if (axis == (out.dims().size() - 1)) {
+    constexpr int kThreads = 512;
+    constexpr int kWarpSize = 32;
+    constexpr int kMaxOut = 16;
+    int tid_x = 0, tid_y = 0, bid_x = 0, bid_y = 1;
+    bool is_small_num = out_num < kMaxOut;
+
+    if (is_small_num) {
+      tid_y = out_num;
+      tid_x =
+          std::min(backends::gpu::RoundToNextHighPowOfTwo(dy_pre, kWarpSize),
+                   kThreads / backends::gpu::RoundToNextHighPowOfTwo(tid_y));
+    } else {
+      tid_y = kMaxOut;
+      tid_x = kWarpSize;
+      bid_y = backends::gpu::DivUp<int>(out_num, kMaxOut);
+    }
+    int tile_x_num = backends::gpu::DivUp<int>(dy_pre, tid_x);
+    bid_x = std::min(tile_x_num, backends::gpu::kMultiDimslimit);
+    dim3 blocks(tid_x, tid_y, 1);
+    dim3 grids(bid_x, bid_y, 1);
+
+    StackGradKernelForLastDim<T, IndexT><<<grids, blocks, 0, ctx.stream()>>>(
+        out.data<T>(),
+        out_num,
+        dy_pre,
+        tile_x_num,
+        reinterpret_cast<T**>(tmp_out_data->ptr()));
+  } else {
+    int dy_suf = out.numel() / (out_num * dy_pre);
+    auto config =
+        backends::gpu::GetGpuLaunchConfig1D(ctx, dy_pre * out_num * dy_suf);
+
+    UnStackHelperCUDAKernel<T, IndexT>
+        <<<config.block_per_grid, config.thread_per_block, 0, ctx.stream()>>>(
+            out.data<T>(),
+            dy_pre,
+            out_num,
+            dy_suf,
+            out_num,
+            reinterpret_cast<T**>(tmp_out_data->ptr()));
+  }
+}
+
+template <typename T, typename Context>
+void StackGradKernel(const Context& dev_ctx,
+                     const DenseTensor& out,
+                     int axis,
+                     std::vector<DenseTensor*> x_grad) {
+  const auto& dy_dims = out.dims();
+  int actual_axis = axis < 0 ? axis + dy_dims.size() : axis;
+  bool use_int32 = out.numel() < std::numeric_limits<int32_t>::max();
+
+  int64_t dy_pre = 1;
+  for (int i = 0; i < actual_axis; ++i) {
+    dy_pre *= dy_dims[i];
+  }
+  if (use_int32) {
+    LaunchStackGradCUDAKernel<Context, T, int32_t>(
+        dev_ctx, out, &x_grad, actual_axis, dy_pre);
+  } else {
+    LaunchStackGradCUDAKernel<Context, T, int64_t>(
+        dev_ctx, out, &x_grad, actual_axis, dy_pre);
+  }
+}
 
 }  // namespace phi
 
 PD_REGISTER_KERNEL(stack_grad,
......
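The launch geometry for the last-dimension path can be read directly off the diff: threadIdx.y indexes the stacked outputs, threadIdx.x indexes rows of dy, blockIdx.y splits the outputs when there are more than 16, and blockIdx.x grid-strides over row tiles. The sketch below re-derives that selection as a standalone function so it can be reasoned about in isolation; RoundUpPow2 and CeilDiv are simplified stand-ins for backends::gpu::RoundToNextHighPowOfTwo and DivUp (assumed semantics: round up to the next power of two with a floor, and ceiling division), and LastDimLaunchDims is a hypothetical name.

// Sketch of the thread/block selection used by the last-dimension branch.
#include <algorithm>
#include <cstdint>

struct LaunchDims {
  int tid_x, tid_y;  // block dimensions
  int bid_x, bid_y;  // grid dimensions
  int tile_x_num;    // total row tiles the kernel grid-strides over
};

inline int RoundUpPow2(int n, int min_val = 1) {
  int p = min_val;
  while (p < n) p <<= 1;
  return p;
}
inline int CeilDiv(int a, int b) { return (a + b - 1) / b; }

inline LaunchDims LastDimLaunchDims(int dy_pre, int out_num) {
  constexpr int kThreads = 512;  // per-block thread budget
  constexpr int kWarpSize = 32;
  constexpr int kMaxOut = 16;    // max stacked outputs handled by one block
  constexpr int kGridXLimit = 65536;

  LaunchDims d{0, 0, 0, 1, 0};
  if (out_num < kMaxOut) {
    // Few outputs: give each one a threadIdx.y lane and spend the rest of the
    // 512-thread budget on rows.
    d.tid_y = out_num;
    d.tid_x = std::min(RoundUpPow2(dy_pre, kWarpSize),
                       kThreads / RoundUpPow2(d.tid_y));
  } else {
    // Many outputs: fix a 32x16 tile and split the outputs across blockIdx.y.
    d.tid_y = kMaxOut;
    d.tid_x = kWarpSize;
    d.bid_y = CeilDiv(out_num, kMaxOut);
  }
  d.tile_x_num = CeilDiv(dy_pre, d.tid_x);
  d.bid_x = std::min(d.tile_x_num, kGridXLimit);
  return d;
}

Note that either branch keeps tid_x * tid_y at or below 512, which is why the kernel's shared staging buffer of 512 elements, indexed by s_idx = threadIdx.y * blockDim.x + threadIdx.x, gives every thread exactly one slot.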
@@ -13,9 +13,7 @@
 // limitations under the License.
 
 #include "paddle/phi/kernels/stack_kernel.h"
-
 #include "paddle/fluid/memory/memory.h"
-#include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/backends/gpu/gpu_launch_config.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/funcs/fast_divmod.h"
@@ -135,7 +133,7 @@ void LaunchStackCUDAKernelWithIndexType(
     } break;
 
 #define IMPL_STACK_CUDA_KERNEL_HELPER(...)          \
-  IMPL_STACK_CUDA_KERNEL_CASE(2, ##__VA_ARGS__);    \
+  IMPL_STACK_CUDA_KERNEL_CASE(4, ##__VA_ARGS__);    \
   IMPL_STACK_CUDA_KERNEL_CASE(8, ##__VA_ARGS__);    \
   IMPL_STACK_CUDA_KERNEL_CASE(16, ##__VA_ARGS__);   \
   IMPL_STACK_CUDA_KERNEL_CASE(32, ##__VA_ARGS__);   \
......
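This last hunk only changes the smallest dispatch case of the forward stack kernel from 2 to 4: the switch built from IMPL_STACK_CUDA_KERNEL_CASE specializes the launch on a compile-time input count. The sketch below shows one common way such a capacity dispatch is structured (a fixed-capacity pointer array filled from the runtime input list); all names here are hypothetical stand-ins, not the real Paddle helpers, and the real macro may continue with larger cases that are collapsed in this diff.

// Capacity-dispatch pattern: round the runtime input count up to the next
// supported compile-time capacity and forward to a callable templated on it.
#include <algorithm>
#include <initializer_list>
#include <stdexcept>
#include <vector>

template <typename T, int N>
struct PointerArray {
  const T* data[N] = {nullptr};  // fixed-size block passed to the kernel by value
};

inline int NextSupportedCapacity(int n) {
  // Smallest case is 4 after this hunk (it used to be 2).
  for (int cap : {4, 8, 16, 32}) {
    if (n <= cap) return cap;
  }
  throw std::invalid_argument("too many stacked inputs for this sketch");
}

template <typename T, typename LaunchFn>
void DispatchByInputNum(const std::vector<const T*>& ins, LaunchFn&& launch) {
  switch (NextSupportedCapacity(static_cast<int>(ins.size()))) {
#define DISPATCH_CASE(N)                          \
  case N: {                                       \
    PointerArray<T, N> args;                      \
    std::copy(ins.begin(), ins.end(), args.data); \
    launch(args);                                 \
  } break
    DISPATCH_CASE(4);
    DISPATCH_CASE(8);
    DISPATCH_CASE(16);
    DISPATCH_CASE(32);
#undef DISPATCH_CASE
    default:
      throw std::invalid_argument("unsupported input count");
  }
}

Usage would look like DispatchByInputNum<float>(ins, [](auto args) { /* launch the templated kernel with args */ });. Raising the smallest capacity from 2 to 4 trades a slightly larger parameter block for fewer template instantiations and dispatch branches.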