Unverified commit 702bce57, authored by: F fwenguang, committed by: GitHub

[MLU] add pool2d and pool2d_grad mlu kernel (#39453)

Parent: d25a7f9e
@@ -224,11 +224,13 @@ MLUCnnlActivationDesc::~MLUCnnlActivationDesc() {
 MLUCnnlPoolingDesc::MLUCnnlPoolingDesc(
     const cnnlPoolingMode_t mode, const cnnlNanPropagation_t maxpooling_nan_opt,
     int window_rows, int window_cols, int64_t pad_up, int64_t pad_down,
-    int64_t pad_left, int64_t pad_right, int row_stride, int col_stride) {
+    int64_t pad_left, int64_t pad_right, int row_stride, int col_stride,
+    int row_dilation, int col_dilation, bool ceil_mode) {
   PADDLE_ENFORCE_MLU_SUCCESS(cnnlCreatePoolingDescriptor(&pooling_desc_));
-  PADDLE_ENFORCE_MLU_SUCCESS(cnnlSetPooling2dDescriptor(
+  PADDLE_ENFORCE_MLU_SUCCESS(cnnlSetPooling2dDescriptor_v2(
       pooling_desc_, mode, maxpooling_nan_opt, window_rows, window_cols, pad_up,
-      pad_down, pad_left, pad_right, row_stride, col_stride));
+      pad_down, pad_left, pad_right, row_stride, col_stride, row_dilation,
+      col_dilation, ceil_mode));
 }

 MLUCnnlPoolingDesc::MLUCnnlPoolingDesc(
@@ -1125,17 +1127,16 @@ MLUCnnlTrigonDesc::~MLUCnnlTrigonDesc() {
 }

 /* static */ void MLUCnnl::PoolingForward(
-    const ExecutionContext& ctx, cnnlPoolingMode_t pool_mode,
-    const std::vector<int64_t>& output_shape,
-    const cnnlPoolingDescriptor_t pooling_desc, const void* alpha,
-    const cnnlTensorDescriptor_t input_desc, const void* input,
-    const void* beta, const void* extra_input_ptr,
+    const ExecutionContext& ctx, cnnlPoolingMode_t pool_mode, int64_t output_h,
+    int64_t output_w, const cnnlPoolingDescriptor_t pooling_desc,
+    const void* alpha, const cnnlTensorDescriptor_t input_desc,
+    const void* input, const void* beta, const void* extra_input_ptr,
     const cnnlTensorDescriptor_t output_desc, void* output) {
   cnnlHandle_t handle = GetHandleFromCTX(ctx);

   size_t workspace_size = 0;
   PADDLE_ENFORCE_MLU_SUCCESS(cnnlGetPoolingWorkspaceSize(
-      handle, pool_mode, output_shape[2], output_shape[1], &workspace_size));
+      handle, pool_mode, output_w, output_h, &workspace_size));

   auto& dev_ctx = GetDevCtxFromCTX(ctx);
   Tensor workspace = ctx.AllocateTmpTensor<int8_t, MLUDeviceContext>(
...
@@ -236,7 +236,8 @@ class MLUCnnlPoolingDesc {
                      const cnnlNanPropagation_t maxpooling_nan_opt,
                      int window_rows, int window_cols, int64_t pad_up,
                      int64_t pad_down, int64_t pad_left, int64_t pad_right,
-                     int row_stride, int col_stride);
+                     int row_stride, int col_stride, int row_dilation,
+                     int col_dilation, bool ceil_mode);

   MLUCnnlPoolingDesc(const cnnlPoolingMode_t mode,
                      const cnnlNanPropagation_t maxpooling_nan_opt,
@@ -643,10 +644,9 @@ class MLUCnnl {

   static void PoolingForward(
       const ExecutionContext& ctx, cnnlPoolingMode_t pool_mode,
-      const std::vector<int64_t>& output_shape,
-      cnnlPoolingDescriptor_t pooling_desc, const void* alpha,
-      const cnnlTensorDescriptor_t input_desc, const void* input,
-      const void* beta, const void* extra_input_ptr,
+      int64_t output_h, int64_t output_w, cnnlPoolingDescriptor_t pooling_desc,
+      const void* alpha, const cnnlTensorDescriptor_t input_desc,
+      const void* input, const void* beta, const void* extra_input_ptr,
       const cnnlTensorDescriptor_t output_desc, void* output);

   static void Pool3D(const ExecutionContext& ctx, cnnlPoolingMode_t pool_mode,
...
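Taken together, the header and implementation changes above extend MLUCnnlPoolingDesc with dilation and ceil-mode arguments (switching to cnnlSetPooling2dDescriptor_v2) and replace PoolingForward's output_shape vector with explicit output_h/output_w. Below is a minimal call-site sketch of the updated interface; it simply mirrors the new kernel added in this commit, and names such as pooling_type, exclusive, ksize, strides, paddings, ceil_mode, out_h, out_w, in_x_desc and out_desc come from that kernel rather than from the interface itself.

// Sketch only: how a caller uses the extended descriptor and forward API.
cnnlPoolingMode_t pool_mode = ToCnnlPoolingMode(pooling_type, exclusive);
MLUCnnlPoolingDesc pool_desc(
    pool_mode, CNNL_NOT_PROPAGATE_NAN, ksize[0], ksize[1], paddings[0],
    paddings[1], paddings[2], paddings[3], strides[0], strides[1],
    /*row_dilation=*/1, /*col_dilation=*/1, ceil_mode);
// output_h/output_w are now passed directly instead of an output_shape vector.
MLUCnnl::PoolingForward(ctx, pool_mode, out_h, out_w, pool_desc.get(),
                        /*alpha=*/nullptr, in_x_desc.get(), GetBasePtr(in_x),
                        /*beta=*/nullptr, /*extra_input_ptr=*/nullptr,
                        out_desc.get(), GetBasePtr(out));

The new pool2d/pool2d_grad kernel file added by the commit follows.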
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/pool_op.h"
#include "paddle/fluid/operators/mlu/mlu_baseop.h"
namespace paddle {
namespace operators {
namespace {
cnnlPoolingMode_t ToCnnlPoolingMode(const std::string &pooling_type,
bool exclusive) {
cnnlPoolingMode_t pooling_mode;
if (pooling_type == "max") {
pooling_mode = CNNL_POOLING_MAX;
} else if (pooling_type == "avg") {
if (exclusive) {
pooling_mode = CNNL_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
} else {
pooling_mode = CNNL_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
}
} else {
PADDLE_THROW(platform::errors::InvalidArgument("Unknown pooling_type: %s",
pooling_type));
}
return pooling_mode;
}
} // namespace
template <typename T>
class MLUPoolOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
auto &dev_ctx = ctx.template device_context<platform::MLUDeviceContext>();
const Tensor *in_x = ctx.Input<Tensor>("X");
Tensor *out = ctx.Output<Tensor>("Out");
out->mutable_data<T>(ctx.GetPlace());
std::string pooling_type = ctx.Attr<std::string>("pooling_type");
std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::string data_format = ctx.Attr<std::string>("data_format");
bool global_pooling = ctx.Attr<bool>("global_pooling");
bool ceil_mode = ctx.Attr<bool>("ceil_mode");
bool exclusive = ctx.Attr<bool>("exclusive");
bool adaptive = ctx.Attr<bool>("adaptive");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
    PADDLE_ENFORCE_EQ(in_x->dims().size(), 4,
                      platform::errors::InvalidArgument(
                          "The MLU pool2d kernel only supports 4-D input."));
    PADDLE_ENFORCE_EQ(adaptive, false,
                      platform::errors::InvalidArgument(
                          "The MLU pool2d kernel does not support adaptive pooling."));
    // The layout defaults to NCHW and is switched to NHWC below when
    // data_format requires it.
    cnnlTensorLayout_t cnnl_layout = CNNL_LAYOUT_NCHW;
auto out_dims = out->dims();
int64_t out_h = out_dims[2];
int64_t out_w = out_dims[3];
auto in_x_dims = in_x->dims();
framework::DDim data_dims =
framework::slice_ddim(in_x_dims, 2, in_x_dims.size());
const bool channel_last = data_format == "NHWC";
if (channel_last) {
cnnl_layout = CNNL_LAYOUT_NHWC;
out_h = out_dims[1];
out_w = out_dims[2];
data_dims = framework::slice_ddim(in_x_dims, 1, in_x_dims.size() - 1);
}
UpdatePadding(&paddings, global_pooling, adaptive, padding_algorithm,
data_dims, strides, ksize);
if (global_pooling) {
UpdateKsize(&ksize, data_dims);
}
MLUCnnlTensorDesc in_x_desc(*in_x, cnnl_layout, ToCnnlDataType<T>());
MLUCnnlTensorDesc out_desc(*out, cnnl_layout, ToCnnlDataType<T>());
cnnlPoolingMode_t pool_mode = ToCnnlPoolingMode(pooling_type, exclusive);
MLUCnnlPoolingDesc pool_desc(
pool_mode, CNNL_NOT_PROPAGATE_NAN, ksize[0], ksize[1], paddings[0],
paddings[1], paddings[2], paddings[3], strides[0], strides[1],
1 /*row_dilation*/, 1 /*col_dilation*/, ceil_mode);
size_t extra_input_size = 0;
cnnlHandle_t handle =
ctx.template device_context<MLUDeviceContext>().cnnl_handle();
cnnlGetPoolingExtraInputSize(handle, pool_mode, out_w, out_h,
&extra_input_size);
if (extra_input_size > 0) {
paddle::platform::CPUDeviceContext cpu_ctx;
framework::Tensor extra_host_tensor =
ctx.AllocateTmpTensor<int8_t, platform::CPUDeviceContext>(
{static_cast<int64_t>(extra_input_size)}, cpu_ctx);
cnnlInitPoolingExtraInput(handle, pool_desc.get(), in_x_desc.get(),
out_desc.get(), GetBasePtr(&extra_host_tensor));
framework::Tensor extra_device_tensor =
ctx.AllocateTmpTensor<int8_t, MLUDeviceContext>(
{static_cast<int64_t>(extra_input_size)}, dev_ctx);
      // TODO(fwg): use an async copy, and add a callback to the stream that
      // frees the host memory.
framework::TensorCopySync(extra_host_tensor, ctx.GetPlace(),
&extra_device_tensor);
MLUCnnl::PoolingForward(
ctx, pool_mode, out_h, out_w, pool_desc.get(), nullptr /*alpha*/,
in_x_desc.get(), GetBasePtr(in_x), nullptr /*beta*/,
GetBasePtr(&extra_device_tensor) /*params_shape_ptr*/, out_desc.get(),
GetBasePtr(out));
} else {
MLUCnnl::PoolingForward(
ctx, pool_mode, out_h, out_w, pool_desc.get(), nullptr /*alpha*/,
in_x_desc.get(), GetBasePtr(in_x), nullptr /*beta*/,
nullptr /*params_shape_ptr*/, out_desc.get(), GetBasePtr(out));
}
}
};
template <typename T, typename IDX_T>
class MLUPoolGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
auto &dev_ctx = ctx.template device_context<platform::MLUDeviceContext>();
const Tensor *in_x = ctx.Input<Tensor>("X");
const Tensor *out = ctx.Input<Tensor>("Out");
const Tensor *out_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
Tensor *in_x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
in_x_grad->mutable_data<T>(ctx.GetPlace());
std::string pooling_type = ctx.Attr<std::string>("pooling_type");
std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
bool ceil_mode = ctx.Attr<bool>("ceil_mode");
bool exclusive = ctx.Attr<bool>("exclusive");
bool adaptive = ctx.Attr<bool>("adaptive");
std::string data_format = ctx.Attr<std::string>("data_format");
bool global_pooling = ctx.Attr<bool>("global_pooling");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
const bool channel_last = data_format == "NHWC";
auto in_x_dims = in_x->dims();
framework::DDim data_dims =
framework::slice_ddim(in_x_dims, 2, in_x_dims.size());
if (channel_last) {
data_dims = framework::slice_ddim(in_x_dims, 1, in_x_dims.size() - 1);
}
UpdatePadding(&paddings, global_pooling, adaptive, padding_algorithm,
data_dims, strides, ksize);
if (global_pooling) {
UpdateKsize(&ksize, data_dims);
}
    // The pooling backward path works on NHWC tensors, so transpose NCHW
    // inputs before calling it.
framework::Tensor trans_in_x;
framework::Tensor trans_out;
framework::Tensor trans_out_grad;
framework::Tensor trans_in_x_grad;
if (channel_last) {
trans_in_x = *in_x;
trans_out = *out;
trans_out_grad = *out_grad;
trans_in_x_grad = *in_x_grad;
} else {
std::vector<int> perm{0, 2, 3, 1};
TransposeFromMLUTensor<T>(ctx, perm, in_x, &trans_in_x,
true /*need_reshape_or_alloc*/);
TransposeFromMLUTensor<T>(ctx, perm, out, &trans_out,
true /*need_reshape_or_alloc*/);
TransposeFromMLUTensor<T>(ctx, perm, out_grad, &trans_out_grad,
true /*need_reshape_or_alloc*/);
auto in_x_grad_dims = in_x_grad->dims();
trans_in_x_grad = ctx.AllocateTmpTensor<T, MLUDeviceContext>(
{in_x_grad_dims[0], in_x_grad_dims[2], in_x_grad_dims[3],
in_x_grad_dims[1]},
dev_ctx);
}
MLUCnnlTensorDesc trans_in_x_desc(trans_in_x, CNNL_LAYOUT_NHWC,
ToCnnlDataType<T>());
MLUCnnlTensorDesc trans_out_desc(trans_out, CNNL_LAYOUT_NHWC,
ToCnnlDataType<T>());
MLUCnnlTensorDesc trans_out_grad_desc(trans_out_grad, CNNL_LAYOUT_NHWC,
ToCnnlDataType<T>());
MLUCnnlTensorDesc trans_in_x_grad_desc(trans_in_x_grad, CNNL_LAYOUT_NHWC,
ToCnnlDataType<T>());
cnnlPoolingMode_t pool_mode = ToCnnlPoolingMode(pooling_type, exclusive);
MLUCnnlPoolingDesc pool_desc(
pool_mode, CNNL_NOT_PROPAGATE_NAN, ksize[0], ksize[1], paddings[0],
paddings[1], paddings[2], paddings[3], strides[0], strides[1],
1 /*row_dilation*/, 1 /*col_dilation*/, ceil_mode);
if (pooling_type == "max") {
framework::Tensor index_tensor =
ctx.AllocateTmpTensor<IDX_T, MLUDeviceContext>(trans_out_grad.dims(),
dev_ctx);
MLUCnnlTensorDesc index_tensor_desc(index_tensor, CNNL_LAYOUT_NHWC,
ToCnnlDataType<IDX_T>());
MLUCnnl::PoolingIndex(ctx, pool_desc.get(), trans_in_x_desc.get(),
GetBasePtr(&trans_in_x), index_tensor_desc.get(),
GetBasePtr(&index_tensor));
MLUCnnl::PoolingBackward(
ctx, pool_desc.get(), nullptr /*alpha*/, index_tensor_desc.get(),
GetBasePtr(&index_tensor), trans_out_grad_desc.get(),
GetBasePtr(&trans_out_grad), trans_in_x_desc.get(),
GetBasePtr(&trans_in_x), nullptr /*beta*/, trans_in_x_grad_desc.get(),
GetBasePtr(&trans_in_x_grad));
} else {
MLUCnnl::PoolingBackward(ctx, pool_desc.get(), nullptr /*alpha*/, nullptr,
nullptr, trans_out_grad_desc.get(),
GetBasePtr(&trans_out_grad), nullptr, nullptr,
nullptr /*beta*/, trans_in_x_grad_desc.get(),
GetBasePtr(&trans_in_x_grad));
}
if (!channel_last) {
std::vector<int> perm{0, 3, 1, 2};
TransposeFromMLUTensor<T>(ctx, perm, &trans_in_x_grad, in_x_grad,
false /*need_reshape_or_alloc*/);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_MLU_KERNEL(pool2d, ops::MLUPoolOpKernel<float>,
ops::MLUPoolOpKernel<plat::float16>);
REGISTER_OP_MLU_KERNEL(pool2d_grad, ops::MLUPoolGradOpKernel<float, int>,
ops::MLUPoolGradOpKernel<plat::float16, int16_t>);