From a635a8a5c4ec914f57058bdcccf854d620ce5f42 Mon Sep 17 00:00:00 2001
From: Ruibiao Chen
Date: Fri, 26 Aug 2022 22:34:46 +0800
Subject: [PATCH] Move conv2d_transpose_grad XPU kernel to PHI, test=kunlun (#45466)

---
 .../fluid/operators/conv_transpose_op_xpu.cc  | 116 ------------------
 .../kernels/xpu/conv_transpose_grad_kernel.cc | 106 ++++++++++++++++
 2 files changed, 106 insertions(+), 116 deletions(-)
 delete mode 100644 paddle/fluid/operators/conv_transpose_op_xpu.cc
 create mode 100644 paddle/phi/kernels/xpu/conv_transpose_grad_kernel.cc

diff --git a/paddle/fluid/operators/conv_transpose_op_xpu.cc b/paddle/fluid/operators/conv_transpose_op_xpu.cc
deleted file mode 100644
index 882bd0b091e..00000000000
--- a/paddle/fluid/operators/conv_transpose_op_xpu.cc
+++ /dev/null
@@ -1,116 +0,0 @@
-/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-    http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/operators/conv_transpose_op.h"
-#include "paddle/fluid/platform/device/device_wrapper.h"
-#include "paddle/phi/kernels/cpu/conv_util.h"
-
-#ifdef PADDLE_WITH_XPU
-namespace paddle {
-namespace operators {
-
-using Tensor = framework::Tensor;
-
-template <typename DeviceContext, typename T>
-class Conv2DTransposeGradXPUKernel : public framework::OpKernel<T> {
- public:
-  void Compute(const framework::ExecutionContext& context) const override {
-    const Tensor* input = context.Input<Tensor>("Input");
-    const Tensor* output_grad =
-        context.Input<Tensor>(framework::GradVarName("Output"));
-    Tensor* input_grad =
-        context.Output<Tensor>(framework::GradVarName("Input"));
-    Tensor* filter_grad =
-        context.Output<Tensor>(framework::GradVarName("Filter"));
-    // The filter and filter_grad will be reshaped in the calculations,
-    // so here use an assignment operation,
-    // that avoids modifying the variable in the Scope.
-    Tensor filter = *context.Input<Tensor>("Filter");
-    if (!input_grad && !filter_grad) return;
-    int groups = context.Attr<int>("groups");
-    std::vector<int> strides = context.Attr<std::vector<int>>("strides");
-    std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
-    std::vector<int> dilations = context.Attr<std::vector<int>>("dilations");
-    const std::string data_format = context.Attr<std::string>("data_format");
-    const std::string padding_algorithm =
-        context.Attr<std::string>("padding_algorithm");
-
-    PADDLE_ENFORCE_EQ(
-        data_format == "NHWC" || data_format == "NDHWC",
-        false,
-        platform::errors::InvalidArgument(
-            ("XPU do support data_format is NCHW in conv grad op.")));
-
-    framework::DDim in_data_dims =
-        phi::slice_ddim(input->dims(), 2, input->dims().size());
-    framework::DDim filter_data_dims =
-        phi::slice_ddim(filter.dims(), 2, filter.dims().size());
-    std::vector<int> ksize = phi::vectorize<int>(filter_data_dims);
-    phi::UpdatePaddingAndDilation(
-        &paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize);
-
-    const int batch_size = static_cast<int>(input->dims()[0]);
-    const int img_yc = static_cast<int>(input->dims()[1]);
-    const int img_yh = static_cast<int>(input->dims()[2]);
-    const int img_yw = static_cast<int>(input->dims()[3]);
-    const int img_xc = static_cast<int>(output_grad->dims()[1]);
-    const int img_xh = static_cast<int>(output_grad->dims()[2]);
-    const int img_xw = static_cast<int>(output_grad->dims()[3]);
-    if (input_grad) {
-      input_grad->mutable_data<T>(context.GetPlace());
-    }
-    if (filter_grad) {
-      filter_grad->mutable_data<T>(context.GetPlace());
-    }
-
-    auto& dev_ctx = context.template device_context<DeviceContext>();
-    int r = xpu::conv2d_transpose_grad<T>(
-        dev_ctx.x_context(),
-        input->data<T>(),
-        filter.data<T>(),
-        output_grad->data<T>(),
-        input_grad ? input_grad->data<T>() : nullptr,
-        filter_grad ? filter_grad->data<T>() : nullptr,
-        batch_size,
-        img_yc,
-        img_yh,
-        img_yw,
-        img_xc,
-        img_xh,
-        img_xw,
-        ksize,
-        strides,
-        paddings,
-        dilations,
-        groups,
-        nullptr,
-        nullptr,
-        nullptr,
-        nullptr,
-        nullptr,
-        true);
-    PADDLE_ENFORCE_XDNN_SUCCESS(r, "conv2d_transpose_grad");
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
-namespace ops = paddle::operators;
-REGISTER_OP_XPU_KERNEL(
-    conv2d_transpose_grad,
-    ops::Conv2DTransposeGradXPUKernel<paddle::platform::XPUDeviceContext,
-                                      float>);
-#endif
diff --git a/paddle/phi/kernels/xpu/conv_transpose_grad_kernel.cc b/paddle/phi/kernels/xpu/conv_transpose_grad_kernel.cc
new file mode 100644
index 00000000000..49061069b8c
--- /dev/null
+++ b/paddle/phi/kernels/xpu/conv_transpose_grad_kernel.cc
@@ -0,0 +1,106 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/phi/kernels/conv_transpose_grad_kernel.h"
+
+#include "paddle/phi/backends/xpu/enforce_xpu.h"
+#include "paddle/phi/core/kernel_registry.h"
+#include "paddle/phi/kernels/cpu/conv_util.h"
+
+namespace phi {
+template <typename T, typename Context>
+void Conv2dTransposeGradKernel(const Context& ctx,
+                               const DenseTensor& x,
+                               const DenseTensor& filter,
+                               const DenseTensor& dout,
+                               const std::vector<int>& strides,
+                               const std::vector<int>& paddings,
+                               const std::vector<int>& output_padding,
+                               const std::vector<int>& output_size,
+                               const std::string& padding_algorithm,
+                               int groups,
+                               const std::vector<int>& dilations,
+                               const std::string& data_format,
+                               DenseTensor* dx,
+                               DenseTensor* dfilter) {
+  // The filter and dfilter will be reshaped in the calculations,
+  // so make a local copy here via an assignment operation
+  // to avoid modifying the filter variable in the Scope.
+  DenseTensor filter_ = filter;
+  if (!dx && !dfilter) return;
+
+  std::vector<int> paddings_ = paddings;
+  std::vector<int> dilations_ = dilations;
+
+  PADDLE_ENFORCE_EQ(
+      data_format == "NHWC" || data_format == "NDHWC",
+      false,
+      errors::InvalidArgument(
+          "XPU only supports data_format NCHW in conv grad op."));
+
+  DDim in_data_dims = slice_ddim(x.dims(), 2, x.dims().size());
+  DDim filter_data_dims = slice_ddim(filter_.dims(), 2, filter_.dims().size());
+  std::vector<int> ksize = vectorize<int>(filter_data_dims);
+  UpdatePaddingAndDilation(
+      &paddings_, &dilations_, padding_algorithm, in_data_dims, strides, ksize);
+
+  const int batch_size = static_cast<int>(x.dims()[0]);
+  const int img_yc = static_cast<int>(x.dims()[1]);
+  const int img_yh = static_cast<int>(x.dims()[2]);
+  const int img_yw = static_cast<int>(x.dims()[3]);
+  const int img_xc = static_cast<int>(dout.dims()[1]);
+  const int img_xh = static_cast<int>(dout.dims()[2]);
+  const int img_xw = static_cast<int>(dout.dims()[3]);
+  if (dx) {
+    ctx.template Alloc<T>(dx);
+  }
+  if (dfilter) {
+    ctx.template Alloc<T>(dfilter);
+  }
+
+  int r = xpu::conv2d_transpose_grad<T>(
+      ctx.x_context(),
+      x.data<T>(),
+      filter_.data<T>(),
+      dout.data<T>(),
+      dx ? dx->data<T>() : nullptr,
+      dfilter ? dfilter->data<T>() : nullptr,
+      batch_size,
+      img_yc,
+      img_yh,
+      img_yw,
+      img_xc,
+      img_xh,
+      img_xw,
+      ksize,
+      strides,
+      paddings_,
+      dilations_,
+      groups,
+      nullptr,
+      nullptr,
+      nullptr,
+      nullptr,
+      nullptr,
+      true);
+  PADDLE_ENFORCE_XDNN_SUCCESS(r, "conv2d_transpose_grad");
+}
+
+}  // namespace phi
+
+PD_REGISTER_KERNEL(conv2d_transpose_grad,
+                   XPU,
+                   ALL_LAYOUT,
+                   phi::Conv2dTransposeGradKernel,
+                   float) {}
-- 
GitLab