From 0ebb3b5c2d122e8637268dc4d3b5ae3e03a1fe44 Mon Sep 17 00:00:00 2001
From: liym27 <33742067+liym27@users.noreply.github.com>
Date: Thu, 10 Oct 2019 16:04:22 +0800
Subject: [PATCH] [cherry-pick] mv two functions in conv op for good code style (#20116) test=release/1.6 (#20268)

* Delete PadFunction; include padding.h instead.

* Move IsSymmetricPadding from conv_cudnn_op.cu/conv_transpose_cudnn_op.cu
  to padding.h.
---
 paddle/fluid/operators/conv_cudnn_op.cu      | 61 ++++---------------
 .../operators/conv_transpose_cudnn_op.cu     | 18 +-----
 paddle/fluid/operators/math/padding.h        | 13 ++++
 3 files changed, 27 insertions(+), 65 deletions(-)

diff --git a/paddle/fluid/operators/conv_cudnn_op.cu b/paddle/fluid/operators/conv_cudnn_op.cu
index b38461f8cc9..14c119a40ca 100644
--- a/paddle/fluid/operators/conv_cudnn_op.cu
+++ b/paddle/fluid/operators/conv_cudnn_op.cu
@@ -21,6 +21,7 @@ limitations under the License. */
 #include "paddle/fluid/operators/conv_cudnn_helper.h"
 #include "paddle/fluid/operators/conv_cudnn_op_cache.h"
 #include "paddle/fluid/operators/conv_op.h"
+#include "paddle/fluid/operators/math/padding.h"
 #include "paddle/fluid/platform/cudnn_helper.h"
 #include "paddle/fluid/platform/cudnn_workspace_helper.h"
 #include "paddle/fluid/platform/float16.h"
@@ -59,44 +60,6 @@ static inline void GetNCDHW(const framework::DDim& dims,
   }
 }
 
-static inline bool IsSymmetricPadding(const std::vector<int>& paddings,
-                                      const int data_dim) {
-  bool is_sys_pad = true;
-  if (paddings.size() == data_dim * 2) {
-    for (size_t i = 0; i < data_dim; ++i) {
-      if (paddings[2 * i] != paddings[2 * i + 1]) {
-        is_sys_pad = false;
-        return is_sys_pad;
-      }
-    }
-  }
-  return is_sys_pad;
-}
-
-template <typename T, int D, int MajorType = Eigen::RowMajor,
-          typename IndexType = Eigen::DenseIndex>
-using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
-
-template <typename DeviceContext, typename T, size_t D>
-static void PadFunction(const framework::ExecutionContext& context,
-                        const std::vector<int>& pads,
-                        const framework::Tensor& src, T pad_value,
-                        framework::Tensor* out) {
-  Eigen::array<std::pair<int, int>, D> paddings;
-
-  for (size_t i = 0; i < paddings.size(); ++i) {
-    paddings[i].first = pads[i * 2];
-    paddings[i].second = pads[i * 2 + 1];
-  }
-
-  auto src_tensor = EigenTensor<T, D>::From(src);
-  auto out_tensor = EigenTensor<T, D>::From(*out);
-
-  auto& place =
-      *context.template device_context<DeviceContext>().eigen_device();
-  out_tensor.device(place) = src_tensor.pad(paddings, pad_value);
-}
-
 template <typename DeviceContext, typename T, size_t D>
 static void Slice_2(const framework::ExecutionContext& context,
                     const Tensor* input, Tensor* out,
@@ -192,7 +155,7 @@ class CUDNNConvOpKernel : public framework::OpKernel<T> {
                              in_data_dims, strides, ksize);
 
     int data_dim = strides.size();  // 2d or 3d
-    bool is_sys_pad = IsSymmetricPadding(paddings, data_dim);
+    bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
 
     Tensor transformed_input;
     std::vector<int> padding_common(data_dim, 0);
@@ -225,12 +188,12 @@ class CUDNNConvOpKernel : public framework::OpKernel<T> {
       T pad_value(0.0);
       switch (rank) {
         case 4: {
-          PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
+          math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
               ctx, input_pad, transformed_input_channel, pad_value,
               &transformed_input);
         } break;
         case 5: {
-          PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
+          math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
               ctx, input_pad, transformed_input_channel, pad_value,
               &transformed_input);
         } break;
@@ -404,7 +367,7 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
     // cuDNN only supports padding the same amount on every dimension.
     // So we create a new padded input tensor.
     int data_dim = strides.size();  // 2d or 3d
-    bool is_sys_pad = IsSymmetricPadding(paddings, data_dim);
+    bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
     Tensor transformed_input(input->type());
     Tensor transformed_input_grad(input->type());
     std::vector<int> padding_common(data_dim, 0);
@@ -446,12 +409,12 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
       T pad_value(0.0);
       switch (rank) {
         case 4: {
-          PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
+          math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
               ctx, input_pad, transformed_input_channel, pad_value,
               &transformed_input);
         } break;
         case 5: {
-          PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
+          math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
               ctx, input_pad, transformed_input_channel, pad_value,
               &transformed_input);
         } break;
@@ -737,7 +700,7 @@ class CUDNNConvDoubleGradOpKernel : public framework::OpKernel<T> {
                              in_data_dims, strides, ksize);
 
     int data_dim = strides.size();  // 2d or 3d
-    bool is_sys_pad = IsSymmetricPadding(paddings, data_dim);
+    bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
 
     Tensor transformed_X(X->type());
     Tensor transformed_ddX(X->type());
@@ -786,16 +749,16 @@ class CUDNNConvDoubleGradOpKernel : public framework::OpKernel<T> {
       T pad_value(0.0);
       switch (rank) {
         case 4: {
-          PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
+          math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
              ctx, input_pad, transformed_X_channel, pad_value,
              &transformed_X);
-          PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
+          math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
              ctx, input_pad, transformed_ddX_channel, pad_value,
              &transformed_ddX);
         } break;
         case 5: {
-          PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
+          math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
              ctx, input_pad, transformed_X_channel, pad_value,
              &transformed_X);
-          PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
+          math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
              ctx, input_pad, transformed_ddX_channel, pad_value,
              &transformed_ddX);
         } break;
diff --git a/paddle/fluid/operators/conv_transpose_cudnn_op.cu b/paddle/fluid/operators/conv_transpose_cudnn_op.cu
index 0dc80d8f29c..82eb5712401 100644
--- a/paddle/fluid/operators/conv_transpose_cudnn_op.cu
+++ b/paddle/fluid/operators/conv_transpose_cudnn_op.cu
@@ -51,20 +51,6 @@ static void DataTranspose(const framework::ExecutionContext& ctx,
   transpose(dev_ctx, *input, output, axis);
 }
 
-static inline bool IsSymmetricPadding(const std::vector<int>& paddings,
-                                      const int data_dim) {
-  bool is_sys_pad = true;
-  if (paddings.size() == data_dim * 2) {
-    for (size_t i = 0; i < data_dim; ++i) {
-      if (paddings[2 * i] != paddings[2 * i + 1]) {
-        is_sys_pad = false;
-        return is_sys_pad;
-      }
-    }
-  }
-  return is_sys_pad;
-}
-
 template <typename T>
 class CUDNNConvTransposeOpKernel : public framework::OpKernel<T> {
  public:
@@ -124,7 +110,7 @@ class CUDNNConvTransposeOpKernel : public framework::OpKernel<T> {
                              in_data_dims, strides, ksize);
 
     int data_dim = strides.size();  // 2d or 3d
-    bool is_sys_pad = IsSymmetricPadding(paddings, data_dim);
+    bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
 
     std::vector<int> input_pad(input_transpose.dims().size() * 2, 0);
     Tensor transformed_input;
@@ -373,7 +359,7 @@ class CUDNNConvTransposeGradOpKernel : public framework::OpKernel<T> {
                              in_data_dims, strides, ksize);
 
     int data_dim = strides.size();  // 2d or 3d
-    bool is_sys_pad = IsSymmetricPadding(paddings, data_dim);
+    bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
 
     std::vector<int> input_pad(input_transpose.dims().size() * 2, 0);
     Tensor transformed_output_grad;
diff --git a/paddle/fluid/operators/math/padding.h b/paddle/fluid/operators/math/padding.h
index 8bacb5def44..83cc9a2ca9b 100644
--- a/paddle/fluid/operators/math/padding.h
+++ b/paddle/fluid/operators/math/padding.h
@@ -119,6 +119,19 @@ void PaddingGradFunctor(int rank, const framework::ExecutionContext& context,
   }
 }
 
+inline bool IsSymmetricPadding(const std::vector<int>& pads,
+                               const int data_dim) {
+  bool is_sys_pad = true;
+  if (pads.size() == data_dim * 2) {
+    for (size_t i = 0; i < data_dim; ++i) {
+      if (pads[2 * i] != pads[2 * i + 1]) {
+        is_sys_pad = false;
+        return is_sys_pad;
+      }
+    }
+  }
+  return is_sys_pad;
+}
 }  // namespace math
 }  // namespace operators
 }  // namespace paddle
--
GitLab
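
Note on the moved helpers: math::IsSymmetricPadding reports whether each spatial dimension is padded by the same amount before and after. When it returns false, the kernels above fall back to the path flagged by the diff's own comment ("cuDNN only supports padding the same amount on every dimension"): they pad the input explicitly with math::PadFunction and hand only a symmetric padding (padding_common) to cuDNN. Keeping a single copy in math/padding.h also removes the two duplicated static definitions. Below is a minimal standalone C++ sketch of that check and split, using only the standard library; SplitPadding and the main driver are illustrative names for this note, not part of the patch.

#include <algorithm>
#include <cstdio>
#include <vector>

// Mirrors math::IsSymmetricPadding from the patch: `pads` is laid out as
// {before_0, after_0, before_1, after_1, ...} for `data_dim` spatial dims.
// Like the original, a size mismatch is treated as symmetric.
inline bool IsSymmetricPadding(const std::vector<int>& pads, int data_dim) {
  if (pads.size() != static_cast<size_t>(data_dim) * 2) return true;
  for (int i = 0; i < data_dim; ++i) {
    if (pads[2 * i] != pads[2 * i + 1]) return false;
  }
  return true;
}

// Illustrative helper (hypothetical, not in the patch): split asymmetric
// paddings into a symmetric part cuDNN can take directly (padding_common)
// and the residual that an explicit pad of the input absorbs (input_pad).
void SplitPadding(const std::vector<int>& pads, int data_dim,
                  std::vector<int>* padding_common,
                  std::vector<int>* input_pad) {
  padding_common->assign(data_dim, 0);
  input_pad->assign(data_dim * 2, 0);
  for (int i = 0; i < data_dim; ++i) {
    const int common = std::min(pads[2 * i], pads[2 * i + 1]);
    (*padding_common)[i] = common;
    (*input_pad)[2 * i] = pads[2 * i] - common;
    (*input_pad)[2 * i + 1] = pads[2 * i + 1] - common;
  }
}

int main() {
  const std::vector<int> sym = {1, 1, 2, 2};   // equal before/after pads
  const std::vector<int> asym = {0, 1, 2, 2};  // dim 0: 0 before, 1 after
  std::printf("symmetric: %d, asymmetric: %d\n",
              IsSymmetricPadding(sym, 2), IsSymmetricPadding(asym, 2));

  std::vector<int> padding_common, input_pad;
  SplitPadding(asym, 2, &padding_common, &input_pad);
  // Prints "common: 0 2 | residual: 0 1 0 0": cuDNN pads dim 1 by 2 on
  // both sides, and the input is explicitly padded by 1 after dim 0.
  std::printf("common: %d %d | residual: %d %d %d %d\n", padding_common[0],
              padding_common[1], input_pad[0], input_pad[1], input_pad[2],
              input_pad[3]);
  return 0;
}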