From bf379fef961319ed213df7193cf7bd3997f0c095 Mon Sep 17 00:00:00 2001
From: wangchaochaohu
Date: Mon, 4 Nov 2019 19:12:29 +0800
Subject: [PATCH] refine code for code reuse test=develop (#20988)

---
 paddle/fluid/operators/conv_cudnn_helper.h | 58 ++++++++++++++++++++
 paddle/fluid/operators/conv_cudnn_op.cu    | 63 ++--------------------
 2 files changed, 62 insertions(+), 59 deletions(-)

diff --git a/paddle/fluid/operators/conv_cudnn_helper.h b/paddle/fluid/operators/conv_cudnn_helper.h
index 3679aeb1b0c..ed4a09aed41 100644
--- a/paddle/fluid/operators/conv_cudnn_helper.h
+++ b/paddle/fluid/operators/conv_cudnn_helper.h
@@ -14,6 +14,7 @@ limitations under the License. */
 
 #pragma once
 
+#include <algorithm>
 #include <memory>
 #include <string>
 #include <vector>
@@ -23,6 +24,63 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
+using Tensor = framework::Tensor;
+using DataLayout = platform::DataLayout;
+template <typename T>
+using ScalingParamType = typename platform::CudnnDataType<T>::ScalingParamType;
+using framework::AlgorithmsCache;
+
+static inline void GetNCDHW(const framework::DDim& dims,
+                            const DataLayout& layout, int* N, int* C, int* D,
+                            int* H, int* W) {
+  *N = dims[0];
+  *C = layout == DataLayout::kNCHW ? dims[1] : dims[dims.size() - 1];
+  int i = layout == DataLayout::kNCHW ? 0 : 1;
+  if (dims.size() == 5) {
+    *D = dims[2 - i];
+    *H = dims[3 - i];
+    *W = dims[4 - i];
+  } else {
+    *D = 1;
+    *H = dims[2 - i];
+    *W = dims[3 - i];
+  }
+}
+
+template <typename DeviceContext, typename T, size_t D>
+static void RemovePaddingSlice(const framework::ExecutionContext& context,
+                               const Tensor* input, Tensor* out,
+                               const std::vector<int>& starts,
+                               const std::vector<int>& axes) {
+  auto& place =
+      *context.template device_context<DeviceContext>().eigen_device();
+  auto in_dims = input->dims();
+  auto new_out_dims = out->dims();
+  auto offsets = Eigen::array<int, D>();
+  auto extents = Eigen::array<int, D>();
+  for (size_t i = 0; i < D; ++i) {
+    offsets[i] = 0;
+    extents[i] = new_out_dims[i];
+  }
+
+  int start;
+  for (size_t i = 0; i < axes.size(); ++i) {
+    start = starts[i];
+    if (start < 0) {
+      start = (start + in_dims[axes[i]]);
+    }
+    start = std::max(start, 0);
+    offsets[axes[i]] = start;
+  }
+  auto in_t =
+      framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
+          *input);
+
+  auto out_t =
+      framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
+          *out, new_out_dims);
+  out_t.device(place) = in_t.slice(offsets, extents);
+}
+
 template <typename T>
 std::ostream& operator<<(std::ostream& out, const std::vector<T>& v) {
   out << "[";
diff --git a/paddle/fluid/operators/conv_cudnn_op.cu b/paddle/fluid/operators/conv_cudnn_op.cu
index 274da9abf08..c86c9a868f5 100644
--- a/paddle/fluid/operators/conv_cudnn_op.cu
+++ b/paddle/fluid/operators/conv_cudnn_op.cu
@@ -39,61 +39,6 @@ using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
 using ScopedFilterDescriptor = platform::ScopedFilterDescriptor;
 using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor;
 using DataLayout = platform::DataLayout;
-template <typename T>
-using ScalingParamType = typename platform::CudnnDataType<T>::ScalingParamType;
-using framework::AlgorithmsCache;
-
-static inline void GetNCDHW(const framework::DDim& dims,
-                            const DataLayout& layout, int* N, int* C, int* D,
-                            int* H, int* W) {
-  *N = dims[0];
-  *C = layout == DataLayout::kNCHW ? dims[1] : dims[dims.size() - 1];
-  int i = layout == DataLayout::kNCHW ? 0 : 1;
-  if (dims.size() == 5) {
-    *D = dims[2 - i];
-    *H = dims[3 - i];
-    *W = dims[4 - i];
-  } else {
-    *D = 1;
-    *H = dims[2 - i];
-    *W = dims[3 - i];
-  }
-}
-
-template <typename DeviceContext, typename T, size_t D>
-static void Slice_2(const framework::ExecutionContext& context,
-                    const Tensor* input, Tensor* out,
-                    const std::vector<int>& starts,
-                    const std::vector<int>& axes) {
-  auto& place =
-      *context.template device_context<DeviceContext>().eigen_device();
-  auto in_dims = input->dims();
-  auto new_out_dims = out->dims();
-  auto offsets = Eigen::array<int, D>();
-  auto extents = Eigen::array<int, D>();
-  for (size_t i = 0; i < D; ++i) {
-    offsets[i] = 0;
-    extents[i] = new_out_dims[i];
-  }
-
-  int start;
-  for (size_t i = 0; i < axes.size(); ++i) {
-    start = starts[i];
-    if (start < 0) {
-      start = (start + in_dims[axes[i]]);
-    }
-    start = std::max(start, 0);
-    offsets[axes[i]] = start;
-  }
-  auto in_t =
-      framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
-          *input);
-
-  auto out_t =
-      framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
-          *out, new_out_dims);
-  out_t.device(place) = in_t.slice(offsets, extents);
-}
 
 template <typename T>
 class CUDNNConvOpKernel : public framework::OpKernel<T> {
@@ -551,11 +496,11 @@ class CUDNNConvGradOpKernel : public framework::OpKernel<T> {
       transformed_input_grad_channel.mutable_data<T>(ctx.GetPlace());
 
       if (transformed_input_channel.dims().size() == 4) {
-        Slice_2<paddle::platform::CUDADeviceContext, T, 4>(
+        RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 4>(
             ctx, &transformed_input_grad, &transformed_input_grad_channel,
             starts, axes);
       } else {
-        Slice_2<paddle::platform::CUDADeviceContext, T, 5>(
+        RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 5>(
             ctx, &transformed_input_grad, &transformed_input_grad_channel,
             starts, axes);
       }
@@ -994,10 +939,10 @@ class CUDNNConvDoubleGradOpKernel : public framework::OpKernel<T> {
         axes[i] = i;
       }
       if (X->dims().size() == 4) {
-        Slice_2<paddle::platform::CUDADeviceContext, T, 4>(
+        RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 4>(
            ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
       } else {
-        Slice_2<paddle::platform::CUDADeviceContext, T, 5>(
+        RemovePaddingSlice<paddle::platform::CUDADeviceContext, T, 5>(
           ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
      }
-- 
GitLab
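
Editor's note, not part of the patch: GetNCDHW decodes the batch, channel, and
spatial extents of a 4-D or 5-D shape under either tensor layout. Below is a
minimal standalone sketch of the same index arithmetic, with plain
std::vector<int64_t> standing in for framework::DDim; the enum, function, and
variable names here are hypothetical, not Paddle's.

#include <cstdint>
#include <iostream>
#include <vector>

enum class Layout { kNCHW, kNHWC };

// Mirrors GetNCDHW: N is always dims[0]; C sits right after N (NCHW) or
// last (NHWC); the spatial dims shift by one slot in the channels-last case.
void DecodeNCDHW(const std::vector<int64_t>& dims, Layout layout, int64_t* N,
                 int64_t* C, int64_t* D, int64_t* H, int64_t* W) {
  *N = dims[0];
  *C = layout == Layout::kNCHW ? dims[1] : dims[dims.size() - 1];
  int i = layout == Layout::kNCHW ? 0 : 1;
  if (dims.size() == 5) {  // 3-D convolution input: N,C,D,H,W or N,D,H,W,C
    *D = dims[2 - i];
    *H = dims[3 - i];
    *W = dims[4 - i];
  } else {  // 2-D convolution input: depth defaults to 1
    *D = 1;
    *H = dims[2 - i];
    *W = dims[3 - i];
  }
}

int main() {
  int64_t N, C, D, H, W;
  DecodeNCDHW({8, 3, 32, 32}, Layout::kNCHW, &N, &C, &D, &H, &W);
  std::cout << N << " " << C << " " << D << " " << H << " " << W << "\n";
  // Prints: 8 3 1 32 32
}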
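Editor's note, not part of the patch: RemovePaddingSlice, as called from the
gradient kernels above, crops a tensor computed on an explicitly padded input
back to the unpadded extents; a negative start wraps around the input extent
and is then clamped at zero, as in the helper's offset loop. A 1-D sketch of
that offset computation, with std::vector standing in for the tensors and a
hypothetical function name:

#include <algorithm>
#include <iostream>
#include <vector>

// Slice out_len elements along axis 0 using the helper's start convention:
// negative starts count back from the end, then clamp at zero.
std::vector<float> SliceAxis0(const std::vector<float>& in, int start,
                              int out_len) {
  if (start < 0) start += static_cast<int>(in.size());
  start = std::max(start, 0);
  return std::vector<float>(in.begin() + start, in.begin() + start + out_len);
}

int main() {
  std::vector<float> padded = {0, 1, 2, 3, 4, 5};  // one pad element per side
  for (float v : SliceAxis0(padded, 1, 4)) std::cout << v << " ";
  std::cout << "\n";
  // Prints: 1 2 3 4
}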