diff --git a/paddle/fluid/framework/ir/mkldnn/mkldnn_conv_bn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/mkldnn_conv_bn_fuse_pass_tester.cc index bf2cf58f970addf1dac9f4871ba4abe09c3c7b38..17663ecf6baa35f698aca35e451de34c647d2214 100644 --- a/paddle/fluid/framework/ir/mkldnn/mkldnn_conv_bn_fuse_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/mkldnn_conv_bn_fuse_pass_tester.cc @@ -28,7 +28,7 @@ USE_OP_ITSELF(batch_norm); USE_OP_DEVICE_KERNEL(batch_norm, MKLDNN); -USE_OP(conv2d_transpose); +USE_OP_ITSELF(conv2d_transpose); USE_OP_DEVICE_KERNEL(conv2d_transpose, MKLDNN); USE_OP_ITSELF(elementwise_add); USE_OP_DEVICE_KERNEL(elementwise_add, MKLDNN); diff --git a/paddle/fluid/inference/tensorrt/convert/test_conv2d_op.cc b/paddle/fluid/inference/tensorrt/convert/test_conv2d_op.cc index b96992ef8514abe0f71dbf23d38abb626f6c4a5b..a856d1414446914909a1801d4175431896ee8de1 100644 --- a/paddle/fluid/inference/tensorrt/convert/test_conv2d_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/test_conv2d_op.cc @@ -17,7 +17,7 @@ limitations under the License. */ #include "paddle/fluid/inference/tensorrt/convert/ut_helper.h" USE_OP_ITSELF(conv2d); -USE_OP(conv2d_transpose); +USE_OP_ITSELF(conv2d_transpose); namespace paddle { namespace inference { diff --git a/paddle/fluid/operators/conv_transpose_cudnn_op.cu b/paddle/fluid/operators/conv_transpose_cudnn_op.cu deleted file mode 100644 index 1841b78af32dd95d6884d5eb78ad30322ba7723e..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/conv_transpose_cudnn_op.cu +++ /dev/null @@ -1,1286 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#include "paddle/fluid/framework/eigen.h" -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/memory/memory.h" -#ifdef PADDLE_WITH_HIP -#include "paddle/fluid/operators/conv_miopen_helper.h" -#else -#include "paddle/fluid/operators/conv_cudnn_helper.h" -#endif -#include "paddle/fluid/operators/conv_transpose_op.h" -#include "paddle/phi/kernels/funcs/math_function.h" -#include "paddle/phi/kernels/funcs/padding.h" - -namespace paddle { -namespace operators { - -using Tensor = framework::Tensor; - -template -static void DataTranspose(const framework::ExecutionContext& ctx, - const Tensor* input, Tensor* output, - const std::vector& axis, int flag = 0) { - auto& dev_ctx = ctx.template device_context(); - phi::funcs::Transpose transpose; - auto in_dims = input->dims(); - std::vector input_transpose_vec; - for (size_t i = 0; i < axis.size(); ++i) { - if (flag == 0) - input_transpose_vec.push_back(in_dims[axis[i]]); - else - input_transpose_vec.push_back(in_dims[i]); - } - framework::DDim input_transpose_dims(phi::make_ddim(input_transpose_vec)); - output->mutable_data(input_transpose_dims, ctx.GetPlace()); - transpose(dev_ctx, *input, output, axis); -} - -template -class CUDNNConvTransposeOpKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& ctx) const override { - PADDLE_ENFORCE_EQ( - platform::is_gpu_place(ctx.GetPlace()), true, - paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace.")); - auto* input = ctx.Input("Input"); - auto* filter = ctx.Input("Filter"); - auto* output = ctx.Output("Output"); - - std::vector strides = ctx.Attr>("strides"); - std::vector paddings = ctx.Attr>("paddings"); - std::string padding_algorithm = ctx.Attr("padding_algorithm"); - - // cudnn v5 does not support dilations - std::vector dilations = ctx.Attr>("dilations"); - int groups = ctx.Attr("groups"); - const T* filter_data = filter->data(); - const std::string data_layout_str = ctx.Attr("data_format"); - const paddle::platform::DataLayout data_layout = - (data_layout_str != "NHWC" ? 
platform::DataLayout::kNCHW - : platform::DataLayout::kNHWC); - - // if channel_last, transpose to channel_first - Tensor input_transpose; - std::vector input_vec = phi::vectorize(input->dims()); - std::vector output_vec = phi::vectorize(output->dims()); - if (data_layout == platform::DataLayout::kNHWC) { - if (strides.size() == 2U) { - std::vector axis = {0, 3, 1, 2}; - for (size_t i = 0; i < axis.size(); ++i) { - input_vec[i] = input->dims()[axis[i]]; - output_vec[i] = output->dims()[axis[i]]; - } - DataTranspose(ctx, input, &input_transpose, axis); - } else if (strides.size() == 3U) { - std::vector axis = {0, 4, 1, 2, 3}; - for (size_t i = 0; i < axis.size(); ++i) { - input_vec[i] = input->dims()[axis[i]]; - output_vec[i] = output->dims()[axis[i]]; - } - DataTranspose(ctx, input, &input_transpose, axis); - } - } else { - input_transpose = *input; - } - - // update padding and dilation - auto in_dims = input_transpose.dims(); - auto filter_dims = filter->dims(); - framework::DDim in_data_dims; - in_data_dims = phi::slice_ddim(in_dims, 2, in_dims.size()); - framework::DDim filter_data_dims = - phi::slice_ddim(filter_dims, 2, filter_dims.size()); - std::vector ksize = phi::vectorize(filter_data_dims); - UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, - in_data_dims, strides, ksize); - - int data_dim = strides.size(); // 2d or 3d - bool is_sys_pad = phi::funcs::IsSymmetricPadding(paddings, data_dim); - - std::vector input_pad(input_transpose.dims().size() * 2, 0); - Tensor transformed_input; - std::vector padding_common(data_dim, 0); - if (!is_sys_pad) { - std::vector padding_diff(data_dim); - std::vector new_input_shape_vec(data_dim + 2); - new_input_shape_vec[0] = input_transpose.dims()[0]; - new_input_shape_vec[1] = input_transpose.dims()[1]; - - for (size_t i = 0; i < data_dim; ++i) { - padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]); - padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]); - new_input_shape_vec[i + 2] = - input_transpose.dims()[i + 2] + padding_diff[i]; - input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i]; - input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i]; - } - framework::DDim new_input_shape(phi::make_ddim(new_input_shape_vec)); - transformed_input.Resize(new_input_shape); - auto& dev_ctx = - ctx.template device_context(); - - transformed_input = - ctx.AllocateTmpTensor( - new_input_shape, dev_ctx); - const int rank = input_transpose.dims().size(); - T pad_value(0.0); - switch (rank) { - case 4: { - phi::funcs::PadFunction( - dev_ctx, input_pad, input_transpose, pad_value, - &transformed_input); - } break; - case 5: { - phi::funcs::PadFunction( - dev_ctx, input_pad, input_transpose, pad_value, - &transformed_input); - } break; - default: - PADDLE_THROW(platform::errors::InvalidArgument( - "Op(ConvTranspose) only supports 4-D or 5-D input Tensor.")); - } - } else { - transformed_input = input_transpose; - if (paddings.size() == data_dim) { - for (size_t i = 0; i < data_dim; ++i) { - padding_common[i] = paddings[i]; - } - } else { - for (size_t i = 0; i < data_dim; ++i) { - padding_common[i] = paddings[2 * i]; - } - } - } - - std::vector starts(data_dim, 0); - std::vector ends(data_dim, 0); - std::vector axes(data_dim, 0); - for (size_t i = 0; i < data_dim; ++i) { - starts[i] = input_pad[2 * i + 4] * (strides[i] + 1); - ends[i] = starts[i] + output_vec[i + 2]; - axes[i] = i + 2; - } - - const T* input_data = transformed_input.data(); - input_vec = phi::vectorize(transformed_input.dims()); - - 
std::vector transformed_output_vec = output_vec; - for (size_t i = 0; i < data_dim; ++i) { - transformed_output_vec[i + 2] = - output_vec[i + 2] + - (input_pad[2 * i + 4] + input_pad[2 * i + 5]) * strides[i] - - 2 * padding_common[i] + paddings[2 * i] + paddings[2 * i + 1]; - } - - Tensor transformed_output; - if (!is_sys_pad) { - DDim transformed_output_shape(phi::make_ddim(transformed_output_vec)); - transformed_output.mutable_data(transformed_output_shape, - ctx.GetPlace()); - } else { - output->mutable_data(ctx.GetPlace()); - transformed_output.ShareDataWith(*output); - transformed_output.Resize(phi::make_ddim(transformed_output_vec)); - } - T* transformed_output_data = transformed_output.data(); - - platform::DataLayout layout; - - int iwo_groups = groups; - int c_groups = 1; -#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1) - iwo_groups = 1; - c_groups = groups; - groups = 1; -#endif - - if (strides.size() == 2U) { - layout = platform::DataLayout::kNCHW; - } else { - layout = platform::DataLayout::kNCDHW; - } - - size_t workspace_size = 0; -#ifdef PADDLE_WITH_HIP - miopenConvBwdDataAlgorithm_t algo{}; -#else - cudnnConvolutionBwdDataAlgo_t algo{}; -#endif - // ------------------- cudnn conv algorithm --------------------- - auto& dev_ctx = ctx.template device_context(); - auto handle = dev_ctx.cudnn_handle(); - auto layout_tensor = GetCudnnTensorFormat(layout); - bool deterministic = FLAGS_cudnn_deterministic; - - auto dtype = platform::CudnnDataType::type; - // ------------------- cudnn descriptors --------------------- - ConvArgs args{&transformed_output, - filter, - &transformed_input, - strides, - padding_common, - dilations, - dtype}; - args.handle = handle; - args.idesc.set(transformed_output, iwo_groups); - args.wdesc.set(*filter, layout_tensor, iwo_groups); - args.odesc.set(transformed_input, iwo_groups); - args.cdesc.set(dtype, padding_common, strides, dilations, - platform::AllowTF32Cudnn(), c_groups); - -#ifdef PADDLE_WITH_HIP - using search = SearchAlgorithm; - workspace_size = std::max(workspace_size, search::GetWorkspaceSize(args)); - algo = search::Find( - args, false, deterministic, workspace_size, - ctx.template device_context()); -#else - using search = SearchAlgorithm; - algo = search::Find( - args, false, deterministic, - ctx.template device_context()); - workspace_size = - std::max(workspace_size, search::GetWorkspaceSize(args, algo)); -#endif - - // ------------------- cudnn conv transpose forward --------------------- - int input_offset = - transformed_input.numel() / transformed_input.dims()[0] / groups; - int output_offset = - transformed_output.numel() / transformed_output.dims()[0] / groups; - int filter_offset = filter->numel() / groups; - ScalingParamType alpha = 1.0f; - ScalingParamType beta = 0.0f; - auto workspace_handle = dev_ctx.cudnn_workspace_handle(); - for (int g = 0; g < groups; g++) { -#ifdef PADDLE_WITH_HIP - auto cudnn_func = [&](void* cudnn_workspace) { - PADDLE_ENFORCE_GPU_SUCCESS( - platform::dynload::miopenConvolutionBackwardData( - handle, &alpha, args.odesc.desc(), - input_data + input_offset * g, args.wdesc.desc(), - filter_data + filter_offset * g, args.cdesc.desc(), algo, &beta, - args.idesc.desc(), transformed_output_data + output_offset * g, - cudnn_workspace, workspace_size)); - }; -#else // PADDLE_WITH_HIP - auto cudnn_func = [&](void* cudnn_workspace) { - PADDLE_ENFORCE_GPU_SUCCESS( - platform::dynload::cudnnConvolutionBackwardData( - handle, &alpha, args.wdesc.desc(), - filter_data + filter_offset * g, 
args.odesc.desc(), - input_data + input_offset * g, args.cdesc.desc(), algo, - cudnn_workspace, workspace_size, &beta, args.idesc.desc(), - transformed_output_data + output_offset * g)); - }; -#endif // PADDLE_WITH_HIP - workspace_handle.RunFunc(cudnn_func, workspace_size); - } - if (!is_sys_pad && strides.size() == 2U) { - Slice( - ctx, &transformed_output, output, starts, ends, axes); - } else if (!is_sys_pad && strides.size() == 3U) { - Slice( - ctx, &transformed_output, output, starts, ends, axes); - } - - if (data_layout == platform::DataLayout::kNHWC) { - Tensor output_transpose; - Tensor output_nchw; - output_nchw.ShareDataWith(*output); - output_nchw.Resize(phi::make_ddim(output_vec)); - if (strides.size() == 2U) { - std::vector axis = {0, 2, 3, 1}; - DataTranspose(ctx, &output_nchw, &output_transpose, axis); - *output = output_transpose; - } else if (strides.size() == 3U) { - std::vector axis = {0, 2, 3, 4, 1}; - DataTranspose(ctx, &output_nchw, &output_transpose, axis); - *output = output_transpose; - } - } - } -}; - -template -class CUDNNConvTransposeGradOpKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& ctx) const override { - PADDLE_ENFORCE_EQ( - platform::is_gpu_place(ctx.GetPlace()), true, - paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace.")); - auto input = ctx.Input("Input"); - auto filter = ctx.Input("Filter"); - auto output_grad = ctx.Input(framework::GradVarName("Output")); - auto input_grad = ctx.Output(framework::GradVarName("Input")); - auto filter_grad = ctx.Output(framework::GradVarName("Filter")); - const T* filter_data = filter->data(); - - std::vector strides = ctx.Attr>("strides"); - std::vector paddings = ctx.Attr>("paddings"); - // cudnn v5 does not support dilations - std::vector dilations = ctx.Attr>("dilations"); - int groups = ctx.Attr("groups"); - std::string padding_algorithm = ctx.Attr("padding_algorithm"); - int user_workspace_size = ctx.Attr("workspace_size_MB"); - const std::string data_layout_str = ctx.Attr("data_format"); - const paddle::platform::DataLayout data_layout = - (data_layout_str != "NHWC" ? 
platform::DataLayout::kNCHW - : platform::DataLayout::kNHWC); - - // if channel_last, transpose to channel_first - Tensor input_transpose; - Tensor output_grad_transpose; - std::vector input_vec = phi::vectorize(input->dims()); - std::vector output_vec = phi::vectorize(output_grad->dims()); - if (data_layout == platform::DataLayout::kNHWC) { - if (strides.size() == 2U) { - std::vector axis = {0, 3, 1, 2}; - for (size_t i = 0; i < axis.size(); ++i) { - input_vec[i] = input->dims()[axis[i]]; - output_vec[i] = output_grad->dims()[axis[i]]; - } - DataTranspose(ctx, input, &input_transpose, axis); - DataTranspose(ctx, output_grad, &output_grad_transpose, axis); - } else if (strides.size() == 3U) { - std::vector axis = {0, 4, 1, 2, 3}; - for (size_t i = 0; i < axis.size(); ++i) { - input_vec[i] = input->dims()[axis[i]]; - output_vec[i] = output_grad->dims()[axis[i]]; - } - DataTranspose(ctx, input, &input_transpose, axis); - DataTranspose(ctx, output_grad, &output_grad_transpose, axis); - } - } else { - input_transpose = *input; - output_grad_transpose = *output_grad; - } - - // update padding and dilation - auto in_dims = input_transpose.dims(); - auto filter_dims = filter->dims(); - framework::DDim in_data_dims; - in_data_dims = phi::slice_ddim(in_dims, 2, in_dims.size()); - framework::DDim filter_data_dims = - phi::slice_ddim(filter_dims, 2, filter_dims.size()); - std::vector ksize = phi::vectorize(filter_data_dims); - UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, - in_data_dims, strides, ksize); - - int data_dim = strides.size(); // 2d or 3d - bool is_sys_pad = phi::funcs::IsSymmetricPadding(paddings, data_dim); - - std::vector input_pad(input_transpose.dims().size() * 2, 0); - Tensor transformed_output_grad; - std::vector padding_common(data_dim, 0); - if (!is_sys_pad) { - std::vector padding_diff(data_dim); - std::vector new_output_grad_shape_vec(data_dim + 2); - new_output_grad_shape_vec[0] = output_grad_transpose.dims()[0]; - new_output_grad_shape_vec[1] = output_grad_transpose.dims()[1]; - - for (size_t i = 0; i < data_dim; ++i) { - padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]); - padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]); - new_output_grad_shape_vec[i + 2] = - output_grad_transpose.dims()[i + 2] + padding_diff[i]; - input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i]; - input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i]; - } - framework::DDim new_output_grad_shape( - phi::make_ddim(new_output_grad_shape_vec)); - transformed_output_grad.Resize(new_output_grad_shape); - auto& dev_ctx = - ctx.template device_context(); - - transformed_output_grad = - ctx.AllocateTmpTensor( - new_output_grad_shape, dev_ctx); - const int rank = input_transpose.dims().size(); - T pad_value(0.0); - switch (rank) { - case 4: { - phi::funcs::PadFunction( - dev_ctx, input_pad, output_grad_transpose, pad_value, - &transformed_output_grad); - } break; - case 5: { - phi::funcs::PadFunction( - dev_ctx, input_pad, output_grad_transpose, pad_value, - &transformed_output_grad); - } break; - default: - PADDLE_THROW(platform::errors::InvalidArgument( - "Op(ConvTranspose) only supports 4-D or 5-D input Tensor.")); - } - } else { - transformed_output_grad = output_grad_transpose; - if (paddings.size() == data_dim) { - for (size_t i = 0; i < data_dim; ++i) { - padding_common[i] = paddings[i]; - } - } else { - for (size_t i = 0; i < data_dim; ++i) { - padding_common[i] = paddings[2 * i]; - } - } - } - - const T* input_data = 
input_transpose.data(); - const T* output_grad_data = transformed_output_grad.data(); - output_vec = phi::vectorize(transformed_output_grad.dims()); - - // ------------------- cudnn descriptors --------------------- - platform::DataLayout layout; - - if (strides.size() == 2U) { - layout = platform::DataLayout::kNCHW; - } else { - layout = platform::DataLayout::kNCDHW; - } - - int iwo_groups = groups; - int c_groups = 1; -#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1) - iwo_groups = 1; - c_groups = groups; - groups = 1; -#endif - - auto dtype = platform::CudnnDataType::type; - - ConvArgs args1{&transformed_output_grad, - filter, - &input_transpose, - strides, - padding_common, - dilations, - dtype}; - ConvArgs args2{&transformed_output_grad, - filter, - &input_transpose, - strides, - padding_common, - dilations, - dtype}; - -#ifdef PADDLE_WITH_HIP - miopenConvFwdAlgorithm_t data_algo{}; - miopenConvBwdWeightsAlgorithm_t filter_algo{}; -#else - cudnnConvolutionFwdAlgo_t data_algo{}; - cudnnConvolutionBwdFilterAlgo_t filter_algo{}; -#endif - - auto layout_tensor = GetCudnnTensorFormat(layout); - size_t workspace_size = 0; - auto& dev_ctx = ctx.template device_context(); - auto handle = dev_ctx.cudnn_handle(); - bool deterministic = FLAGS_cudnn_deterministic; - T* input_grad_data = nullptr; - T* filter_grad_data = nullptr; - - if (input_grad) { - input_grad_data = input_grad->mutable_data(ctx.GetPlace()); - args1.handle = handle; - args1.idesc.set(transformed_output_grad, iwo_groups); - args1.wdesc.set(*filter, layout_tensor, iwo_groups); - args1.odesc.set(input_transpose, iwo_groups); - args1.cdesc.set(dtype, padding_common, strides, dilations, - platform::AllowTF32Cudnn(), c_groups); -#ifdef PADDLE_WITH_HIP - using search1 = SearchAlgorithm; - workspace_size = - std::max(workspace_size, search1::GetWorkspaceSize(args1)); - data_algo = search1::Find( - args1, false, deterministic, workspace_size, - ctx.template device_context()); -#else - using search1 = SearchAlgorithm; - data_algo = search1::Find( - args1, false, deterministic, - ctx.template device_context()); - workspace_size = - std::max(workspace_size, search1::GetWorkspaceSize(args1, data_algo)); -#endif - } - - if (filter_grad) { - filter_grad_data = filter_grad->mutable_data(ctx.GetPlace()); - args2.handle = handle; - args2.idesc.set(transformed_output_grad, iwo_groups); - args2.wdesc.set(*filter_grad, layout_tensor, iwo_groups); - args2.odesc.set(input_transpose, iwo_groups); - args2.cdesc.set(dtype, padding_common, strides, dilations, - platform::AllowTF32Cudnn(), c_groups); -#ifdef PADDLE_WITH_HIP - using search2 = SearchAlgorithm; - workspace_size = - std::max(workspace_size, search2::GetWorkspaceSize(args2)); - filter_algo = search2::Find( - args2, false, deterministic, workspace_size, - ctx.template device_context()); -#else - using search2 = SearchAlgorithm; - filter_algo = search2::Find( - args2, false, deterministic, - ctx.template device_context()); - workspace_size = std::max(workspace_size, - search2::GetWorkspaceSize(args2, filter_algo)); -#endif - } - - // ------------------- cudnn conv backward data --------------------- - // FIXME(typhoonzero): template type T may not be the same as cudnn call. 
- int input_offset = input->numel() / input->dims()[0] / groups; - int output_grad_offset = transformed_output_grad.numel() / - transformed_output_grad.dims()[0] / groups; - int filter_offset = filter->numel() / groups; - ScalingParamType alpha = 1.0f; - ScalingParamType beta = 0.0f; - auto workspace_handle = dev_ctx.cudnn_workspace_handle(); - if (input_grad) { - // Because beta is zero, it is unnecessary to reset input_grad. - for (int g = 0; g < groups; g++) { -#ifdef PADDLE_WITH_HIP - auto cudnn_func = [&](void* cudnn_workspace) { - PADDLE_ENFORCE_GPU_SUCCESS( - platform::dynload::miopenConvolutionForward( - handle, &alpha, args1.idesc.desc(), - output_grad_data + output_grad_offset * g, args1.wdesc.desc(), - filter_data + filter_offset * g, args1.cdesc.desc(), - data_algo, &beta, args1.odesc.desc(), - input_grad_data + input_offset * g, cudnn_workspace, - workspace_size)); - }; -#else // PADDLE_WITH_HIP - auto cudnn_func = [&](void* cudnn_workspace) { - PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::cudnnConvolutionForward( - handle, &alpha, args1.idesc.desc(), - output_grad_data + output_grad_offset * g, args1.wdesc.desc(), - filter_data + filter_offset * g, args1.cdesc.desc(), data_algo, - cudnn_workspace, workspace_size, &beta, args1.odesc.desc(), - input_grad_data + input_offset * g)); - }; -#endif // PADDLE_WITH_HIP - workspace_handle.RunFunc(cudnn_func, workspace_size); - } - - if (data_layout == platform::DataLayout::kNHWC) { - Tensor input_grad_transpose; - Tensor input_grad_nchw; - input_grad_nchw.ShareDataWith(*input_grad); - input_grad_nchw.Resize(phi::make_ddim(input_vec)); - if (strides.size() == 2U) { - std::vector axis = {0, 2, 3, 1}; - DataTranspose(ctx, &input_grad_nchw, &input_grad_transpose, - axis); - *input_grad = input_grad_transpose; - } else if (strides.size() == 3U) { - std::vector axis = {0, 2, 3, 4, 1}; - DataTranspose(ctx, &input_grad_nchw, &input_grad_transpose, - axis); - *input_grad = input_grad_transpose; - } - } - } - - // ------------------- cudnn conv backward filter --------------------- - if (filter_grad) { - // Because beta is zero, it is unnecessary to reset filter_grad. 
- // Gradient with respect to the filter - for (int g = 0; g < groups; g++) { -#ifdef PADDLE_WITH_HIP - auto cudnn_func = [&](void* cudnn_workspace) { - PADDLE_ENFORCE_GPU_SUCCESS( - platform::dynload::miopenConvolutionBackwardWeights( - handle, &alpha, args2.odesc.desc(), - input_data + input_offset * g, args2.idesc.desc(), - output_grad_data + output_grad_offset * g, args2.cdesc.desc(), - filter_algo, &beta, args2.wdesc.desc(), - filter_grad_data + filter_offset * g, cudnn_workspace, - workspace_size)); - }; -#else // PADDLE_WITH_HIP - auto cudnn_func = [&](void* cudnn_workspace) { - PADDLE_ENFORCE_GPU_SUCCESS( - platform::dynload::cudnnConvolutionBackwardFilter( - handle, &alpha, args2.idesc.desc(), - output_grad_data + output_grad_offset * g, args2.odesc.desc(), - input_data + input_offset * g, args2.cdesc.desc(), - filter_algo, cudnn_workspace, workspace_size, &beta, - args2.wdesc.desc(), filter_grad_data + filter_offset * g)); - }; -#endif // PADDLE_WITH_HIP - workspace_handle.RunFunc(cudnn_func, workspace_size); - } - } - } -}; - -/* - * Inputs: I, W, dO, ddI, ddW - * Outputs: ddO, dW, dI - * ddo = conv_bp_data(W, ddI) + conv_bp_data(ddW, I) - * dW = conv_bp_filter(dO, ddI) - * dI = conv(dO, ddW) - */ -template -class CUDNNConvTransposeDoubleGradOpKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& ctx) const override { - auto& dev_ctx = ctx.template device_context(); - PADDLE_ENFORCE_EQ( - platform::is_gpu_place(ctx.GetPlace()), true, - paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace.")); - auto X = ctx.Input("Input"); - auto W = ctx.Input("Filter"); - auto dO = ctx.Input("DOutput"); - auto ddX = ctx.Input("DDInput"); - auto ddW = ctx.Input("DDFilter"); - - auto ddO = ctx.Output("DDOutput"); - auto dW = ctx.Output("DFilter"); - auto dX = ctx.Output("DInput"); - - if (ddO) { - ddO->mutable_data(ctx.GetPlace()); - phi::funcs::SetConstant set_zero; - set_zero(dev_ctx, ddO, static_cast(0)); - } - if (dW) { - dW->mutable_data(ctx.GetPlace()); - } - if (dX) { - dX->mutable_data(ctx.GetPlace()); - } - - const T* dy = dO->data(); - const T* w = W->data(); - - const T* ddx = nullptr; - const T* ddw = nullptr; - T *dw, *dx, *ddy; - dw = dx = ddy = nullptr; - T* transformed_dx = nullptr; - const std::vector& strides = ctx.Attr>("strides"); - std::vector dilations = ctx.Attr>("dilations"); - int groups = ctx.Attr("groups"); - - bool deterministic = FLAGS_cudnn_deterministic; - - std::vector paddings = ctx.Attr>("paddings"); - - std::string padding_algorithm = ctx.Attr("padding_algorithm"); - const std::string data_format = ctx.Attr("data_format"); - const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC"); - - // transform Tensors to channel first----------- - Tensor transformed_X_channel(X->type()); - Tensor transformed_dO_channel(dO->type()); - Tensor transformed_ddX_channel(X->type()); - - Tensor transformed_ddO_channel(dO->type()); - Tensor transformed_dX_channel(X->type()); - - if (channel_last) { - ResizeToChannelFirst( - ctx, X, &transformed_X_channel); - TransToChannelFirst( - ctx, X, &transformed_X_channel); - - ResizeToChannelFirst( - ctx, dO, &transformed_dO_channel); - TransToChannelFirst( - ctx, dO, &transformed_dO_channel); - - if (ddX) { - ResizeToChannelFirst( - ctx, ddX, &transformed_ddX_channel); - TransToChannelFirst( - ctx, ddX, &transformed_ddX_channel); - } - - if (ddO) { - ResizeToChannelFirst( - ctx, ddO, &transformed_ddO_channel); - } - if (dX) { - ResizeToChannelFirst( - ctx, dX, 
&transformed_dX_channel); - transformed_dX_channel.mutable_data(ctx.GetPlace()); - } - - } else { - transformed_X_channel = *X; - transformed_dO_channel = *dO; - if (ddX) { - transformed_ddX_channel = *ddX; - } - if (dX) { - transformed_dX_channel = *dX; - } - } - std::vector output_vec = - phi::vectorize(transformed_dO_channel.dims()); - - auto in_dims = transformed_X_channel.dims(); - auto filter_dims = W->dims(); - framework::DDim in_data_dims = phi::slice_ddim(in_dims, 2, in_dims.size()); - framework::DDim filter_data_dims = - phi::slice_ddim(filter_dims, 2, filter_dims.size()); - std::vector ksize = phi::vectorize(filter_data_dims); - UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, - in_data_dims, strides, ksize); - - int data_dim = strides.size(); // 2d or 3d - bool is_sys_pad = phi::funcs::IsSymmetricPadding(paddings, data_dim); - Tensor transformed_X(X->type()); - Tensor transformed_ddX(X->type()); - - Tensor transformed_dO(dO->type()); - - std::vector padding_common(data_dim, 0); - std::vector input_pad(X->dims().size() * 2, 0); - - if (!is_sys_pad) { - // get pad - std::vector padding_diff(data_dim); - std::vector new_input_shape_vec(data_dim + 2); - std::vector new_output_grad_shape_vec(data_dim + 2); - - new_input_shape_vec[0] = transformed_X_channel.dims()[0]; - new_input_shape_vec[1] = transformed_X_channel.dims()[1]; - - new_output_grad_shape_vec[0] = transformed_dO_channel.dims()[0]; - new_output_grad_shape_vec[1] = transformed_dO_channel.dims()[1]; - - for (size_t i = 0; i < data_dim; ++i) { - padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]); - padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]); - new_input_shape_vec[i + 2] = - transformed_X_channel.dims()[i + 2] + padding_diff[i]; - - new_output_grad_shape_vec[i + 2] = - transformed_dO_channel.dims()[i + 2] + padding_diff[i]; - - input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i]; - input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i]; - } - framework::DDim new_input_shape(phi::make_ddim(new_input_shape_vec)); - transformed_X.Resize(new_input_shape); - transformed_ddX.Resize(new_input_shape); - - framework::DDim new_output_grad_shape( - phi::make_ddim(new_output_grad_shape_vec)); - transformed_dO.Resize(new_output_grad_shape); - - transformed_dO = - ctx.AllocateTmpTensor( - new_output_grad_shape, dev_ctx); - - transformed_X = - ctx.AllocateTmpTensor( - new_input_shape, dev_ctx); - if (ddX) { - transformed_ddX = - ctx.AllocateTmpTensor( - new_input_shape, dev_ctx); - } - - // pad for input - const int rank = X->dims().size(); - T pad_value(0.0); - switch (rank) { - case 4: { - phi::funcs::PadFunction( - dev_ctx, input_pad, transformed_X_channel, pad_value, - &transformed_X); - if (dO) { - phi::funcs::PadFunction( - dev_ctx, input_pad, transformed_dO_channel, pad_value, - &transformed_dO); - } - - if (ddX) { - phi::funcs::PadFunction( - dev_ctx, input_pad, transformed_ddX_channel, pad_value, - &transformed_ddX); - } - } break; - case 5: { - phi::funcs::PadFunction( - dev_ctx, input_pad, transformed_X_channel, pad_value, - &transformed_X); - if (ddX) { - phi::funcs::PadFunction( - dev_ctx, input_pad, transformed_ddX_channel, pad_value, - &transformed_ddX); - } - } break; - default: - PADDLE_THROW(platform::errors::InvalidArgument( - "ConvOp only support tensors with 4 or 5 dimensions.")); - } - - } else { - transformed_X = transformed_X_channel; - transformed_dO = transformed_dO_channel; - if (ddX) { - transformed_ddX = transformed_ddX_channel; - } - - 
if (paddings.size() == data_dim) { - for (size_t i = 0; i < data_dim; ++i) { - padding_common[i] = paddings[i]; - } - } else { - for (size_t i = 0; i < data_dim; ++i) { - padding_common[i] = paddings[2 * i]; - } - } - } - - std::vector starts(data_dim, 0); - std::vector ends(data_dim, 0); - std::vector axes(data_dim, 0); - for (size_t i = 0; i < data_dim; ++i) { - starts[i] = input_pad[2 * i + 4] * (strides[i] + 1); - ends[i] = starts[i] + output_vec[i + 2]; - axes[i] = i + 2; - } - - std::vector transformed_output_vec = output_vec; - for (size_t i = 0; i < data_dim; ++i) { - transformed_output_vec[i + 2] = - output_vec[i + 2] + - (input_pad[2 * i + 4] + input_pad[2 * i + 5]) * strides[i] - - 2 * padding_common[i] + paddings[2 * i] + paddings[2 * i + 1]; - } - - if (!is_sys_pad) { - DDim transformed_output_shape(phi::make_ddim(transformed_output_vec)); - transformed_ddO_channel.mutable_data(transformed_output_shape, - ctx.GetPlace()); - } else { - ddO->mutable_data(ctx.GetPlace()); - transformed_ddO_channel = *ddO; - transformed_ddO_channel.Resize(phi::make_ddim(transformed_output_vec)); - } - - const T* x = transformed_X.data(); - - int iwo_group = groups; - int c_group = 1; -#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1) - iwo_group = 1; - c_group = groups; - groups = 1; -#endif - auto dtype = platform::CudnnDataType::type; - - auto handle = dev_ctx.cudnn_handle(); - - ConvArgs args1{&transformed_ddO_channel, - W, - &transformed_ddX, - strides, - padding_common, - dilations, - dtype}; - ConvArgs args2{&transformed_ddO_channel, ddW, &transformed_X, strides, - padding_common, dilations, dtype}; - - ConvArgs args3{&transformed_dO, - dW, - &transformed_ddX_channel, - strides, - padding_common, - dilations, - dtype}; - ConvArgs args4{ - &transformed_dO, ddW, &transformed_dX_channel, strides, padding_common, - dilations, dtype}; -#ifdef PADDLE_WITH_HIP - miopenConvBwdDataAlgorithm_t bwd_algo1 = - static_cast(0); - miopenConvBwdDataAlgorithm_t bwd_algo2 = - static_cast(0); - miopenConvFwdAlgorithm_t data_algo = - static_cast(0); - miopenConvBwdWeightsAlgorithm_t filter_algo = - static_cast(0); -#else - cudnnConvolutionBwdDataAlgo_t bwd_algo1 = - static_cast(0); - cudnnConvolutionBwdDataAlgo_t bwd_algo2 = - static_cast(0); - cudnnConvolutionFwdAlgo_t data_algo = - static_cast(0); - cudnnConvolutionBwdFilterAlgo_t filter_algo = - static_cast(0); -#endif - - auto layout = GetCudnnTensorFormat(platform::DataLayout::kNCHW); - - // ddo = conv(ddI, W) + conv(I, ddW) - size_t workspace_size = 0; - - T* transformed_ddy_channel = nullptr; - - if (ddO) { - ddy = ddO->data(); - transformed_ddy_channel = transformed_ddO_channel.data(); - if (ddX) { - args1.handle = handle; - args1.idesc.set(transformed_ddO_channel, iwo_group); - args1.wdesc.set(*W, layout, iwo_group); - args1.odesc.set(transformed_ddX, iwo_group); - args1.cdesc.set(dtype, padding_common, strides, dilations, - platform::AllowTF32Cudnn(), c_group); -#ifdef PADDLE_WITH_HIP - using search1 = SearchAlgorithm; - workspace_size = search1::GetWorkspaceSize(args1); - bwd_algo1 = search1::Find( - args1, false, deterministic, workspace_size, - ctx.template device_context()); -#else - using search1 = SearchAlgorithm; - bwd_algo1 = search1::Find( - args1, false, deterministic, - ctx.template device_context()); - workspace_size = search1::GetWorkspaceSize(args1, bwd_algo1); -#endif - } - - if (ddW) { - ddw = ddW->data(); - args2.handle = handle; - args2.idesc.set(transformed_ddO_channel, iwo_group); - args2.wdesc.set(*ddW, layout, iwo_group); 
- args2.odesc.set(transformed_X, iwo_group); - args2.cdesc.set(dtype, padding_common, strides, dilations, - platform::AllowTF32Cudnn(), c_group); -#ifdef PADDLE_WITH_HIP - using search2 = SearchAlgorithm; - workspace_size = - std::max(workspace_size, search2::GetWorkspaceSize(args2)); - bwd_algo2 = search2::Find( - args2, false, deterministic, workspace_size, - ctx.template device_context()); -#else - using search2 = SearchAlgorithm; - bwd_algo2 = search2::Find( - args2, false, deterministic, - ctx.template device_context()); - workspace_size = std::max(workspace_size, - search2::GetWorkspaceSize(args2, bwd_algo2)); -#endif - } - } - - if (dW && ddX) { - dw = dW->data(); - args3.handle = handle; - args3.idesc.set(transformed_dO, iwo_group); - args3.wdesc.set(*dW, layout, iwo_group); - - args3.odesc.set(transformed_ddX_channel, iwo_group); - - args3.cdesc.set(dtype, padding_common, strides, dilations, - platform::AllowTF32Cudnn(), c_group); -#ifdef PADDLE_WITH_HIP - using search3 = SearchAlgorithm; - workspace_size = - std::max(workspace_size, search3::GetWorkspaceSize(args3)); - filter_algo = search3::Find( - args3, false, deterministic, workspace_size, - ctx.template device_context()); -#else - using search3 = SearchAlgorithm; - filter_algo = search3::Find( - args3, false, deterministic, - ctx.template device_context()); - workspace_size = std::max(workspace_size, - search3::GetWorkspaceSize(args3, filter_algo)); -#endif - } - - if (ddW && dX) { - transformed_dx = transformed_dX_channel.data(); - - args4.handle = handle; - args4.idesc.set(transformed_dO, iwo_group); - args4.wdesc.set(*ddW, layout, iwo_group); - args4.odesc.set(transformed_dX_channel, iwo_group); - args4.cdesc.set(dtype, padding_common, strides, dilations, - platform::AllowTF32Cudnn(), c_group); -#ifdef PADDLE_WITH_HIP - using search4 = SearchAlgorithm; - workspace_size = - std::max(workspace_size, search4::GetWorkspaceSize(args4)); - data_algo = search4::Find( - args4, false, deterministic, workspace_size, - ctx.template device_context()); -#else - using search4 = SearchAlgorithm; - data_algo = search4::Find( - args4, false, deterministic, - ctx.template device_context()); - workspace_size = - std::max(workspace_size, search4::GetWorkspaceSize(args4, data_algo)); -#endif - } - - int i_n, i_c, i_d, i_h, i_w; - GetNCDHW(transformed_X.dims(), platform::DataLayout::kNCHW, &i_n, &i_c, - &i_d, &i_h, &i_w); - - int o_n, o_c, o_d, o_h, o_w; - GetNCDHW(transformed_dO.dims(), platform::DataLayout::kNCHW, &o_n, &o_c, - &o_d, &o_h, &o_w); - - int group_offset_in = - transformed_X.numel() / transformed_X.dims()[0] / groups; - int group_offset_out = - transformed_dO.numel() / transformed_dO.dims()[0] / groups; - int group_offset_filter = W->numel() / groups; - - ScalingParamType alpha = 1.0f; - ScalingParamType beta = 0.0f; - - auto wkspace_handle = dev_ctx.cudnn_workspace_handle(); - - if (ddO) { - if (ddX) { - ddx = transformed_ddX.data(); - for (int i = 0; i < groups; i++) { -#ifdef PADDLE_WITH_HIP - wkspace_handle.RunFunc( - [&](void* workspace_ptr) { - PADDLE_ENFORCE_GPU_SUCCESS( - platform::dynload::miopenConvolutionBackwardData( - handle, &alpha, args1.odesc.desc(), - ddx + i * group_offset_in, args1.wdesc.desc(), - w + i * group_offset_filter, args1.cdesc.desc(), - bwd_algo1, &beta, args1.idesc.desc(), - transformed_ddy_channel + i * group_offset_out, - workspace_ptr, workspace_size)); - }, - workspace_size); -#else // PADDLE_WITH_HIP - wkspace_handle.RunFunc( - [&](void* workspace_ptr) { - PADDLE_ENFORCE_GPU_SUCCESS( - 
platform::dynload::cudnnConvolutionBackwardData( - handle, &alpha, args1.wdesc.desc(), - w + i * group_offset_filter, args1.odesc.desc(), - ddx + i * group_offset_in, args1.cdesc.desc(), - bwd_algo1, workspace_ptr, workspace_size, &beta, - args1.idesc.desc(), - transformed_ddy_channel + i * group_offset_out)); - }, - workspace_size); -#endif // PADDLE_WITH_HIP - } - } - if (ddW) { - for (int i = 0; i < groups; i++) { -#ifdef PADDLE_WITH_HIP - // MIOPEN ONLY support beta to be 0.0f - Tensor conv_x_ddw(dO->type()); - conv_x_ddw.Resize(transformed_ddO_channel.dims()); - T* conv_x_ddw_data = conv_x_ddw.mutable_data(ctx.GetPlace()); - wkspace_handle.RunFunc( - [&](void* workspace_ptr) { - PADDLE_ENFORCE_GPU_SUCCESS( - platform::dynload::miopenConvolutionBackwardData( - handle, &alpha, args2.odesc.desc(), - x + i * group_offset_in, args2.wdesc.desc(), - ddw + i * group_offset_filter, args2.cdesc.desc(), - bwd_algo2, &beta, args2.idesc.desc(), - conv_x_ddw_data + i * group_offset_out, workspace_ptr, - workspace_size)); - }, - workspace_size); - PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenOpTensor( - handle, miopenTensorOpAdd, &alpha, args2.idesc.desc(), - transformed_ddy_channel + i * group_offset_out, &alpha, - args2.idesc.desc(), conv_x_ddw_data + i * group_offset_out, &beta, - args2.idesc.desc(), - transformed_ddy_channel + i * group_offset_out)); -#else // PADDLE_WITH_HIP - wkspace_handle.RunFunc( - [&](void* workspace_ptr) { - PADDLE_ENFORCE_GPU_SUCCESS( - platform::dynload::cudnnConvolutionBackwardData( - handle, &alpha, args2.wdesc.desc(), - ddw + i * group_offset_filter, args2.odesc.desc(), - x + i * group_offset_in, args2.cdesc.desc(), bwd_algo2, - workspace_ptr, workspace_size, &alpha, - args2.idesc.desc(), - transformed_ddy_channel + i * group_offset_out)); - }, - workspace_size); -#endif // PADDLE_WITH_HIP - } - } - if ((!is_sys_pad) && (!channel_last)) { - if (strides.size() == 2U) { - Slice( - ctx, &transformed_ddO_channel, ddO, starts, ends, axes); - } else if (!is_sys_pad && strides.size() == 3U) { - Slice( - ctx, &transformed_ddO_channel, ddO, starts, ends, axes); - } - } else if ((!is_sys_pad) && (channel_last)) { - if (strides.size() == 2U) { - Slice( - ctx, &transformed_ddO_channel, &transformed_ddO_channel, starts, - ends, axes); - } else if (!is_sys_pad && strides.size() == 3U) { - Slice( - ctx, &transformed_ddO_channel, &transformed_ddO_channel, starts, - ends, axes); - } - - TransToChannelLast( - ctx, &transformed_ddO_channel, ddO); - } - } - - T* transformed_dy_channel = transformed_dO.data(); - if (dW && ddX) { - ddx = transformed_ddX_channel.data(); - for (int i = 0; i < groups; i++) { -#ifdef PADDLE_WITH_HIP - wkspace_handle.RunFunc( - [&](void* workspace_ptr) { - PADDLE_ENFORCE_GPU_SUCCESS( - platform::dynload::miopenConvolutionBackwardWeights( - handle, &alpha, args3.odesc.desc(), - ddx + i * group_offset_in, args3.idesc.desc(), - transformed_dy_channel + i * group_offset_out, - args3.cdesc.desc(), filter_algo, &beta, - args3.wdesc.desc(), dw + i * group_offset_filter, - workspace_ptr, workspace_size)); - }, - workspace_size); -#else // PADDLE_WITH_HIP - wkspace_handle.RunFunc( - [&](void* workspace_ptr) { - PADDLE_ENFORCE_GPU_SUCCESS( - platform::dynload::cudnnConvolutionBackwardFilter( - handle, &alpha, args3.idesc.desc(), - transformed_dy_channel + i * group_offset_out, - args3.odesc.desc(), ddx + i * group_offset_in, - args3.cdesc.desc(), filter_algo, workspace_ptr, - workspace_size, &beta, args3.wdesc.desc(), - dw + i * group_offset_filter)); - }, - 
workspace_size); -#endif // PADDLE_WITH_HIP - } - } - - if (dX && ddW) { - ddw = ddW->data(); - for (int i = 0; i < groups; i++) { -#ifdef PADDLE_WITH_HIP - wkspace_handle.RunFunc( - [&](void* workspace_ptr) { - PADDLE_ENFORCE_GPU_SUCCESS( - platform::dynload::miopenConvolutionForward( - handle, &alpha, args4.idesc.desc(), - transformed_dy_channel + i * group_offset_out, - args4.wdesc.desc(), ddw + i * group_offset_filter, - args4.cdesc.desc(), data_algo, &beta, args4.odesc.desc(), - transformed_dx + i * group_offset_in, workspace_ptr, - workspace_size)); - }, - workspace_size); -#else // PADDLE_WITH_HIP - wkspace_handle.RunFunc( - [&](void* workspace_ptr) { - PADDLE_ENFORCE_GPU_SUCCESS( - platform::dynload::cudnnConvolutionForward( - handle, &alpha, args4.idesc.desc(), - transformed_dy_channel + i * group_offset_out, - args4.wdesc.desc(), ddw + i * group_offset_filter, - args4.cdesc.desc(), data_algo, workspace_ptr, - workspace_size, &beta, args4.odesc.desc(), - transformed_dx + i * group_offset_in)); - }, - workspace_size); -#endif // PADDLE_WITH_HIP - } - if (channel_last) { - TransToChannelLast( - ctx, &transformed_dX_channel, dX); - } - } - } -}; - -} // namespace operators -} // namespace paddle - -namespace ops = paddle::operators; -namespace plat = paddle::platform; - -#ifdef PADDLE_WITH_HIP -// MIOPEN do not support double -REGISTER_OP_KERNEL(conv2d_transpose, CUDNN, ::paddle::platform::CUDAPlace, - ops::CUDNNConvTransposeOpKernel, - ops::CUDNNConvTransposeOpKernel); -REGISTER_OP_KERNEL(conv2d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace, - ops::CUDNNConvTransposeGradOpKernel, - ops::CUDNNConvTransposeGradOpKernel); -REGISTER_OP_KERNEL( - conv2d_transpose_grad_grad, CUDNN, plat::CUDAPlace, - paddle::operators::CUDNNConvTransposeDoubleGradOpKernel, - paddle::operators::CUDNNConvTransposeDoubleGradOpKernel); - -REGISTER_OP_KERNEL(conv3d_transpose, CUDNN, ::paddle::platform::CUDAPlace, - ops::CUDNNConvTransposeOpKernel, - ops::CUDNNConvTransposeOpKernel); -REGISTER_OP_KERNEL(conv3d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace, - ops::CUDNNConvTransposeGradOpKernel, - ops::CUDNNConvTransposeGradOpKernel); -#else -REGISTER_OP_KERNEL(conv2d_transpose, CUDNN, ::paddle::platform::CUDAPlace, - ops::CUDNNConvTransposeOpKernel, - ops::CUDNNConvTransposeOpKernel, - ops::CUDNNConvTransposeOpKernel); -REGISTER_OP_KERNEL(conv2d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace, - ops::CUDNNConvTransposeGradOpKernel, - ops::CUDNNConvTransposeGradOpKernel, - ops::CUDNNConvTransposeGradOpKernel); -REGISTER_OP_KERNEL( - conv2d_transpose_grad_grad, CUDNN, plat::CUDAPlace, - paddle::operators::CUDNNConvTransposeDoubleGradOpKernel, - paddle::operators::CUDNNConvTransposeDoubleGradOpKernel, - paddle::operators::CUDNNConvTransposeDoubleGradOpKernel); - -REGISTER_OP_KERNEL(conv3d_transpose, CUDNN, ::paddle::platform::CUDAPlace, - ops::CUDNNConvTransposeOpKernel, - ops::CUDNNConvTransposeOpKernel, - ops::CUDNNConvTransposeOpKernel); -REGISTER_OP_KERNEL(conv3d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace, - ops::CUDNNConvTransposeGradOpKernel, - ops::CUDNNConvTransposeGradOpKernel, - ops::CUDNNConvTransposeGradOpKernel); -#endif diff --git a/paddle/fluid/operators/conv_transpose_op.cc b/paddle/fluid/operators/conv_transpose_op.cc index 86532664985b4f985099c44d36c2409e8d955132..fe76fc3aebbc173e4d916d2d2217a8d2922d169e 100644 --- a/paddle/fluid/operators/conv_transpose_op.cc +++ b/paddle/fluid/operators/conv_transpose_op.cc @@ -13,13 +13,17 @@ See the License for the specific 
language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/conv_transpose_op.h" -#include + #include #include #include "paddle/fluid/framework/data_layout.h" +#include "paddle/fluid/framework/infershape_utils.h" +#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_version_registry.h" #include "paddle/fluid/platform/cudnn_workspace_helper.h" - +#include "paddle/phi/core/infermeta_utils.h" +#include "paddle/phi/infermeta/backward.h" +#include "paddle/phi/infermeta/binary.h" #ifdef PADDLE_WITH_MKLDNN #include "paddle/fluid/platform/mkldnn_helper.h" #endif @@ -29,165 +33,6 @@ namespace operators { using DataLayout = framework::DataLayout; -void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const { - OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "ConvTranspose"); - OP_INOUT_CHECK(ctx->HasInput("Filter"), "Input", "Filter", "ConvTranspose"); - OP_INOUT_CHECK(ctx->HasOutput("Output"), "Output", "Output", "ConvTranspose"); - - auto in_dims = ctx->GetInputDim("Input"); - auto filter_dims = ctx->GetInputDim("Filter"); - std::vector output_size = - ctx->Attrs().Get>("output_size"); - std::vector output_padding = - ctx->Attrs().Get>("output_padding"); - std::vector strides = ctx->Attrs().Get>("strides"); - std::vector paddings = ctx->Attrs().Get>("paddings"); - std::vector dilations = ctx->Attrs().Get>("dilations"); - int groups = ctx->Attrs().Get("groups"); - std::string padding_algorithm = - ctx->Attrs().Get("padding_algorithm"); - const std::string data_layout_str = - ctx->Attrs().Get("data_format"); - const DataLayout data_layout = - ctx->IsRunMKLDNNKernel() ? DataLayout::kNCHW - : framework::StringToDataLayout(data_layout_str); - - PADDLE_ENFORCE_EQ(in_dims.size() == 4 || in_dims.size() == 5, true, - platform::errors::InvalidArgument( - "Input of Op(conv_transpose) should be 4-D or " - "5-D Tensor. But received: %u-D Tensor, " - "the shape of input is [%s]", - in_dims.size(), in_dims)); - PADDLE_ENFORCE_EQ( - in_dims.size(), filter_dims.size(), - platform::errors::InvalidArgument( - "The input's dimension size and filter's dimension size of " - "Op (conv_transpose) should be equal. But received: the shape of " - "input is [%s], the dimension size of input is [%d], the shape " - "of filter is [%s], the dimension size of filter is [%d]. ", - in_dims, in_dims.size(), filter_dims, filter_dims.size())); - - int stride_size = strides.size(); - for (int i = 0; i < stride_size; ++i) { - PADDLE_ENFORCE_GT( - strides[i], 0, - platform::errors::InvalidArgument( - "The stride of Op(Conv) should be larget than 0, but received " - "stride is %d.", - strides[i])); - } - - int in_sub_stride_size = in_dims.size() - stride_size; - - PADDLE_ENFORCE_EQ( - in_dims.size() - strides.size(), 2U, - platform::errors::InvalidArgument( - "The input's dimension size minus Attr(stride)'s size must " - "be euqal to 2 for Op(conv_transpose). 
But received: [%d], the " - "input's dimension size is [%d], the shape of input " - "is [%s], the Attr(stride)'s size is [%d].", - in_sub_stride_size, in_dims.size(), in_dims, strides.size())); - if (output_size.size()) - PADDLE_ENFORCE_EQ( - output_size.size(), strides.size(), - platform::errors::InvalidArgument( - "The Attr(output_size) and Attr(stride) of Op(conv_transpose) " - "should be the same.")); - if (output_padding.size()) - PADDLE_ENFORCE_EQ( - output_padding.size(), strides.size(), - platform::errors::InvalidArgument( - "The Attr(output_padding) and Attr(stride) of Op(conv_transpose) " - "should be the same.")); - - const int64_t C = - (data_layout != DataLayout::kNHWC ? in_dims[1] - : in_dims[in_dims.size() - 1]); - PADDLE_ENFORCE_EQ( - C, filter_dims[0], - platform::errors::InvalidArgument( - "The number of input channels should be equal to filter channels " - "for Op(conv_transpose). But received: the input's channels is " - "[%d], the shape of input is [%s], the filter's channels is [%d], " - "the shape of filter is [%s]. The data_format is %s." - "The error may come from wrong data_format setting.", - C, in_dims, filter_dims[0], filter_dims, data_layout_str)); - - framework::DDim in_data_dims; - if (data_layout != DataLayout::kNHWC) { - in_data_dims = phi::slice_ddim(in_dims, 2, in_dims.size()); - } else { - in_data_dims = phi::slice_ddim(in_dims, 1, in_dims.size() - 1); - } - framework::DDim filter_data_dims = - phi::slice_ddim(filter_dims, 2, filter_dims.size()); - std::vector ksize = phi::vectorize(filter_data_dims); - UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, - in_data_dims, strides, ksize); - - std::vector output_shape({in_dims[0]}); - if (data_layout != DataLayout::kNHWC) { - output_shape.push_back(filter_dims[1] * groups); - } - const int offset = (data_layout != DataLayout::kNHWC ? 2 : 1); - for (size_t i = 0; i < strides.size(); ++i) { - auto filter_extent = dilations[i] * (filter_dims[i + 2] - 1) + 1; - auto infer_shape = (ctx->IsRuntime() || in_dims[i + offset] > 0) - ? (in_dims[i + offset] - 1) * strides[i] - - paddings[2 * i] - paddings[2 * i + 1] + - filter_extent - : -1; - if (output_size.size()) { - if (ctx->IsRuntime()) { - PADDLE_ENFORCE_GE( - output_size[i], infer_shape, - platform::errors::InvalidArgument( - "output_size of Op(ConvTransposeOp) should not be " - "less than the infered output size. But received output_size = " - "[%s], whose dim %d is less than the infered output size [%s]", - phi::make_ddim(output_size).to_str(), i, infer_shape)); - PADDLE_ENFORCE_LT( - output_size[i], infer_shape + strides[i], - platform::errors::InvalidArgument( - "output_size of Op(ConvTransposeOp) should be less " - "than infered size + stride. But received output_size = [%s], " - "whose dim %d is not less than the infered output size (%d) + " - "stride (%d) = %d", - phi::make_ddim(output_size).to_str(), i, infer_shape, - strides[i], infer_shape + strides[i])); - } - output_shape.push_back(output_size[i]); - } else if (output_padding.size()) { - if (ctx->IsRuntime()) { - PADDLE_ENFORCE_GE( - output_padding[i], 0, - platform::errors::InvalidArgument( - "output_padding of Op(ConvTransposeOp) should not be " - "less than the 0. 
But received output_padding = " - "[%s], whose dim %d is less than 0", - phi::make_ddim(output_padding).to_str(), i)); - PADDLE_ENFORCE_LT( - output_padding[i], std::max(strides[i], dilations[i]), - platform::errors::InvalidArgument( - "output_padding of Op(ConvTransposeOp) should be less " - "than either stride or dilation. But received output_size = " - "[%s], " - "whose dim %d is not less than either stride (%d) or " - "dilation (%d)", - phi::make_ddim(output_size).to_str(), i, strides[i], - dilations[i])); - } - output_shape.push_back((infer_shape + output_padding[i])); - } else { - output_shape.push_back(infer_shape); - } - } - if (data_layout == DataLayout::kNHWC) { - output_shape.push_back(filter_dims[1] * groups); - } - ctx->SetOutputDim("Output", phi::make_ddim(output_shape)); -} - framework::OpKernelType ConvTransposeOp::GetExpectedKernelType( const framework::ExecutionContext& ctx) const { framework::LibraryType library_{framework::LibraryType::kPlain}; @@ -217,7 +62,7 @@ framework::OpKernelType ConvTransposeOp::GetExpectedKernelType( } framework::OpKernelType ConvTransposeOp::GetKernelTypeForVar( - const std::string& var_name, const Tensor& tensor, + const std::string& var_name, const framework::Tensor& tensor, const framework::OpKernelType& expected_kernel_type) const { #ifdef PADDLE_WITH_MKLDNN // Only input require reshaping, weights and @@ -493,17 +338,6 @@ Example: )DOC"); } -void ConvTransposeOpGrad::InferShape(framework::InferShapeContext* ctx) const { - auto in_dims = ctx->GetInputDim("Input"); - auto filter_dims = ctx->GetInputDim("Filter"); - if (ctx->HasOutput(framework::GradVarName("Input"))) { - ctx->SetOutputDim(framework::GradVarName("Input"), in_dims); - } - if (ctx->HasOutput(framework::GradVarName("Filter"))) { - ctx->SetOutputDim(framework::GradVarName("Filter"), filter_dims); - } -} - framework::OpKernelType ConvTransposeOpGrad::GetExpectedKernelType( const framework::ExecutionContext& ctx) const { bool use_cudnn = @@ -587,24 +421,6 @@ class ConvTransposeDoubleGradMaker : public framework::SingleGradOpMaker { } }; -void ConvTransposeOpDoubleGrad::InferShape( - framework::InferShapeContext* ctx) const { - auto x_dims = ctx->GetInputDim("Input"); - auto w_dims = ctx->GetInputDim("Filter"); - auto do_dims = ctx->GetInputDim("DOutput"); - - if (ctx->HasOutput("DDOutput") && - (ctx->HasInput("DDInput") || (ctx->HasInput("DDFilter")))) { - ctx->SetOutputDim("DDOutput", do_dims); - } - if (ctx->HasOutput("DFilter") && ctx->HasInput("DDInput")) { - ctx->SetOutputDim("DFilter", w_dims); - } - if (ctx->HasOutput("DInput") && ctx->HasInput("DDFilter")) { - ctx->SetOutputDim("DInput", x_dims); - } -} - framework::OpKernelType ConvTransposeOpDoubleGrad::GetExpectedKernelType( const framework::ExecutionContext& ctx) const { bool use_cudnn = @@ -635,59 +451,57 @@ framework::OpKernelType ConvTransposeOpDoubleGrad::GetExpectedKernelType( namespace ops = paddle::operators; // conv2d_transpose +DECLARE_INFER_SHAPE_FUNCTOR(conv2d_transpose, Conv2dTranposeInferShapeFunctor, + PD_INFER_META(phi::ConvTransposeInferMeta)); +DECLARE_INFER_SHAPE_FUNCTOR(conv2d_transpose_grad, + Conv2dTranposeGradInferShapeFunctor, + PD_INFER_META(phi::ConvTransposeGradInferMeta)); +DECLARE_INFER_SHAPE_FUNCTOR( + conv2d_transpose_grad_grad, Conv2dTranposeDoubleGradInferShapeFunctor, + PD_INFER_META(phi::Conv2dTransposeDoubleGradInferMeta)); + REGISTER_OPERATOR(conv2d_transpose, ops::ConvTransposeOp, ops::Conv2DTransposeOpMaker, ops::ConvTransposeGradOpMaker, - ops::ConvTransposeGradOpMaker); 
-REGISTER_OPERATOR( - conv2d_transpose_grad, ops::ConvTransposeOpGrad, - ops::ConvTransposeDoubleGradMaker, - ops::ConvTransposeDoubleGradMaker); -REGISTER_OPERATOR(conv2d_transpose_grad_grad, ops::ConvTransposeOpDoubleGrad); - -REGISTER_OP_CPU_KERNEL( - conv2d_transpose, - ops::GemmConvTransposeKernel, - ops::GemmConvTransposeKernel); -REGISTER_OP_CPU_KERNEL( - conv2d_transpose_grad, - ops::GemmConvTransposeGradKernel, - ops::GemmConvTransposeGradKernel); + ops::ConvTransposeGradOpMaker, + Conv2dTranposeInferShapeFunctor); +REGISTER_OPERATOR(conv2d_transpose_grad, ops::ConvTransposeOpGrad, + ops::ConvTransposeDoubleGradMaker, + ops::ConvTransposeDoubleGradMaker, + Conv2dTranposeGradInferShapeFunctor); +REGISTER_OPERATOR(conv2d_transpose_grad_grad, ops::ConvTransposeOpDoubleGrad, + Conv2dTranposeDoubleGradInferShapeFunctor); // conv3d_transpose +DECLARE_INFER_SHAPE_FUNCTOR(conv3d_transpose, Conv3dTranposeInferShapeFunctor, + PD_INFER_META(phi::ConvTransposeInferMeta)); +DECLARE_INFER_SHAPE_FUNCTOR(conv3d_transpose_grad, + Conv3dTranposeGradInferShapeFunctor, + PD_INFER_META(phi::ConvTransposeGradInferMeta)); + REGISTER_OPERATOR(conv3d_transpose, ops::ConvTransposeOp, ops::Conv3DTransposeOpMaker, ops::ConvTransposeGradOpMaker, - ops::ConvTransposeGradOpMaker); -REGISTER_OPERATOR(conv3d_transpose_grad, ops::ConvTransposeOpGrad); - -REGISTER_OP_CPU_KERNEL( - conv3d_transpose, - ops::GemmConvTransposeKernel, - ops::GemmConvTransposeKernel); -REGISTER_OP_CPU_KERNEL( - conv3d_transpose_grad, - ops::GemmConvTransposeGradKernel, - ops::GemmConvTransposeGradKernel); + ops::ConvTransposeGradOpMaker, + Conv3dTranposeInferShapeFunctor); +REGISTER_OPERATOR(conv3d_transpose_grad, ops::ConvTransposeOpGrad, + Conv3dTranposeGradInferShapeFunctor); // depthwise conv2d_transpose +DECLARE_INFER_SHAPE_FUNCTOR(depthwise_conv2d_transpose, + DepthWiseConv2dTranposeInferShapeFunctor, + PD_INFER_META(phi::ConvTransposeInferMeta)); +DECLARE_INFER_SHAPE_FUNCTOR(depthwise_conv2d_transpose_grad, + DepthWiseConv2dTranposeGradInferShapeFunctor, + PD_INFER_META(phi::ConvTransposeGradInferMeta)); + REGISTER_OPERATOR(depthwise_conv2d_transpose, ops::ConvTransposeOp, ops::Conv2DTransposeOpMaker, ops::ConvTransposeGradOpMaker, - ops::ConvTransposeGradOpMaker); -REGISTER_OPERATOR(depthwise_conv2d_transpose_grad, ops::ConvTransposeOpGrad); - -REGISTER_OP_CPU_KERNEL( - depthwise_conv2d_transpose, - ops::GemmConvTransposeKernel, - ops::GemmConvTransposeKernel); -REGISTER_OP_CPU_KERNEL( - depthwise_conv2d_transpose_grad, - ops::GemmConvTransposeGradKernel, - ops::GemmConvTransposeGradKernel); + ops::ConvTransposeGradOpMaker, + DepthWiseConv2dTranposeInferShapeFunctor); +REGISTER_OPERATOR(depthwise_conv2d_transpose_grad, ops::ConvTransposeOpGrad, + DepthWiseConv2dTranposeGradInferShapeFunctor); REGISTER_OP_VERSION(conv_transpose) .AddCheckpoint( diff --git a/paddle/fluid/operators/conv_transpose_op.cu b/paddle/fluid/operators/conv_transpose_op.cu deleted file mode 100644 index 054cb4b33895b02a816cc2bff82b1c9052bc645d..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/conv_transpose_op.cu +++ /dev/null @@ -1,185 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/fluid/operators/conv_transpose_op.h" -#include "paddle/phi/kernels/gpu/depthwise_conv.h" - -namespace ops = paddle::operators; -using CUDA = paddle::platform::CUDADeviceContext; - -namespace paddle { -namespace operators { -using Tensor = framework::Tensor; -using DDim = framework::DDim; - -template -class DepthwiseConvTransposeKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - const std::string data_layout_str = - context.Attr("data_format"); - const framework::DataLayout data_layout = - framework::StringToDataLayout(data_layout_str); - const Tensor* input = context.Input("Input"); - Tensor filter = *context.Input("Filter"); - Tensor* output = context.Output("Output"); - output->mutable_data(context.GetPlace()); - - int groups = context.Attr("groups"); - PADDLE_ENFORCE_EQ( - groups, filter.dims()[0], - platform::errors::InvalidArgument( - "groups should be error to the 1st dimension of filter. But " - "received groups is %d and filter dimension[0] is %d", - groups, filter.dims()[0])); - - std::vector strides = context.Attr>("strides"); - std::vector paddings = context.Attr>("paddings"); - std::vector dilations = context.Attr>("dilations"); - std::string padding_algorithm = - context.Attr("padding_algorithm"); - for (auto v : dilations) { - PADDLE_ENFORCE_EQ(v, 1, platform::errors::InvalidArgument( - "dilations should be 1 in depthwise conv. 
" - "But received dilations is %d", - v)); - } - - auto in_dims = input->dims(); - auto filter_dims = filter.dims(); - - framework::DDim in_data_dims; - if (data_layout != framework::DataLayout::kNHWC) { - in_data_dims = phi::slice_ddim(in_dims, 2, in_dims.size()); - } else { - in_data_dims = phi::slice_ddim(in_dims, 1, in_dims.size() - 1); - } - framework::DDim filter_data_dims = - phi::slice_ddim(filter_dims, 2, filter_dims.size()); - std::vector ksize = phi::vectorize(filter_data_dims); - UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, - in_data_dims, strides, ksize); - - output->mutable_data(context.GetPlace()); - auto& dev_ctx = context.template device_context(); - phi::funcs::SetConstant set_zero; - set_zero(dev_ctx, output, static_cast(0)); - - math::DepthwiseConvInputGradFunctor - depthwiseConvInputGrad; - depthwiseConvInputGrad( - static_cast::TYPE&>(dev_ctx), - *output, filter, *input, strides, - std::vector{paddings[0], paddings[2], paddings[1], paddings[3]}, - dilations, output, data_layout); - } -}; - -template -class DepthwiseConvTransposeGradKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - const std::string data_layout_str = - context.Attr("data_format"); - const framework::DataLayout data_layout = - framework::StringToDataLayout(data_layout_str); - const Tensor* input = context.Input("Input"); - const Tensor* output_grad = - context.Input(framework::GradVarName("Output")); - Tensor* input_grad = - context.Output(framework::GradVarName("Input")); - Tensor* filter_grad = - context.Output(framework::GradVarName("Filter")); - Tensor filter = *context.Input("Filter"); - - if (!input_grad && !filter_grad) return; - - auto& dev_ctx = context.template device_context(); - std::vector strides = context.Attr>("strides"); - std::vector paddings = context.Attr>("paddings"); - std::vector dilations = context.Attr>("dilations"); - std::string padding_algorithm = - context.Attr("padding_algorithm"); - - auto in_dims = input->dims(); - auto filter_dims = filter.dims(); - - framework::DDim in_data_dims; - if (data_layout != framework::DataLayout::kNHWC) { - in_data_dims = phi::slice_ddim(in_dims, 2, in_dims.size()); - } else { - in_data_dims = phi::slice_ddim(in_dims, 1, in_dims.size() - 1); - } - framework::DDim filter_data_dims = - phi::slice_ddim(filter_dims, 2, filter_dims.size()); - std::vector ksize = phi::vectorize(filter_data_dims); - UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, - in_data_dims, strides, ksize); - - if (input_grad) { - math::DepthwiseConvFunctor depthwiseConv; - depthwiseConv( - static_cast::TYPE&>(dev_ctx), - *output_grad, filter, strides, - std::vector{paddings[0], paddings[2], paddings[1], paddings[3]}, - dilations, input_grad, data_layout); - } - - if (filter_grad) { - phi::funcs::SetConstant set_zero; - filter_grad->mutable_data(context.GetPlace()); - set_zero(dev_ctx, filter_grad, static_cast(0)); - - math::DepthwiseConvFilterGradFunctor - depthwiseConvFilterGrad; - depthwiseConvFilterGrad( - static_cast::TYPE&>(dev_ctx), - *output_grad, *input, strides, - std::vector{paddings[0], paddings[2], paddings[1], paddings[3]}, - dilations, filter_grad, data_layout); - } - } -}; - -} // namespace operators -} // namespace paddle -// conv2d -REGISTER_OP_CUDA_KERNEL(conv2d_transpose, - ops::GemmConvTransposeKernel, - ops::GemmConvTransposeKernel); -REGISTER_OP_CUDA_KERNEL(conv2d_transpose_grad, - ops::GemmConvTransposeGradKernel, - 
ops::GemmConvTransposeGradKernel); -REGISTER_OP_CUDA_KERNEL(conv2d_transpose_grad_grad, - ops::GemmConvTransposeGradKernel, - ops::GemmConvTransposeGradKernel); - -// conv3d -REGISTER_OP_CUDA_KERNEL(conv3d_transpose, - ops::GemmConvTransposeKernel, - ops::GemmConvTransposeKernel); -REGISTER_OP_CUDA_KERNEL(conv3d_transpose_grad, - ops::GemmConvTransposeGradKernel, - ops::GemmConvTransposeGradKernel); - -// depthwise conv2d -REGISTER_OP_CUDA_KERNEL(depthwise_conv2d_transpose, - ops::DepthwiseConvTransposeKernel, - ops::DepthwiseConvTransposeKernel); -REGISTER_OP_CUDA_KERNEL(depthwise_conv2d_transpose_grad, - ops::DepthwiseConvTransposeGradKernel, - ops::DepthwiseConvTransposeGradKernel); diff --git a/paddle/fluid/operators/conv_transpose_op.h b/paddle/fluid/operators/conv_transpose_op.h index ee0fb7ab3683364f6db3cffd7ddef67c61f19433..ac95dceb8280cdee6d2fcafa686d951ad8866efc 100644 --- a/paddle/fluid/operators/conv_transpose_op.h +++ b/paddle/fluid/operators/conv_transpose_op.h @@ -13,72 +13,14 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include -#include -#include -#include "paddle/fluid/framework/eigen.h" -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/operators/conv_op.h" -#include "paddle/fluid/operators/eigen/eigen_function.h" -#include "paddle/fluid/operators/math/concat_and_split.h" -#include "paddle/fluid/operators/math/im2col.h" -#include "paddle/fluid/operators/math/vol2col.h" -#include "paddle/phi/kernels/funcs/blas/blas.h" + +#include "paddle/fluid/framework/op_kernel_type.h" +#include "paddle/fluid/framework/op_proto_maker.h" +#include "paddle/fluid/framework/operator.h" namespace paddle { namespace operators { -using Tensor = framework::Tensor; -using DDim = framework::DDim; - -template -static void Slice(const framework::ExecutionContext& context, - const Tensor* input, Tensor* out, - const std::vector& begin_vec, - const std::vector& end_vec, - const std::vector& axes_vec) { - auto& place = - *context.template device_context().eigen_device(); - auto in_dims = input->dims(); - auto offsets = Eigen::DSizes(); - auto extents = Eigen::DSizes(); - for (size_t i = 0; i < D; ++i) { - offsets[i] = 0; - extents[i] = in_dims[i]; - } - - std::vector out_shape_vec = phi::vectorize(in_dims); - for (size_t i = 0; i < axes_vec.size(); ++i) { - offsets[axes_vec[i]] = begin_vec[i]; - extents[axes_vec[i]] = end_vec[i] - begin_vec[i]; - out_shape_vec[axes_vec[i]] = end_vec[i] - begin_vec[i]; - } - - framework::DDim out_dims(phi::make_ddim(out_shape_vec)); - out->mutable_data(out_dims, context.GetPlace()); - - auto in_t = - framework::EigenTensor::From( - *input); - auto out_t = - framework::EigenTensor::From( - *out, out_dims); - - EigenSlice, T, D>::Eval(place, out_t, in_t, - offsets, extents); - out->Resize(out_dims); -} - -template -static void Slice(const framework::ExecutionContext& context, - const Tensor* input, Tensor* out, int64_t begin_idx, - int64_t end_idx, int64_t axes) { - std::vector begin_vec = {begin_idx}; - std::vector end_vec = {end_idx}; - std::vector axes_vec = {axes}; - Slice(context, input, out, begin_vec, end_vec, axes_vec); -} - // Define Op classes in .h file so that other conv transpose // operator implementations can reuse the code. 
class Conv2DTransposeOpMaker : public framework::OpProtoAndCheckerMaker { @@ -94,21 +36,19 @@ class Conv3DTransposeOpMaker : public framework::OpProtoAndCheckerMaker { class ConvTransposeOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - void InferShape(framework::InferShapeContext* ctx) const override; protected: framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override; framework::OpKernelType GetKernelTypeForVar( - const std::string& var_name, const Tensor& tensor, + const std::string& var_name, const framework::Tensor& tensor, const framework::OpKernelType& expected_kernel_type) const override; }; class ConvTransposeOpGrad : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - void InferShape(framework::InferShapeContext* ctx) const override; protected: framework::OpKernelType GetExpectedKernelType( @@ -118,464 +58,11 @@ class ConvTransposeOpGrad : public framework::OperatorWithKernel { class ConvTransposeOpDoubleGrad : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - void InferShape(framework::InferShapeContext* ctx) const override; protected: framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override; }; -template -class GemmConvTransposeKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - const std::string data_layout_str = - context.Attr("data_format"); - const framework::DataLayout data_layout = - framework::StringToDataLayout(data_layout_str); - const Tensor* input = context.Input("Input"); - // The filter will be reshaped, so it should not be constant pointer - Tensor filter = *context.Input("Filter"); - Tensor* output = context.Output("Output"); - - std::vector strides = context.Attr>("strides"); - std::vector paddings = context.Attr>("paddings"); - std::vector dilations = context.Attr>("dilations"); - int groups = context.Attr("groups"); - std::string padding_algorithm = - context.Attr("padding_algorithm"); - - auto in_dims = input->dims(); - auto filter_dims = filter.dims(); - auto out_dims = output->dims(); - const int batch_size = static_cast(input->dims()[0]); - - framework::DDim in_data_dims; - if (data_layout != framework::DataLayout::kNHWC) { - in_data_dims = phi::slice_ddim(in_dims, 2, in_dims.size()); - } else { - in_data_dims = phi::slice_ddim(in_dims, 1, in_dims.size() - 1); - } - framework::DDim filter_data_dims = - phi::slice_ddim(filter_dims, 2, filter_dims.size()); - std::vector ksize = phi::vectorize(filter_data_dims); - UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, - in_data_dims, strides, ksize); - - // input_shape_vec: {n, c, h, w} or {n, c, d, h, w} for channel_first - // input_shape_vec: {n, h, w, c} or {n, d, h, w, c} for channel_last - std::vector input_shape_vec = phi::vectorize(input->dims()); - // filter_shape_vec: {k_o, k_i, k_h, k_w} or {k_o, k_i, k_d, k_h, k_w} - std::vector filter_shape_vec = phi::vectorize(filter.dims()); - - // use col_shape in the im2col and col2im (or vol2col and col2vol) - // calculation - // col_shape_vec: {o_c/g, k_h, k_w, h, w} or {o_c/g, k_d, k_h, k_w, d, h, w} - size_t data_dim = filter_shape_vec.size() - 2; - std::vector col_shape_vec(1 + 2 * data_dim); - if (data_layout != framework::DataLayout::kNHWC) { - col_shape_vec[0] = out_dims[1] / groups; - for (size_t j = 0; j < 
data_dim; ++j) { - col_shape_vec[j + 1] = filter_shape_vec[j + 2]; - col_shape_vec[j + 1 + data_dim] = input_shape_vec[j + 2]; - } - } else { - col_shape_vec[0] = out_dims[out_dims.size() - 1] / groups; - for (size_t j = 0; j < data_dim; ++j) { - col_shape_vec[j + 1] = filter_shape_vec[j + 2]; - col_shape_vec[j + 1 + data_dim] = input_shape_vec[j + 1]; - } - } - DDim col_shape(phi::make_ddim(col_shape_vec)); - - // use col_matrix_shape in the gemm calculation - // size: (o_c/g * k_h * k_w, h * w) or (o_c/g * k_d * k_h * k_w, d * h * w) - DDim col_matrix_shape = phi::flatten_to_2d(col_shape, data_dim + 1); - - Tensor col; - col.mutable_data(col_shape, context.GetPlace()); - // col_matrix shares the same piece of data with col, - // but will be reshaped into a two-dimensional matrix shape - // to call the matrix multiplication interface. - Tensor col_matrix; - col_matrix.ShareDataWith(col); - col_matrix.Resize(col_matrix_shape); - - // output size: (o_c, o_h, o_w) or (o_c, o_d, o_h, o_w) for channel_first - // output size: (o_h, o_w, o_c) or (o_d, o_h, o_w, o_c) for channel_last - DDim output_shape = - phi::slice_ddim(output->dims(), 1, output->dims().size()); - - // input matrix size: (i_c, h * w) or (i_c, d * h * w) for channel_first - // input matrix size: (h * w, i_c) or (d * h * w, i_c) for channel_last - DDim input_matrix_shape; - if (data_layout != framework::DataLayout::kNHWC) { - input_matrix_shape = {in_dims[1], col_matrix_shape[1]}; - } else { - input_matrix_shape = {col_matrix_shape[1], in_dims[in_dims.size() - 1]}; - } - - // filter size: (i_c, o_c/g * k_h * k_w) or (i_c, o_c/g * k_d * k_h * k_w) - DDim filter_matrix_shape; - if (data_layout != framework::DataLayout::kNHWC) { - filter_matrix_shape = {in_dims[1], col_matrix_shape[0]}; - } else { - filter_matrix_shape = {in_dims[in_dims.size() - 1], col_matrix_shape[0]}; - } - filter.Resize(filter_matrix_shape); - - output->mutable_data(context.GetPlace()); - phi::funcs::SetConstant set_zero; - auto& dev_ctx = context.template device_context(); - auto blas = phi::funcs::GetBlas(dev_ctx); - set_zero(dev_ctx, output, static_cast(0)); - - int in_step = - (data_layout != framework::DataLayout::kNHWC - ? static_cast(in_dims[1]) / groups - : static_cast(in_dims[in_dims.size() - 1]) / groups); - - int out_step = - (data_layout != framework::DataLayout::kNHWC - ? static_cast(out_dims[1]) / groups - : static_cast(out_dims[out_dims.size() - 1]) / groups); - math::Col2ImFunctor col2im; - math::Col2VolFunctor col2vol; - math::ConcatFunctor concat_functor; - - // convolution transpose: gemm + col2im or col2vol (similar to conv-backward - // on input) - size_t D = input->dims().size(); - for (int i = 0; i < batch_size; i++) { - // batch with size (i_c, h * w) or (i_c, d * h * w) for channel_first - // batch with size (h * w, i_c) or (d * h * w, i_c) for channel_last - Tensor input_batch = input->Slice(i, i + 1).Resize(input_matrix_shape); - - // output size: (o_c, o_h, o_w) or (o_c, o_d, o_h, o_w) for channel_first - // output size: (o_h, o_w, o_c) or (o_d, o_h, o_w, o_c) for channel_last - Tensor output_batch = output->Slice(i, i + 1).Resize(output_shape); - - std::vector output_batch_vec; - for (int g = 0; g < groups; g++) { - int64_t start = g * in_step; - int64_t end = (g + 1) * in_step; - int axes = (data_layout != framework::DataLayout::kNHWC ? 
0 : 1); - Tensor filter_slice = filter.Slice(g * in_step, (g + 1) * in_step); - Tensor in_slice, out_slice; - - // col_matrix = filter_slice * input_slice - // of shape (o_c/g * k_h * k_w, h * w) - // or (o_c/g * k_d * k_h * k_w, d * h * w) - if (data_layout != framework::DataLayout::kNHWC) { - in_slice = input_batch.Slice(g * in_step, (g + 1) * in_step); - out_slice = output_batch.Slice(g * out_step, (g + 1) * out_step); - blas.MatMul(filter_slice, true, in_slice, false, static_cast(1.0), - &col_matrix, static_cast(0.0)); - } else { - Slice(context, &input_batch, &in_slice, start, - end, axes); - start = g * out_step; - end = (g + 1) * out_step; - axes = D - 2; - if (D == 4U) { - Slice(context, &output_batch, &out_slice, - start, end, axes); - } else if (D == 5U) { - Slice(context, &output_batch, &out_slice, - start, end, axes); - } - blas.MatMul(filter_slice, true, in_slice, true, static_cast(1.0), - &col_matrix, static_cast(0.0)); - } - - if (data_dim == 2U) { - // col2im: col_matrix -> dy - // from (o_c/g * k_h * k_w, h * w) to (o_c/g, o_h, o_w) or (o_h, o_w, - // o_c/g) - col2im(dev_ctx, col, dilations, strides, - std::vector{paddings[0], paddings[2], paddings[1], - paddings[3]}, - &out_slice, data_layout); - } else if (data_dim == 3U) { - // col2vol: col_matrix -> dy - // from (o_c/g * k_d * k_h * k_w, d * h * w) to (o_c/g, o_d, o_h, o_w) - // or (o_d, o_h, o_w, o_c/g) - col2vol(dev_ctx, col, dilations, strides, paddings, &out_slice, - data_layout); - } - if (data_layout == framework::DataLayout::kNHWC) { - output_batch_vec.push_back(out_slice); - } - } - if (data_layout == framework::DataLayout::kNHWC) { - concat_functor(dev_ctx, output_batch_vec, static_cast(D - 2), - &output_batch); - } - } - } -}; - -template -class GemmConvTransposeGradKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - const std::string data_layout_str = - context.Attr("data_format"); - const framework::DataLayout data_layout = - framework::StringToDataLayout(data_layout_str); - const Tensor* input = context.Input("Input"); - const Tensor* output_grad = - context.Input(framework::GradVarName("Output")); - // For filter, we do not use const pointer b/c we will do reshape, - // but we should avoid modifying its value. 
- Tensor filter = *context.Input("Filter"); - Tensor* input_grad = - context.Output(framework::GradVarName("Input")); - Tensor* filter_grad = - context.Output(framework::GradVarName("Filter")); - - if ((!input_grad) && (!filter_grad)) return; - - std::vector strides = context.Attr>("strides"); - std::vector paddings = context.Attr>("paddings"); - std::vector dilations = context.Attr>("dilations"); - int groups = context.Attr("groups"); - std::string padding_algorithm = - context.Attr("padding_algorithm"); - - auto in_dims = input->dims(); - auto filter_dims = filter.dims(); - auto out_grad_dims = output_grad->dims(); - const int batch_size = static_cast(input->dims()[0]); - - framework::DDim in_data_dims; - if (data_layout != framework::DataLayout::kNHWC) { - in_data_dims = phi::slice_ddim(in_dims, 2, in_dims.size()); - } else { - in_data_dims = phi::slice_ddim(in_dims, 1, in_dims.size() - 1); - } - framework::DDim filter_data_dims = - phi::slice_ddim(filter_dims, 2, filter_dims.size()); - std::vector ksize = phi::vectorize(filter_data_dims); - UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, - in_data_dims, strides, ksize); - - // input_shape_vec: {n, c, h, w} or {n, c, d, h, w} for channel_first - // input_shape_vec: {n, h, w, c} or {n, d, h, w, c} for channel_last - std::vector input_shape_vec = phi::vectorize(input->dims()); - // filter_shape_vec: {i_c, o_c, k_h, k_w} or {i_c, o_c, k_d, k_h, k_w} - std::vector filter_shape_vec = phi::vectorize(filter.dims()); - - // use col_shape in the im2col and col2im (or vol2col and col2vol) - // calculation - // col_shape_vec: {o_c, k_h, k_w, h, w} or {o_c, k_d, k_h, k_w, d, h, w} for - size_t data_dim = filter_shape_vec.size() - 2; - std::vector col_shape_vec(1 + 2 * data_dim); - if (data_layout != framework::DataLayout::kNHWC) { - col_shape_vec[0] = out_grad_dims[1]; - for (size_t j = 0; j < data_dim; ++j) { - col_shape_vec[j + 1] = filter_shape_vec[j + 2]; - col_shape_vec[j + 1 + data_dim] = input_shape_vec[j + 2]; - } - } else { - col_shape_vec[0] = out_grad_dims[out_grad_dims.size() - 1]; - for (size_t j = 0; j < data_dim; ++j) { - col_shape_vec[j + 1] = filter_shape_vec[j + 2]; - col_shape_vec[j + 1 + data_dim] = input_shape_vec[j + 1]; - } - } - DDim col_shape(phi::make_ddim(col_shape_vec)); - - // use col_matrix_shape in the gemm calculation - // size: (o_c * k_h * k_w, h * w) or (o_c * k_d * k_h * k_w, d * h * w) - DDim col_matrix_shape = phi::flatten_to_2d(col_shape, data_dim + 1); - - // output size: (o_c, o_h, o_w) or (o_c, o_d, o_h, o_w) for channel_first - // output size: (o_h, o_w, o_c) or (o_d, o_h, o_w, o_c) for channel_last - DDim output_shape = - phi::slice_ddim(output_grad->dims(), 1, output_grad->dims().size()); - - // input matrix size: (i_c, h * w) or (i_c, d * h * w) for channel_first - // input matrix size: (h * w, i_c) or (d * h * w, i_c) for channel_last - DDim input_matrix_shape; - if (data_layout != framework::DataLayout::kNHWC) { - input_matrix_shape = {in_dims[1], col_matrix_shape[1]}; - } else { - input_matrix_shape = {col_matrix_shape[1], in_dims[in_dims.size() - 1]}; - } - - // filter size: (i_c, o_c/g * k_h * k_w) or (i_c, o_c/g * k_d * k_h * k_w) - DDim filter_matrix_shape; - if (data_layout != framework::DataLayout::kNHWC) { - filter_matrix_shape = {in_dims[1], col_matrix_shape[0] / groups}; - } else { - filter_matrix_shape = {in_dims[in_dims.size() - 1], - col_matrix_shape[0] / groups}; - } - filter.Resize(filter_matrix_shape); - - int in_step = - (data_layout != framework::DataLayout::kNHWC 
- ? static_cast(in_dims[1]) / groups - : static_cast(in_dims[in_dims.size() - 1]) / groups); - int col_step = static_cast(col_matrix_shape[0]) / groups; - - // convolution transpose grad on input: - // im2col + gemm (similar to conv-forward) - // input need to compute gradient - auto& dev_ctx = context.template device_context(); - auto blas = phi::funcs::GetBlas(dev_ctx); - if (input_grad || filter_grad) { - Tensor col; - col.mutable_data(col_shape, context.GetPlace()); - // col_matrix shares the same piece of data with col, - // but will be reshaped into a two-dimensional matrix shape - // to call the matrix multiplication interface. - Tensor col_matrix; - col_matrix.ShareDataWith(col); - col_matrix.Resize(col_matrix_shape); - - Tensor filter_grad_; - phi::funcs::SetConstant set_zero; - - math::Im2ColFunctor im2col; - math::Vol2ColFunctor vol2col; - math::ConcatFunctor concat_functor; - - if (input_grad) { - input_grad->mutable_data(context.GetPlace()); - set_zero(dev_ctx, input_grad, static_cast(0)); - } - if (filter_grad) { // filter_grad_ size (i_c, o_c/g, k_h, k_w) - filter_grad->mutable_data(context.GetPlace()); - set_zero(dev_ctx, filter_grad, static_cast(0)); - filter_grad_ = *filter_grad; - filter_grad_.Resize(filter_matrix_shape); - } - - size_t D = input->dims().size(); - for (int i = 0; i < batch_size; i++) { - // batch with size (o_c, o_h, o_w) or (o_c, o_d, o_h, o_w) for - // channel_first - // batch with size (o_h, o_w, o_c) or (o_d, o_h, o_w, o_c) for - // channel_last - Tensor output_grad_batch = - output_grad->Slice(i, i + 1).Resize(output_shape); - - if (data_dim == 2U) { - // im2col: dy -> col matrix - // from (o_c, o_h, o_w) to (o_c * k_h * k_w, i_h * i_w) for - // channel_first - // from (o_h, o_w, o_c) to (o_c * k_h * k_w, i_h * i_w) for - // channel_last - im2col(dev_ctx, output_grad_batch, dilations, strides, - std::vector{paddings[0], paddings[2], paddings[1], - paddings[3]}, - &col, data_layout); - } else if (data_dim == 3U) { - // vol2col: dy -> col_matrix - // from (o_c, o_d, o_h, o_w) to (o_c * k_d * k_h * k_w, i_d * i_h * - // i_w) for channel_first - // from (o_d, o_h, o_w, o_c) to (i_d * i_h * i_w, o_c * k_d * k_h * - // k_w) for channel_last - vol2col(dev_ctx, output_grad_batch, dilations, strides, paddings, - &col, data_layout); - } - - if (input_grad) { - // batch with size (i_c, i_h, i_w) or (i_h, i_w, i_c) - Tensor input_grad_batch = - input_grad->Slice(i, i + 1).Resize(input_matrix_shape); - - // gemm: dx = filter * dy - // (i_c, o_c * k_h * k_w) * (o_c * k_h * k_w, i_h * i_w) -> (i_c, i_h - // * i_w) - // or - // (i_c, o_c * k_d * k_h * k_w) * (o_c * k_d * k_h * k_w, i_d * i_h * - // i_w) -> (i_c, - // i_d, i_h, i_w) - // gemm: dx = dy^T * filter^T for channel_last - - std::vector input_grad_batch_vec; - for (int g = 0; g < groups; g++) { - // input_grad_slice: (i_c/g, i_h * i_w) or (i_c/g, i_d * i_h * i_w) - // for channel_first - // input_grad_slice: (i_h * i_w, i_c/g) or (i_d * i_h * i_w, i_c/g) - // for channel_last - // filter_slice: (i_c/g, o_c/g * k_h * k_w) - Tensor filter_slice = filter.Slice(g * in_step, (g + 1) * in_step); - // col_matrix_slice: (o_c/g * k_h * k_w, h * w) or (o_c/g * k_d * - // k_h * k_w, d * h * w) - Tensor col_matrix_slice = - col_matrix.Slice(g * col_step, (g + 1) * col_step); - if (data_layout != framework::DataLayout::kNHWC) { - Tensor input_grad_slice = - input_grad_batch.Slice(g * in_step, (g + 1) * in_step); - blas.MatMul(filter_slice, false, col_matrix_slice, false, - static_cast(1.0), &input_grad_slice, - 
static_cast(0.0)); - } else { - Tensor input_grad_slice; - Slice(context, &input_grad_batch, - &input_grad_slice, g * in_step, - (g + 1) * in_step, 1); - blas.MatMul(col_matrix_slice, true, filter_slice, true, - static_cast(1.0), &input_grad_slice, - static_cast(0.0)); - DDim input_grad_slice_shape; - if (data_dim == 2U) { - input_grad_slice_shape = {in_dims[1], in_dims[2], in_step}; - } else { - input_grad_slice_shape = {in_dims[1], in_dims[2], in_dims[3], - in_step}; - } - input_grad_slice = - input_grad_slice.Resize(input_grad_slice_shape); - input_grad_batch_vec.push_back(input_grad_slice); - } - } - if (data_layout == framework::DataLayout::kNHWC) { - concat_functor(dev_ctx, input_grad_batch_vec, - static_cast(D - 2), &input_grad_batch); - } - } - if (filter_grad) { - // input batch: (i_c, i_h * i_w) or (i_h, i_w * i_c) - Tensor in_batch = input->Slice(i, i + 1).Resize(input_matrix_shape); - // gemm: d_filter = x * dy^T - // (i_c, i_h * i_w) * (i_h * i_w, o_c * k_h * k_w) -> (i_c, o_c * k_h - // * k_w) - // or - // (i_c, i_d * i_h * i_w) * (i_d * i_h * i_w, o_c * k_d * k_h * k_w) - // -> (i_c, o_c * k_d * - // k_h * k_w) - // gemm: d_filter = x^T * dy^T for channel_last - - for (int g = 0; g < groups; g++) { - Tensor filter_grad_slice = - filter_grad_.Slice(g * in_step, (g + 1) * in_step); - Tensor col_matrix_slice = - col_matrix.Slice(g * col_step, (g + 1) * col_step); - if (data_layout != framework::DataLayout::kNHWC) { - Tensor in_batch_slice = - in_batch.Slice(g * in_step, (g + 1) * in_step); - blas.MatMul(in_batch_slice, false, col_matrix_slice, true, - static_cast(1.0), &filter_grad_slice, - static_cast(1.0)); - } else { - Tensor in_batch_slice; - Slice(context, &in_batch, &in_batch_slice, - g * in_step, (g + 1) * in_step, 1); - blas.MatMul(in_batch_slice, true, col_matrix_slice, true, - static_cast(1.0), &filter_grad_slice, - static_cast(1.0)); - } - } - } - } - } - } -}; - } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/conv_transpose_op_npu.cc b/paddle/fluid/operators/conv_transpose_op_npu.cc index 7d0ebf21829c21d06889c291aec2a53f4badc5d4..050ede78f72cfea7c7e20829d530167885181798 100644 --- a/paddle/fluid/operators/conv_transpose_op_npu.cc +++ b/paddle/fluid/operators/conv_transpose_op_npu.cc @@ -13,11 +13,15 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "paddle/fluid/operators/conv_transpose_op.h" + +#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/platform/device/npu/npu_op_runner.h" +#include "paddle/phi/kernels/cpu/conv_util.h" namespace paddle { namespace operators { +using Tensor = framework::Tensor; using NPUDeviceContext = platform::NPUDeviceContext; template @@ -55,8 +59,8 @@ class Conv2DTransposeNPUKernel : public framework::OpKernel { filter_data_dims = phi::slice_ddim(filter_dims, 2, in_dims.size()); std::vector ksize = phi::vectorize(filter_data_dims); - UpdatePaddingAndDilation(&padding, &dilation, padding_algorithm, - in_data_dims, stride, ksize); + phi::UpdatePaddingAndDilation(&padding, &dilation, padding_algorithm, + in_data_dims, stride, ksize); // construct NPU attr std::vector strides(4, 1); @@ -137,8 +141,8 @@ class Conv2DTransposeGradNPUKernel : public framework::OpKernel { framework::DDim filter_data_dims = phi::slice_ddim(filter_dims, 2, filter_dims.size()); std::vector ksize = phi::vectorize(filter_data_dims); - UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, - in_data_dims, strides, ksize); + phi::UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, + in_data_dims, strides, ksize); std::vector strides_vec(4, 1); std::vector dilations_vec(4, 1); diff --git a/paddle/fluid/operators/conv_transpose_op_xpu.cc b/paddle/fluid/operators/conv_transpose_op_xpu.cc index 12e1739f2a267582602f300e0f4ea8593b8c870a..b8bd3c4f006087273e1ae139d42d86891aabad1c 100644 --- a/paddle/fluid/operators/conv_transpose_op_xpu.cc +++ b/paddle/fluid/operators/conv_transpose_op_xpu.cc @@ -8,15 +8,22 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ + #include "paddle/fluid/operators/conv_transpose_op.h" + #include #include #include +#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/platform/device/device_wrapper.h" +#include "paddle/phi/kernels/cpu/conv_util.h" + #ifdef PADDLE_WITH_XPU namespace paddle { namespace operators { +using Tensor = framework::Tensor; + // target_len == 2 || target_len == 4 inline std::vector vector_extend(const std::vector& src, int target_len) { @@ -61,8 +68,8 @@ class Conv2DTransposeXPUKernel : public framework::OpKernel { framework::DDim filter_data_dims = phi::slice_ddim(filter.dims(), 2, filter.dims().size()); std::vector ksize = phi::vectorize(filter_data_dims); - UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, - in_data_dims, strides, ksize); + phi::UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, + in_data_dims, strides, ksize); const int batch_size = static_cast(input->dims()[0]); const int img_yc = static_cast(input->dims()[1]); @@ -135,8 +142,8 @@ class Conv2DTransposeGradXPUKernel : public framework::OpKernel { framework::DDim filter_data_dims = phi::slice_ddim(filter.dims(), 2, filter.dims().size()); std::vector ksize = phi::vectorize(filter_data_dims); - UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, - in_data_dims, strides, ksize); + phi::UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, + in_data_dims, strides, ksize); const int batch_size = static_cast(input->dims()[0]); const int img_yc = static_cast(input->dims()[1]); diff --git a/paddle/phi/infermeta/backward.cc b/paddle/phi/infermeta/backward.cc index 37d1a234b5767a3873bda6b41e6e410df1c452af..b680222f863505c57464abc3153ea9ff6ca19f6b 100644 --- a/paddle/phi/infermeta/backward.cc +++ b/paddle/phi/infermeta/backward.cc @@ -64,6 +64,45 @@ void BilinearTensorProductGradInferMeta(const MetaTensor& x, } } +void ConvTransposeGradInferMeta(const MetaTensor& x, + const MetaTensor& filter, + const MetaTensor& dout, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + MetaTensor* dx, + MetaTensor* dfilter) { + GeneralBinaryGradInferMeta(x, filter, dx, dfilter); +} + +void Conv2dTransposeDoubleGradInferMeta(const MetaTensor& x, + const MetaTensor& filter, + const MetaTensor& dout, + const MetaTensor& ddx, + const MetaTensor& ddfilter, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + MetaTensor* dx, + MetaTensor* dfilter, + MetaTensor* ddout) { + GeneralBinaryGradInferMeta(x, filter, dx, dfilter); + + if (ddout) { + ddout->share_meta(dout); + } +} + void GatherNdGradInferMeta(const MetaTensor& x, const MetaTensor& index, const MetaTensor& out_grad, diff --git a/paddle/phi/infermeta/backward.h b/paddle/phi/infermeta/backward.h index 6bc2869825497647c68e8c5f057912393b994f0d..5c49a58a715a40842ea321169e200adf54124805 100644 --- a/paddle/phi/infermeta/backward.h +++ b/paddle/phi/infermeta/backward.h @@ -37,6 +37,37 @@ void BilinearTensorProductGradInferMeta(const MetaTensor& x, MetaTensor* dweight, MetaTensor* dbias); +void ConvTransposeGradInferMeta(const MetaTensor& x, + const MetaTensor& filter, + const MetaTensor& dout, + const std::vector& strides, + const 
std::vector& paddings, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + MetaTensor* dx, + MetaTensor* dfilter); + +void Conv2dTransposeDoubleGradInferMeta(const MetaTensor& x, + const MetaTensor& filter, + const MetaTensor& dout, + const MetaTensor& ddx, + const MetaTensor& ddfilter, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + MetaTensor* dx, + MetaTensor* dfilter, + MetaTensor* ddout); + void GatherNdGradInferMeta(const MetaTensor& x, const MetaTensor& index, const MetaTensor& out_grad, diff --git a/paddle/phi/infermeta/binary.cc b/paddle/phi/infermeta/binary.cc index aabb944db30b9f30394f092c245bc0307d8bbf3f..36a049eca0f30c4d5d292d23b94cbead53c71208 100644 --- a/paddle/phi/infermeta/binary.cc +++ b/paddle/phi/infermeta/binary.cc @@ -17,8 +17,10 @@ limitations under the License. */ #include #include #include "paddle/phi/common/data_type.h" +#include "paddle/phi/common/layout.h" #include "paddle/phi/core/ddim.h" #include "paddle/phi/core/infermeta_utils.h" +#include "paddle/phi/kernels/cpu/conv_util.h" #include "paddle/phi/kernels/funcs/common_shape.h" #include "paddle/phi/kernels/cpu/conv_util.h" @@ -312,51 +314,6 @@ void CompareAllInferMeta(const MetaTensor& x, out->set_dtype(DataType::BOOL); } -void CrossInferMeta(const MetaTensor& x, - const MetaTensor& y, - int axis, - MetaTensor* out) { - auto x_dim = x.dims(); - auto y_dim = y.dims(); - auto dim = axis; - - bool dims_match = phi::funcs::CheckDims(x_dim, y_dim); - PADDLE_ENFORCE_EQ( - dims_match, - true, - phi::errors::InvalidArgument("The 'shape' of Input(X) should be equal to " - "the 'shape' of Input(Y). But received " - "Input(X).dimensions = [%s], " - "Input(Y).dimensions = [%s]", - x_dim, - y_dim)); - - if (dim != DDim::kMaxRank) { - PADDLE_ENFORCE_EQ( - dim < x_dim.size() && dim >= (0 - x_dim.size()), - true, - phi::errors::OutOfRange( - "Attr(dim) is out of range, It's expected " - "to be in range of [-%d, %d]. But received Attr(dim) = %d.", - x_dim.size(), - x_dim.size() - 1, - dim)); - if (dim < 0) { - dim += x_dim.size(); - } - PADDLE_ENFORCE_EQ(x_dim[dim] == 3 && y_dim[dim] == 3, - true, - phi::errors::InvalidArgument( - "Input(X/Y).dims()[dim] should be equal to 3." - "But received Input(X/Y).dims()[dim] = %d.", - x_dim[dim])); - } - out->set_dims(x_dim); - out->set_dtype(x.dtype()); - out->set_layout(x.layout()); - out->share_lod(x); -} - void ConvInferMeta(const MetaTensor& input, const MetaTensor& filter, const std::vector& strides, @@ -512,6 +469,241 @@ void ConvInferMeta(const MetaTensor& input, out->set_dtype(input.dtype()); } +void ConvTransposeInferMeta(const MetaTensor& x, + const MetaTensor& filter, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + MetaTensor* out, + MetaConfig config) { + auto x_dims = x.dims(); + auto filter_dims = filter.dims(); + + std::vector paddings_ = paddings; + std::vector dilations_ = dilations; + + const DataLayout data_layout = + config.is_run_mkldnn_kernel + ? 
DataLayout::kNCHW + : paddle::framework::StringToDataLayout(data_format); + + PADDLE_ENFORCE_EQ( + x_dims.size() == 4 || x_dims.size() == 5, + true, + errors::InvalidArgument("Input of Op(conv_transpose) should be 4-D or " + "5-D Tensor. But received: %u-D Tensor, " + "the shape of input is [%s]", + x_dims.size(), + x_dims)); + PADDLE_ENFORCE_EQ( + x_dims.size(), + filter_dims.size(), + errors::InvalidArgument( + "The input's dimension size and filter's dimension size of " + "Op (conv_transpose) should be equal. But received: the shape of " + "input is [%s], the dimension size of input is [%d], the shape " + "of filter is [%s], the dimension size of filter is [%d]. ", + x_dims, + x_dims.size(), + filter_dims, + filter_dims.size())); + + int stride_size = strides.size(); + for (int i = 0; i < stride_size; ++i) { + PADDLE_ENFORCE_GT( + strides[i], + 0, + errors::InvalidArgument( + "The stride of Op(Conv) should be larget than 0, but received " + "stride is %d.", + strides[i])); + } + + int in_sub_stride_size = x_dims.size() - stride_size; + + PADDLE_ENFORCE_EQ( + x_dims.size() - strides.size(), + 2U, + errors::InvalidArgument( + "The input's dimension size minus Attr(stride)'s size must " + "be euqal to 2 for Op(conv_transpose). But received: [%d], the " + "input's dimension size is [%d], the shape of input " + "is [%s], the Attr(stride)'s size is [%d].", + in_sub_stride_size, + x_dims.size(), + x_dims, + strides.size())); + if (output_size.size()) + PADDLE_ENFORCE_EQ( + output_size.size(), + strides.size(), + errors::InvalidArgument( + "The Attr(output_size) and Attr(stride) of Op(conv_transpose) " + "should be the same.")); + if (output_padding.size()) + PADDLE_ENFORCE_EQ( + output_padding.size(), + strides.size(), + errors::InvalidArgument( + "The Attr(output_padding) and Attr(stride) of Op(conv_transpose) " + "should be the same.")); + + const int64_t C = + (data_layout != DataLayout::kNHWC ? x_dims[1] + : x_dims[x_dims.size() - 1]); + PADDLE_ENFORCE_EQ( + C, + filter_dims[0], + errors::InvalidArgument( + "The number of input channels should be equal to filter channels " + "for Op(conv_transpose). But received: the input's channels is " + "[%d], the shape of input is [%s], the filter's channels is [%d], " + "the shape of filter is [%s]. The data_format is %s." + "The error may come from wrong data_format setting.", + C, + x_dims, + filter_dims[0], + filter_dims, + data_format)); + + DDim x_data_dims; + if (data_layout != DataLayout::kNHWC) { + x_data_dims = slice_ddim(x_dims, 2, x_dims.size()); + } else { + x_data_dims = slice_ddim(x_dims, 1, x_dims.size() - 1); + } + DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size()); + std::vector ksize = vectorize(filter_data_dims); + UpdatePaddingAndDilation( + &paddings_, &dilations_, padding_algorithm, x_data_dims, strides, ksize); + + std::vector output_shape({x_dims[0]}); + if (data_layout != DataLayout::kNHWC) { + output_shape.push_back(filter_dims[1] * groups); + } + const int offset = (data_layout != DataLayout::kNHWC ? 2 : 1); + for (size_t i = 0; i < strides.size(); ++i) { + auto filter_extent = dilations_[i] * (filter_dims[i + 2] - 1) + 1; + auto infer_shape = (config.is_runtime || x_dims[i + offset] > 0) + ? 
(x_dims[i + offset] - 1) * strides[i] - + paddings_[2 * i] - paddings_[2 * i + 1] + + filter_extent + : -1; + if (output_size.size()) { + if (config.is_runtime) { + PADDLE_ENFORCE_GE( + output_size[i], + infer_shape, + errors::InvalidArgument( + "output_size of Op(ConvTransposeOp) should not be " + "less than the infered output size. But received output_size = " + "[%s], whose dim %d is less than the infered output size [%s]", + make_ddim(output_size).to_str(), + i, + infer_shape)); + PADDLE_ENFORCE_LT( + output_size[i], + infer_shape + strides[i], + errors::InvalidArgument( + "output_size of Op(ConvTransposeOp) should be less " + "than infered size + stride. But received output_size = [%s], " + "whose dim %d is not less than the infered output size (%d) + " + "stride (%d) = %d", + make_ddim(output_size).to_str(), + i, + infer_shape, + strides[i], + infer_shape + strides[i])); + } + output_shape.push_back(output_size[i]); + } else if (output_padding.size()) { + if (config.is_runtime) { + PADDLE_ENFORCE_GE( + output_padding[i], + 0, + errors::InvalidArgument( + "output_padding of Op(ConvTransposeOp) should not be " + "less than the 0. But received output_padding = " + "[%s], whose dim %d is less than 0", + make_ddim(output_padding).to_str(), + i)); + PADDLE_ENFORCE_LT( + output_padding[i], + std::max(strides[i], dilations_[i]), + errors::InvalidArgument( + "output_padding of Op(ConvTransposeOp) should be less " + "than either stride or dilation. But received output_size = " + "[%s], " + "whose dim %d is not less than either stride (%d) or " + "dilation (%d)", + make_ddim(output_size).to_str(), + i, + strides[i], + dilations_[i])); + } + output_shape.push_back((infer_shape + output_padding[i])); + } else { + output_shape.push_back(infer_shape); + } + } + if (data_layout == DataLayout::kNHWC) { + output_shape.push_back(filter_dims[1] * groups); + } + + out->set_dims(make_ddim(output_shape)); + out->set_dtype(x.dtype()); +} + +void CrossInferMeta(const MetaTensor& x, + const MetaTensor& y, + int axis, + MetaTensor* out) { + auto x_dim = x.dims(); + auto y_dim = y.dims(); + auto dim = axis; + + bool dims_match = phi::funcs::CheckDims(x_dim, y_dim); + PADDLE_ENFORCE_EQ( + dims_match, + true, + phi::errors::InvalidArgument("The 'shape' of Input(X) should be equal to " + "the 'shape' of Input(Y). But received " + "Input(X).dimensions = [%s], " + "Input(Y).dimensions = [%s]", + x_dim, + y_dim)); + + if (dim != DDim::kMaxRank) { + PADDLE_ENFORCE_EQ( + dim < x_dim.size() && dim >= (0 - x_dim.size()), + true, + phi::errors::OutOfRange( + "Attr(dim) is out of range, It's expected " + "to be in range of [-%d, %d]. But received Attr(dim) = %d.", + x_dim.size(), + x_dim.size() - 1, + dim)); + if (dim < 0) { + dim += x_dim.size(); + } + PADDLE_ENFORCE_EQ(x_dim[dim] == 3 && y_dim[dim] == 3, + true, + phi::errors::InvalidArgument( + "Input(X/Y).dims()[dim] should be equal to 3." 
+ "But received Input(X/Y).dims()[dim] = %d.", + x_dim[dim])); + } + out->set_dims(x_dim); + out->set_dtype(x.dtype()); + out->set_layout(x.layout()); + out->share_lod(x); +} + void DistInferMeta(const MetaTensor& x, const MetaTensor& y, float p, diff --git a/paddle/phi/infermeta/binary.h b/paddle/phi/infermeta/binary.h index d770a096de7c922c674b7edda55ae8cb531a6d00..9a54c4c5fa62d4c58e527b9efbf2e977f72354ec 100644 --- a/paddle/phi/infermeta/binary.h +++ b/paddle/phi/infermeta/binary.h @@ -83,6 +83,19 @@ void ConvInferMeta(const MetaTensor& input, MetaTensor* out, MetaConfig config = MetaConfig()); +void ConvTransposeInferMeta(const MetaTensor& x, + const MetaTensor& filter, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + MetaTensor* out, + MetaConfig config = MetaConfig()); + void CrossInferMeta(const MetaTensor& x, const MetaTensor& y, int axis, diff --git a/paddle/phi/kernels/conv_transpose_grad_kernel.h b/paddle/phi/kernels/conv_transpose_grad_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..2b1c0c1a934cf64dad552b36ce9cfd3808be6810 --- /dev/null +++ b/paddle/phi/kernels/conv_transpose_grad_kernel.h @@ -0,0 +1,90 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
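The output-extent rule that ConvTransposeInferMeta applies above is worth spelling out: for each spatial axis the inferred size is (in - 1) * stride - pad_begin - pad_end + dilation * (k - 1) + 1, optionally enlarged by output_padding (which the runtime checks keep below max(stride, dilation)). A minimal standalone sketch of that arithmetic follows; the names are illustrative only and not part of the patch.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Inferred transposed-convolution extent along one spatial axis.
int64_t ConvTransposeOutSize(int64_t in, int64_t k, int64_t stride,
                             int64_t pad_begin, int64_t pad_end,
                             int64_t dilation, int64_t output_padding = 0) {
  const int64_t filter_extent = dilation * (k - 1) + 1;
  return (in - 1) * stride - pad_begin - pad_end + filter_extent +
         output_padding;
}

int main() {
  // 8-wide input, 3-wide kernel, stride 2, padding 1 on both sides -> 15.
  assert(ConvTransposeOutSize(8, 3, 2, 1, 1, 1) == 15);
  std::printf("%lld\n",
              static_cast<long long>(ConvTransposeOutSize(8, 3, 2, 1, 1, 1)));
  return 0;
}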
+ +#pragma once + +#include +#include +#include "paddle/phi/core/dense_tensor.h" + +namespace phi { + +template +void Conv2dTransposeGradKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const DenseTensor& dout, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + DenseTensor* dx, + DenseTensor* dfilter); + +template +void Conv2dTransposeDoubleGradKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const DenseTensor& dout, + const DenseTensor& ddx, + const DenseTensor& ddfilter, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + DenseTensor* dx, + DenseTensor* dfilter, + DenseTensor* ddout); + +template +void Conv3dTransposeGradKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const DenseTensor& dout, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + DenseTensor* dx, + DenseTensor* dfilter); + +template +void DepthwiseConv2dTransposeGradKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const DenseTensor& dout, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + DenseTensor* dx, + DenseTensor* dfilter); + +} // namespace phi diff --git a/paddle/phi/kernels/conv_transpose_kernel.h b/paddle/phi/kernels/conv_transpose_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..de56f13ddf73e5c33e49227468e565e374d14c84 --- /dev/null +++ b/paddle/phi/kernels/conv_transpose_kernel.h @@ -0,0 +1,65 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
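The two backward InferMeta functions added to backward.cc above are intentionally thin: each gradient inherits the meta of the tensor it differentiates (via GeneralBinaryGradInferMeta), and the double-grad output shares dout's meta. A toy sketch of that contract, using hypothetical names that are not part of the phi API:

#include <array>
#include <cassert>
#include <cstdint>

// Stand-in for the phi::MetaTensor state involved; illustrative only.
struct Meta {
  std::array<int64_t, 4> dims;
  int dtype;
};

// dx mirrors x, dfilter mirrors filter -- the same pairing performed by
// GeneralBinaryGradInferMeta(x, filter, dx, dfilter) above.
void InferConvTransposeGradMetas(const Meta& x, const Meta& filter,
                                 Meta* dx, Meta* dfilter) {
  *dx = x;
  *dfilter = filter;
}

int main() {
  Meta x{{2, 16, 8, 8}, 0};       // NCHW input
  Meta filter{{16, 4, 3, 3}, 0};  // (i_c, o_c/g, k_h, k_w)
  Meta dx{}, dfilter{};
  InferConvTransposeGradMetas(x, filter, &dx, &dfilter);
  assert(dx.dims == x.dims && dfilter.dims == filter.dims);
  return 0;
}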
+ +#pragma once + +#include +#include +#include "paddle/phi/core/dense_tensor.h" + +namespace phi { + +template +void Conv2dTransposeKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + DenseTensor* out); + +template +void Conv3dTransposeKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + DenseTensor* out); + +template +void DepthwiseConv2dTransposeKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + DenseTensor* out); + +} // namespace phi diff --git a/paddle/phi/kernels/cpu/conv_transpose_grad_kernel.cc b/paddle/phi/kernels/cpu/conv_transpose_grad_kernel.cc new file mode 100644 index 0000000000000000000000000000000000000000..8d0749500695c5db2f07872e59d295981c598c9e --- /dev/null +++ b/paddle/phi/kernels/cpu/conv_transpose_grad_kernel.cc @@ -0,0 +1,70 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
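One convention the forward kernels declared above (and ConvTransposeInferMeta earlier) rely on: the transposed-convolution filter is laid out as (in_channels, out_channels / groups, k_h, k_w), so the input channel count must equal filter.dims()[0] and the output channel count is filter.dims()[1] * groups. A small sketch of that bookkeeping, with illustrative names only:

#include <cassert>
#include <cstdint>
#include <vector>

// Output channel count for an NCHW conv2d_transpose, checking the
// in_channels == filter_dims[0] constraint enforced by the InferMeta above.
int64_t ConvTransposeOutChannels(const std::vector<int64_t>& x_dims_nchw,
                                 const std::vector<int64_t>& filter_dims,
                                 int groups) {
  assert(x_dims_nchw[1] == filter_dims[0]);
  return filter_dims[1] * groups;
}

int main() {
  // 16 input channels, filter (16, 4, 3, 3), groups = 2 -> 8 output channels.
  assert(ConvTransposeOutChannels({1, 16, 8, 8}, {16, 4, 3, 3}, 2) == 8);
  return 0;
}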
+ +#include "paddle/phi/kernels/conv_transpose_grad_kernel.h" +#include "paddle/phi/kernels/impl/conv_transpose_grad_kernel_impl.h" + +#include "paddle/phi/core/kernel_registry.h" + +namespace phi { + +template +void DepthwiseConv2dTransposeGradKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const DenseTensor& dout, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + DenseTensor* dx, + DenseTensor* dfilter) { + ConvTransposeGradRawKernel(ctx, + x, + filter, + dout, + strides, + paddings, + padding_algorithm, + groups, + dilations, + data_format, + dx, + dfilter); +} + +} // namespace phi + +PD_REGISTER_KERNEL(conv2d_transpose_grad, + CPU, + ALL_LAYOUT, + phi::Conv2dTransposeGradKernel, + float, + double) {} +PD_REGISTER_KERNEL(conv3d_transpose_grad, + CPU, + ALL_LAYOUT, + phi::Conv3dTransposeGradKernel, + float, + double) {} +PD_REGISTER_KERNEL(depthwise_conv2d_transpose_grad, + CPU, + ALL_LAYOUT, + phi::DepthwiseConv2dTransposeGradKernel, + float, + double) {} diff --git a/paddle/phi/kernels/cpu/conv_transpose_kernel.cc b/paddle/phi/kernels/cpu/conv_transpose_kernel.cc new file mode 100644 index 0000000000000000000000000000000000000000..b4cacc850938ea87c34499c68b3aa8821e65943d --- /dev/null +++ b/paddle/phi/kernels/cpu/conv_transpose_kernel.cc @@ -0,0 +1,66 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
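As the comments in the removed GemmConvTransposeKernel note, the GEMM-based path computes the transpose as gemm + col2im (or col2vol), i.e. the data-gradient pass of an ordinary convolution; the CPU registrations above pull that path in through the impl headers. The effect is easiest to see in one dimension, where the operation scatter-adds a scaled kernel per input element and then crops by the padding. A minimal self-contained sketch, not taken from the patch:

#include <cassert>
#include <cstdio>
#include <vector>

// 1-D transposed convolution by scatter-add: each input element adds a scaled
// copy of the kernel into a stride-spaced buffer, then `pad` elements are
// cropped from both ends. Single channel, dilation 1, illustrative only.
std::vector<float> ConvTranspose1d(const std::vector<float>& x,
                                   const std::vector<float>& k, int stride,
                                   int pad) {
  std::vector<float> full((x.size() - 1) * stride + k.size(), 0.f);
  for (size_t i = 0; i < x.size(); ++i) {
    for (size_t j = 0; j < k.size(); ++j) {
      full[i * stride + j] += x[i] * k[j];
    }
  }
  return std::vector<float>(full.begin() + pad, full.end() - pad);
}

int main() {
  std::vector<float> y = ConvTranspose1d({1, 2, 3}, {1, 1, 1}, /*stride=*/2,
                                         /*pad=*/1);
  assert(y.size() == 5u);  // (3 - 1) * 2 - 2 * 1 + 3 = 5
  for (float v : y) std::printf("%g ", v);  // prints: 1 3 2 5 3
  std::printf("\n");
  return 0;
}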
+ +#include "paddle/phi/kernels/conv_transpose_kernel.h" +#include "paddle/phi/kernels/impl/conv_transpose_kernel_impl.h" + +#include "paddle/phi/core/kernel_registry.h" + +namespace phi { + +template +void DepthwiseConv2dTransposeKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + DenseTensor* out) { + ConvTransposeRawKernel(ctx, + x, + filter, + strides, + paddings, + padding_algorithm, + groups, + dilations, + data_format, + out); +} + +} // namespace phi + +PD_REGISTER_KERNEL(conv2d_transpose, + CPU, + ALL_LAYOUT, + phi::Conv2dTransposeKernel, + float, + double) {} +PD_REGISTER_KERNEL(conv3d_transpose, + CPU, + ALL_LAYOUT, + phi::Conv3dTransposeKernel, + float, + double) {} +PD_REGISTER_KERNEL(depthwise_conv2d_transpose, + CPU, + ALL_LAYOUT, + phi::DepthwiseConv2dTransposeKernel, + float, + double) {} diff --git a/paddle/phi/kernels/funcs/slice.h b/paddle/phi/kernels/funcs/slice.h index 0a50dceb0a00758b2b0ad5f92219812083cb5f24..38b127541650be9d63c840dfd209217a3be4c936 100644 --- a/paddle/phi/kernels/funcs/slice.h +++ b/paddle/phi/kernels/funcs/slice.h @@ -123,5 +123,56 @@ DenseTensor Slice(const Context& dev_ctx, return ret; } +// Use in conv_transpose kernel +template +static void Slice(const Context& ctx, + const DenseTensor* input, + DenseTensor* out, + const std::vector& begin_vec, + const std::vector& end_vec, + const std::vector& axes_vec) { + auto& place = *ctx.eigen_device(); + auto in_dims = input->dims(); + auto offsets = Eigen::DSizes(); + auto extents = Eigen::DSizes(); + for (size_t i = 0; i < D; ++i) { + offsets[i] = 0; + extents[i] = in_dims[i]; + } + + std::vector out_shape_vec = vectorize(in_dims); + for (size_t i = 0; i < axes_vec.size(); ++i) { + offsets[axes_vec[i]] = begin_vec[i]; + extents[axes_vec[i]] = end_vec[i] - begin_vec[i]; + out_shape_vec[axes_vec[i]] = end_vec[i] - begin_vec[i]; + } + + DDim out_dims(make_ddim(out_shape_vec)); + out->Resize(out_dims); + ctx.template Alloc(out); + + auto in_t = + EigenTensor::From(*input); + auto out_t = EigenTensor::From( + *out, out_dims); + + funcs::EigenSlice, T, D>::Eval( + place, out_t, in_t, offsets, extents); + out->Resize(out_dims); +} + +template +static void Slice(const Context& ctx, + const DenseTensor* input, + DenseTensor* out, + int64_t begin_idx, + int64_t end_idx, + int64_t axes) { + std::vector begin_vec = {begin_idx}; + std::vector end_vec = {end_idx}; + std::vector axes_vec = {axes}; + Slice(ctx, input, out, begin_vec, end_vec, axes_vec); +} + } // namespace funcs } // namespace phi diff --git a/paddle/phi/kernels/gpu/conv_transpose_grad_kernel.cu b/paddle/phi/kernels/gpu/conv_transpose_grad_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..e583e13650aebf2792014a9fb9e46ac12916af61 --- /dev/null +++ b/paddle/phi/kernels/gpu/conv_transpose_grad_kernel.cu @@ -0,0 +1,157 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
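The Slice helpers added to paddle/phi/kernels/funcs/slice.h above are a thin wrapper over Eigen's tensor slicing (per-axis offsets plus extents); the conv_transpose kernels use them to cut one group's channel block out of an NHWC batch. A tiny standalone example of the underlying Eigen operation, assuming only that Eigen's unsupported Tensor module is available:

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  // A 1 x 4 x 6 tensor; keep the first two axes whole and take channels
  // [2, 5) on the last axis -- the same offsets/extents pattern the Slice
  // helper above builds from begin_vec, end_vec and axes_vec.
  Eigen::Tensor<float, 3> t(1, 4, 6);
  t.setRandom();
  Eigen::DSizes<Eigen::DenseIndex, 3> offsets(0, 0, 2);
  Eigen::DSizes<Eigen::DenseIndex, 3> extents(1, 4, 3);
  Eigen::Tensor<float, 3> s = t.slice(offsets, extents);
  std::cout << s.dimension(0) << " " << s.dimension(1) << " "
            << s.dimension(2) << std::endl;  // 1 4 3
  return 0;
}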
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/phi/kernels/conv_transpose_grad_kernel.h" +#include "paddle/phi/kernels/impl/conv_transpose_grad_kernel_impl.h" + +#include "paddle/phi/common/layout.h" +#include "paddle/phi/core/ddim.h" +#include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/kernels/cpu/conv_util.h" +#include "paddle/phi/kernels/funcs/math_function.h" +#include "paddle/phi/kernels/gpu/depthwise_conv.h" + +namespace phi { + +template +void Conv2dTransposeDoubleGradKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const DenseTensor& dout, + const DenseTensor& ddx, + const DenseTensor& ddfilter, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + DenseTensor* dx, + DenseTensor* dfilter, + DenseTensor* ddout) { + ConvTransposeGradRawKernel(ctx, + x, + filter, + dout, + strides, + paddings, + padding_algorithm, + groups, + dilations, + data_format, + dx, + dfilter); +} + +template +void DepthwiseConv2dTransposeGradKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const DenseTensor& dout, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + DenseTensor* dx, + DenseTensor* dfilter) { + const DataLayout data_layout = + paddle::framework::StringToDataLayout(data_format); + DenseTensor filter_ = filter; + + if (!dx && !dfilter) { + return; + } + + std::vector paddings_ = paddings; + std::vector dilations_ = dilations; + + auto x_dims = x.dims(); + auto filter_dims = filter_.dims(); + + DDim in_data_dims; + if (data_layout != DataLayout::kNHWC) { + in_data_dims = slice_ddim(x_dims, 2, x_dims.size()); + } else { + in_data_dims = slice_ddim(x_dims, 1, x_dims.size() - 1); + } + DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size()); + std::vector ksize = vectorize(filter_data_dims); + UpdatePaddingAndDilation( + &paddings_, &dilations_, padding_algorithm, in_data_dims, strides, ksize); + + if (dx) { + paddle::operators::math::DepthwiseConvFunctor depthwiseConv; + depthwiseConv(ctx, + dout, + filter_, + strides, + std::vector{ + paddings_[0], paddings_[2], paddings_[1], paddings_[3]}, + dilations_, + dx, + data_layout); + } + + if (dfilter) { + funcs::SetConstant set_zero; + ctx.template Alloc(dfilter); + set_zero(ctx, dfilter, static_cast(0)); + + paddle::operators::math::DepthwiseConvFilterGradFunctor + depthwiseConvFilterGrad; + depthwiseConvFilterGrad( + ctx, + dout, + x, + strides, + std::vector{ + paddings_[0], paddings_[2], paddings_[1], paddings_[3]}, + dilations_, + dfilter, + data_layout); + } +} + +} // namespace phi + +PD_REGISTER_KERNEL(conv2d_transpose_grad, + GPU, + ALL_LAYOUT, + phi::Conv2dTransposeGradKernel, + float, + double) {} +PD_REGISTER_KERNEL(conv2d_transpose_grad_grad, + GPU, + 
ALL_LAYOUT, + phi::Conv2dTransposeDoubleGradKernel, + float, + double) {} +PD_REGISTER_KERNEL(conv3d_transpose_grad, + GPU, + ALL_LAYOUT, + phi::Conv3dTransposeGradKernel, + float, + double) {} +PD_REGISTER_KERNEL(depthwise_conv2d_transpose_grad, + GPU, + ALL_LAYOUT, + phi::DepthwiseConv2dTransposeGradKernel, + float, + double) {} diff --git a/paddle/phi/kernels/gpu/conv_transpose_kernel.cu b/paddle/phi/kernels/gpu/conv_transpose_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..b7d34a5baf3df530d003b3475bea8702d1440c77 --- /dev/null +++ b/paddle/phi/kernels/gpu/conv_transpose_kernel.cu @@ -0,0 +1,118 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/phi/kernels/conv_transpose_kernel.h" +#include "paddle/phi/kernels/impl/conv_transpose_kernel_impl.h" + +#include "paddle/phi/common/layout.h" +#include "paddle/phi/core/ddim.h" +#include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/kernels/cpu/conv_util.h" +#include "paddle/phi/kernels/funcs/math_function.h" +#include "paddle/phi/kernels/gpu/depthwise_conv.h" + +namespace phi { + +template +void DepthwiseConv2dTransposeKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + DenseTensor* out) { + const DataLayout data_layout = + paddle::framework::StringToDataLayout(data_format); + DenseTensor filter_ = filter; + ctx.template Alloc(out); + + PADDLE_ENFORCE_EQ( + groups, + filter_.dims()[0], + errors::InvalidArgument( + "groups should be error to the 1st dimension of filter_. But " + "received groups is %d and filter dimension[0] is %d", + groups, + filter_.dims()[0])); + + std::vector paddings_ = paddings; + std::vector dilations_ = dilations; + + for (auto v : dilations_) { + PADDLE_ENFORCE_EQ( + v, + 1, + errors::InvalidArgument("dilations should be 1 in depthwise conv. 
" + "But received dilations is %d", + v)); + } + + auto x_dims = x.dims(); + auto filter_dims = filter_.dims(); + + DDim in_data_dims; + if (data_layout != DataLayout::kNHWC) { + in_data_dims = slice_ddim(x_dims, 2, x_dims.size()); + } else { + in_data_dims = slice_ddim(x_dims, 1, x_dims.size() - 1); + } + DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size()); + std::vector ksize = vectorize(filter_data_dims); + UpdatePaddingAndDilation( + &paddings_, &dilations_, padding_algorithm, in_data_dims, strides, ksize); + + ctx.template Alloc(out); + + funcs::SetConstant set_zero; + set_zero(ctx, out, static_cast(0)); + + paddle::operators::math::DepthwiseConvInputGradFunctor + depthwiseConvInputGrad; + depthwiseConvInputGrad( + ctx, + *out, + filter, + x, + strides, + std::vector{paddings_[0], paddings_[2], paddings_[1], paddings_[3]}, + dilations_, + out, + data_layout); +} + +} // namespace phi + +PD_REGISTER_KERNEL(conv2d_transpose, + GPU, + ALL_LAYOUT, + phi::Conv2dTransposeKernel, + float, + double) {} +PD_REGISTER_KERNEL(conv3d_transpose, + GPU, + ALL_LAYOUT, + phi::Conv3dTransposeKernel, + float, + double) {} +PD_REGISTER_KERNEL(depthwise_conv2d_transpose, + GPU, + ALL_LAYOUT, + phi::DepthwiseConv2dTransposeKernel, + float, + double) {} diff --git a/paddle/phi/kernels/gpudnn/conv_transpose_grad_kernel.cu b/paddle/phi/kernels/gpudnn/conv_transpose_grad_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..2893bd74b1bce691ad9b9e3333e6afbf2a2850fd --- /dev/null +++ b/paddle/phi/kernels/gpudnn/conv_transpose_grad_kernel.cu @@ -0,0 +1,1122 @@ +/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/phi/kernels/conv_transpose_grad_kernel.h" + +#include +#include "paddle/phi/backends/dynload/cudnn.h" +#include "paddle/phi/common/float16.h" +#include "paddle/phi/core/ddim.h" +#include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/kernels/cpu/conv_util.h" +#include "paddle/phi/kernels/funcs/batch_norm_utils.h" +#include "paddle/phi/kernels/funcs/math_function.h" +#include "paddle/phi/kernels/funcs/padding.h" +#include "paddle/phi/kernels/funcs/slice.h" +#include "paddle/phi/kernels/transpose_kernel.h" + +#ifdef PADDLE_WITH_HIP +#include "paddle/fluid/operators/conv_miopen_helper.h" +#include "paddle/fluid/platform/device/gpu/rocm/miopen_helper.h" +#else +#include "paddle/fluid/operators/conv_cudnn_helper.h" +#include "paddle/fluid/platform/device/gpu/cuda/cudnn_helper.h" +#endif + +namespace phi { + +using GPUDNNDataLayout = paddle::platform::DataLayout; + +template +void ConvTransposeGradRawGPUDNNKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const DenseTensor& dout, + const std::vector& strides, + const std::vector& paddings, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + DenseTensor* dx, + DenseTensor* dfilter) { + const T* filter_data = filter.data(); + std::vector paddings_ = paddings; + std::vector dilations_ = + dilations; // cudnn v5 does not support dilations + const GPUDNNDataLayout data_layout = + (data_format != "NHWC" ? GPUDNNDataLayout::kNCHW + : GPUDNNDataLayout::kNHWC); + + // if channel_last, transpose to channel_first + DenseTensor x_transpose; + DenseTensor dout_transpose; + std::vector x_vec = vectorize(x.dims()); + std::vector out_vec = vectorize(dout.dims()); + if (data_layout == GPUDNNDataLayout::kNHWC) { + if (strides.size() == 2U) { + std::vector axis = {0, 3, 1, 2}; + for (size_t i = 0; i < axis.size(); ++i) { + x_vec[i] = x.dims()[axis[i]]; + out_vec[i] = dout.dims()[axis[i]]; + } + x_transpose = Transpose(ctx, x, axis); + dout_transpose = Transpose(ctx, dout, axis); + } else if (strides.size() == 3U) { + std::vector axis = {0, 4, 1, 2, 3}; + for (size_t i = 0; i < axis.size(); ++i) { + x_vec[i] = x.dims()[axis[i]]; + out_vec[i] = dout.dims()[axis[i]]; + } + x_transpose = Transpose(ctx, x, axis); + dout_transpose = Transpose(ctx, dout, axis); + } + } else { + x_transpose = x; + dout_transpose = dout; + } + + // update padding and dilation + auto x_dims = x_transpose.dims(); + auto filter_dims = filter.dims(); + DDim x_data_dims; + x_data_dims = slice_ddim(x_dims, 2, x_dims.size()); + DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size()); + std::vector ksize = vectorize(filter_data_dims); + UpdatePaddingAndDilation( + &paddings_, &dilations_, padding_algorithm, x_data_dims, strides, ksize); + + int data_dim = strides.size(); // 2d or 3d + bool is_sys_pad = funcs::IsSymmetricPadding(paddings_, data_dim); + + std::vector x_pad(x_dims.size() * 2, 0); + DenseTensor transformed_dout; + std::vector padding_common(data_dim, 0); + if (!is_sys_pad) { + std::vector padding_diff(data_dim); + std::vector new_dout_shape_vec(data_dim + 2); + new_dout_shape_vec[0] = dout_transpose.dims()[0]; + new_dout_shape_vec[1] = dout_transpose.dims()[1]; + + for (size_t i = 0; i < data_dim; ++i) { + padding_diff[i] = std::abs(paddings_[2 * i] - paddings_[2 * i + 1]); + padding_common[i] = std::min(paddings_[2 * i], paddings_[2 * i + 1]); + new_dout_shape_vec[i + 2] = + dout_transpose.dims()[i + 2] + padding_diff[i]; + x_pad[2 * i 
+ 4] = paddings_[2 * i] - padding_common[i]; + x_pad[2 * i + 4 + 1] = paddings_[2 * i + 1] - padding_common[i]; + } + + transformed_dout.Resize(make_ddim(new_dout_shape_vec)); + ctx.template Alloc(&transformed_dout); + + const int rank = x_transpose.dims().size(); + T pad_value(0.0); + switch (rank) { + case 4: { + funcs::PadFunction( + ctx, x_pad, dout_transpose, pad_value, &transformed_dout); + } break; + case 5: { + funcs::PadFunction( + ctx, x_pad, dout_transpose, pad_value, &transformed_dout); + } break; + default: + PADDLE_THROW(errors::InvalidArgument( + "Op(ConvTranspose) only supports 4-D or 5-D x DenseTensor.")); + } + } else { + transformed_dout = dout_transpose; + if (paddings_.size() == data_dim) { + for (size_t i = 0; i < data_dim; ++i) { + padding_common[i] = paddings_[i]; + } + } else { + for (size_t i = 0; i < data_dim; ++i) { + padding_common[i] = paddings_[2 * i]; + } + } + } + + const T* x_data = x_transpose.data(); + const T* dout_data = transformed_dout.data(); + out_vec = vectorize(transformed_dout.dims()); + + // ------------------- cudnn descriptors --------------------- + GPUDNNDataLayout layout; + + if (strides.size() == 2U) { + layout = GPUDNNDataLayout::kNCHW; + } else { + layout = GPUDNNDataLayout::kNCDHW; + } + + int iwo_groups = groups; + int c_groups = 1; +#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1) + iwo_groups = 1; + c_groups = groups; + groups = 1; +#endif + + auto dtype = paddle::platform::CudnnDataType::type; + + paddle::operators::ConvArgs args1{&transformed_dout, + &filter, + &x_transpose, + strides, + padding_common, + dilations_, + dtype}; + paddle::operators::ConvArgs args2{&transformed_dout, + &filter, + &x_transpose, + strides, + padding_common, + dilations_, + dtype}; + +#ifdef PADDLE_WITH_HIP + miopenConvFwdAlgorithm_t data_algo{}; + miopenConvBwdWeightsAlgorithm_t filter_algo{}; +#else + cudnnConvolutionFwdAlgo_t data_algo{}; + cudnnConvolutionBwdFilterAlgo_t filter_algo{}; +#endif + + auto layout_tensor = paddle::platform::GetCudnnTensorFormat(layout); + size_t workspace_size = 0; + auto handle = ctx.cudnn_handle(); + bool deterministic = FLAGS_cudnn_deterministic; + T* dx_data = nullptr; + T* dfilter_data = nullptr; + + if (dx) { + dx_data = ctx.template Alloc(dx); + args1.handle = handle; + args1.idesc.set(transformed_dout, iwo_groups); + args1.wdesc.set(filter, layout_tensor, iwo_groups); + args1.odesc.set(x_transpose, iwo_groups); + args1.cdesc.set(dtype, + padding_common, + strides, + dilations_, + paddle::platform::AllowTF32Cudnn(), + c_groups); +#ifdef PADDLE_WITH_HIP + using search1 = + paddle::operators::SearchAlgorithm; + workspace_size = std::max(workspace_size, search1::GetWorkspaceSize(args1)); + data_algo = + search1::Find(args1, false, deterministic, workspace_size, ctx); +#else + using search1 = + paddle::operators::SearchAlgorithm; + data_algo = search1::Find(args1, false, deterministic, ctx); + workspace_size = + std::max(workspace_size, search1::GetWorkspaceSize(args1, data_algo)); +#endif + } + + if (dfilter) { + dfilter_data = ctx.template Alloc(dfilter); + args2.handle = handle; + args2.idesc.set(transformed_dout, iwo_groups); + args2.wdesc.set(*dfilter, layout_tensor, iwo_groups); + args2.odesc.set(x_transpose, iwo_groups); + args2.cdesc.set(dtype, + padding_common, + strides, + dilations_, + paddle::platform::AllowTF32Cudnn(), + c_groups); +#ifdef PADDLE_WITH_HIP + using search2 = + paddle::operators::SearchAlgorithm; + workspace_size = std::max(workspace_size, search2::GetWorkspaceSize(args2)); + 
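// Keep the running maximum so that a single workspace allocation can serve
// both the backward-data and backward-filter algorithm choices below.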
filter_algo = + search2::Find(args2, false, deterministic, workspace_size, ctx); +#else + using search2 = + paddle::operators::SearchAlgorithm; + filter_algo = search2::Find(args2, false, deterministic, ctx); + workspace_size = + std::max(workspace_size, search2::GetWorkspaceSize(args2, filter_algo)); +#endif + } + + // ------------------- cudnn conv backward data --------------------- + // FIxME(typhoonzero): template type T may not be the same as cudnn call. + int x_offset = x.numel() / x.dims()[0] / groups; + int dout_offset = + transformed_dout.numel() / transformed_dout.dims()[0] / groups; + int filter_offset = filter.numel() / groups; + paddle::operators::ScalingParamType alpha = 1.0f; + paddle::operators::ScalingParamType beta = 0.0f; + auto workspace_handle = ctx.cudnn_workspace_handle(); + if (dx) { + // Because beta is zero, it is unnecessary to reset dx. + for (int g = 0; g < groups; g++) { +#ifdef PADDLE_WITH_HIP + auto cudnn_func = [&](void* cudnn_workspace) { + PADDLE_ENFORCE_GPU_SUCCESS( + dynload::miopenConvolutionForward(handle, + &alpha, + args1.idesc.desc(), + dout_data + dout_offset * g, + args1.wdesc.desc(), + filter_data + filter_offset * g, + args1.cdesc.desc(), + data_algo, + &beta, + args1.odesc.desc(), + dx_data + x_offset * g, + cudnn_workspace, + workspace_size)); + }; +#else // PADDLE_WITH_HIP + auto cudnn_func = [&](void* cudnn_workspace) { + PADDLE_ENFORCE_GPU_SUCCESS( + dynload::cudnnConvolutionForward(handle, + &alpha, + args1.idesc.desc(), + dout_data + dout_offset * g, + args1.wdesc.desc(), + filter_data + filter_offset * g, + args1.cdesc.desc(), + data_algo, + cudnn_workspace, + workspace_size, + &beta, + args1.odesc.desc(), + dx_data + x_offset * g)); + }; +#endif // PADDLE_WITH_HIP + workspace_handle.RunFunc(cudnn_func, workspace_size); + } + + if (data_layout == GPUDNNDataLayout::kNHWC) { + DenseTensor dx_transpose; + DenseTensor dx_nchw; + dx_nchw.ShareDataWith(*dx); + dx_nchw.Resize(make_ddim(x_vec)); + if (strides.size() == 2U) { + std::vector axis = {0, 2, 3, 1}; + dx_transpose = Transpose(ctx, dx_nchw, axis); + *dx = dx_transpose; + } else if (strides.size() == 3U) { + std::vector axis = {0, 2, 3, 4, 1}; + dx_transpose = Transpose(ctx, dx_nchw, axis); + *dx = dx_transpose; + } + } + } + + // ------------------- cudnn conv backward filter --------------------- + if (dfilter) { + // Because beta is zero, it is unnecessary to reset dfilter. 
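// Note: the forward pass of conv_transpose is cuDNN's backward-data, so the
// filter gradient below comes from ConvolutionBackwardFilter with the roles
// swapped: dout acts as the convolution "input" (idesc) and x as the
// convolution "output" (odesc), matching the args2 descriptors set above.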
+ // Gradient with respect to the filter + for (int g = 0; g < groups; g++) { +#ifdef PADDLE_WITH_HIP + auto cudnn_func = [&](void* cudnn_workspace) { + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenConvolutionBackwardWeights( + handle, + &alpha, + args2.odesc.desc(), + x_data + x_offset * g, + args2.idesc.desc(), + dout_data + dout_offset * g, + args2.cdesc.desc(), + filter_algo, + &beta, + args2.wdesc.desc(), + dfilter_data + filter_offset * g, + cudnn_workspace, + workspace_size)); + }; +#else // PADDLE_WITH_HIP + auto cudnn_func = [&](void* cudnn_workspace) { + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionBackwardFilter( + handle, + &alpha, + args2.idesc.desc(), + dout_data + dout_offset * g, + args2.odesc.desc(), + x_data + x_offset * g, + args2.cdesc.desc(), + filter_algo, + cudnn_workspace, + workspace_size, + &beta, + args2.wdesc.desc(), + dfilter_data + filter_offset * g)); + }; +#endif // PADDLE_WITH_HIP + workspace_handle.RunFunc(cudnn_func, workspace_size); + } + } +} + +template +void Conv2dTransposeGradGPUDNNKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const DenseTensor& dout, + const std::vector& strides, + const std::vector& paddings_, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations_, + const std::string& data_format, + DenseTensor* dx, + DenseTensor* dfilter) { + ConvTransposeGradRawGPUDNNKernel(ctx, + x, + filter, + dout, + strides, + paddings_, + padding_algorithm, + groups, + dilations_, + data_format, + dx, + dfilter); +} + +/* + * Inputs: I, filter, dout, ddI, ddfilter + * Outputs: ddout, dfilter, dI + * ddo = conv_bp_data(filter, ddI) + conv_bp_data(ddfilter, I) + * dfilter = conv_bp_filter(dout, ddI) + * dI = conv(dout, ddfilter) + */ +template +void Conv2dTransposeDoubleGradGPUDNNKernel( + const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const DenseTensor& dout, + const DenseTensor& ddx, + const DenseTensor& ddfilter, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + DenseTensor* dx, + DenseTensor* dfilter, + DenseTensor* ddout) { + if (dx) { + ctx.template Alloc(dx); + } + if (dfilter) { + ctx.template Alloc(dfilter); + } + if (ddout) { + ctx.template Alloc(ddout); + funcs::SetConstant set_zero; + set_zero(ctx, ddout, static_cast(0)); + } + + const T* filter_ = filter.data(); + const T* dout_ = dout.data(); + const T* ddx_ = nullptr; + const T* ddfilter_ = nullptr; + T* dx_ = nullptr; + T* dfilter_ = nullptr; + T* ddout_ = nullptr; + T* transformed_dx_ = nullptr; + + std::vector paddings_ = paddings; + std::vector dilations_ = dilations; + + bool deterministic = FLAGS_cudnn_deterministic; + const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC"); + + // transform DenseTensors to channel first----------- + DenseTensor transformed_x_channel(x.type()); + DenseTensor transformed_dout_channel(dout.type()); + DenseTensor transformed_ddx_channel(x.type()); + + DenseTensor transformed_dx_channel(x.type()); + DenseTensor transformed_ddout_channel(dout.type()); + + if (channel_last) { + ResizeToChannelFirst(ctx, &x, &transformed_x_channel); + TransToChannelFirst(ctx, &x, &transformed_x_channel); + + ResizeToChannelFirst(ctx, &dout, &transformed_dout_channel); + 
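// x, dout and ddx are all brought into channel-first order here because the
// cuDNN descriptors created below assume NCHW data.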
TransToChannelFirst(ctx, &dout, &transformed_dout_channel); + + ResizeToChannelFirst(ctx, &ddx, &transformed_ddx_channel); + TransToChannelFirst(ctx, &ddx, &transformed_ddx_channel); + + if (dx) { + ResizeToChannelFirst(ctx, dx, &transformed_dx_channel); + ctx.template Alloc(&transformed_dx_channel); + } + if (ddout) { + ResizeToChannelFirst(ctx, ddout, &transformed_ddout_channel); + } + } else { + transformed_x_channel = x; + transformed_dout_channel = dout; + transformed_ddx_channel = ddx; + + if (dx) { + transformed_dx_channel = *dx; + } + } + std::vector out_vec = vectorize(transformed_dout_channel.dims()); + + auto x_dims = transformed_x_channel.dims(); + auto filter_dims = filter.dims(); + DDim x_data_dims = slice_ddim(x_dims, 2, x_dims.size()); + DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size()); + std::vector ksize = vectorize(filter_data_dims); + UpdatePaddingAndDilation( + &paddings_, &dilations_, padding_algorithm, x_data_dims, strides, ksize); + + int data_dim = strides.size(); // 2d or 3d + bool is_sys_pad = funcs::IsSymmetricPadding(paddings_, data_dim); + DenseTensor transformed_x(x.type()); + DenseTensor transformed_ddx(x.type()); + + DenseTensor transformed_dout(dout.type()); + + std::vector padding_common(data_dim, 0); + std::vector input_pad(x.dims().size() * 2, 0); + + if (!is_sys_pad) { + // get pad + std::vector padding_diff(data_dim); + std::vector new_input_shape_vec(data_dim + 2); + std::vector new_output_grad_shape_vec(data_dim + 2); + + new_input_shape_vec[0] = transformed_x_channel.dims()[0]; + new_input_shape_vec[1] = transformed_x_channel.dims()[1]; + + new_output_grad_shape_vec[0] = transformed_dout_channel.dims()[0]; + new_output_grad_shape_vec[1] = transformed_dout_channel.dims()[1]; + + for (size_t i = 0; i < data_dim; ++i) { + padding_diff[i] = std::abs(paddings_[2 * i] - paddings_[2 * i + 1]); + padding_common[i] = std::min(paddings_[2 * i], paddings_[2 * i + 1]); + new_input_shape_vec[i + 2] = + transformed_x_channel.dims()[i + 2] + padding_diff[i]; + + new_output_grad_shape_vec[i + 2] = + transformed_dout_channel.dims()[i + 2] + padding_diff[i]; + + input_pad[2 * i + 4] = paddings_[2 * i] - padding_common[i]; + input_pad[2 * i + 4 + 1] = paddings_[2 * i + 1] - padding_common[i]; + } + DDim new_input_shape(make_ddim(new_input_shape_vec)); + transformed_x.Resize(new_input_shape); + transformed_ddx.Resize(new_input_shape); + transformed_dout.Resize(make_ddim(new_output_grad_shape_vec)); + + ctx.template Alloc(&transformed_x); + ctx.template Alloc(&transformed_ddx); + ctx.template Alloc(&transformed_dout); + + // pad for input + const int rank = x.dims().size(); + T pad_value(0.0); + switch (rank) { + case 4: { + funcs::PadFunction( + ctx, input_pad, transformed_x_channel, pad_value, &transformed_x); + funcs::PadFunction(ctx, + input_pad, + transformed_dout_channel, + pad_value, + &transformed_dout); + funcs::PadFunction(ctx, + input_pad, + transformed_ddx_channel, + pad_value, + &transformed_ddx); + } break; + case 5: { + funcs::PadFunction( + ctx, input_pad, transformed_x_channel, pad_value, &transformed_x); + funcs::PadFunction(ctx, + input_pad, + transformed_ddx_channel, + pad_value, + &transformed_ddx); + } break; + default: + PADDLE_THROW(errors::InvalidArgument( + "ConvOp only support tensors with 4 or 5 dimensions.")); + } + } else { + transformed_x = transformed_x_channel; + transformed_dout = transformed_dout_channel; + transformed_ddx = transformed_ddx_channel; + + if (paddings_.size() == data_dim) { + for (size_t i = 0; i < 
data_dim; ++i) { + padding_common[i] = paddings_[i]; + } + } else { + for (size_t i = 0; i < data_dim; ++i) { + padding_common[i] = paddings_[2 * i]; + } + } + } + + std::vector starts(data_dim, 0); + std::vector ends(data_dim, 0); + std::vector axes(data_dim, 0); + for (size_t i = 0; i < data_dim; ++i) { + starts[i] = input_pad[2 * i + 4] * (strides[i] + 1); + ends[i] = starts[i] + out_vec[i + 2]; + axes[i] = i + 2; + } + + std::vector transformed_out_vec = out_vec; + for (size_t i = 0; i < data_dim; ++i) { + transformed_out_vec[i + 2] = + out_vec[i + 2] + + (input_pad[2 * i + 4] + input_pad[2 * i + 5]) * strides[i] - + 2 * padding_common[i] + paddings_[2 * i] + paddings_[2 * i + 1]; + } + + if (!is_sys_pad) { + transformed_ddout_channel.Resize(make_ddim(transformed_out_vec)); + ctx.template Alloc(&transformed_ddout_channel); + } else { + ctx.template Alloc(ddout); + transformed_ddout_channel = *ddout; + transformed_ddout_channel.Resize(make_ddim(transformed_out_vec)); + } + + const T* x_ = transformed_x.data(); + + int iwo_group = groups; + int c_group = 1; +#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1) + iwo_group = 1; + c_group = groups; + groups = 1; +#endif + auto dtype = paddle::platform::CudnnDataType::type; + + auto handle = ctx.cudnn_handle(); + + paddle::operators::ConvArgs args1{&transformed_ddout_channel, + &filter, + &transformed_ddx, + strides, + padding_common, + dilations_, + dtype}; + paddle::operators::ConvArgs args2{&transformed_ddout_channel, + &ddfilter, + &transformed_x, + strides, + padding_common, + dilations_, + dtype}; + + paddle::operators::ConvArgs args3{&transformed_dout, + dfilter, + &transformed_ddx_channel, + strides, + padding_common, + dilations_, + dtype}; + paddle::operators::ConvArgs args4{&transformed_dout, + &ddfilter, + &transformed_dx_channel, + strides, + padding_common, + dilations_, + dtype}; +#ifdef PADDLE_WITH_HIP + miopenConvBwdDataAlgorithm_t bwd_algo1 = + static_cast(0); + miopenConvBwdDataAlgorithm_t bwd_algo2 = + static_cast(0); + miopenConvFwdAlgorithm_t data_algo = static_cast(0); + miopenConvBwdWeightsAlgorithm_t filter_algo = + static_cast(0); +#else + cudnnConvolutionBwdDataAlgo_t bwd_algo1 = + static_cast(0); + cudnnConvolutionBwdDataAlgo_t bwd_algo2 = + static_cast(0); + cudnnConvolutionFwdAlgo_t data_algo = + static_cast(0); + cudnnConvolutionBwdFilterAlgo_t filter_algo = + static_cast(0); +#endif + + auto layout = paddle::platform::GetCudnnTensorFormat(GPUDNNDataLayout::kNCHW); + + // ddo = conv(ddI, filter) + conv(I, ddfilter) + size_t workspace_size = 0; + + T* transformed_ddout_channel_ = nullptr; + + if (ddout) { + ddout_ = ddout->data(); + transformed_ddout_channel_ = transformed_ddout_channel.data(); + + args1.handle = handle; + args1.idesc.set(transformed_ddout_channel, iwo_group); + args1.wdesc.set(filter, layout, iwo_group); + args1.odesc.set(transformed_ddx, iwo_group); + args1.cdesc.set(dtype, + padding_common, + strides, + dilations_, + paddle::platform::AllowTF32Cudnn(), + c_group); +#ifdef PADDLE_WITH_HIP + using search1 = + paddle::operators::SearchAlgorithm; + workspace_size = search1::GetWorkspaceSize(args1); + bwd_algo1 = + search1::Find(args1, false, deterministic, workspace_size, ctx); +#else + using search1 = + paddle::operators::SearchAlgorithm; + bwd_algo1 = search1::Find(args1, false, deterministic, ctx); + workspace_size = search1::GetWorkspaceSize(args1, bwd_algo1); +#endif + + ddfilter_ = ddfilter.data(); + args2.handle = handle; + args2.idesc.set(transformed_ddout_channel, iwo_group); + 
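// The remaining args2 descriptors implement the second term of ddout from the
// formula above, conv_bp_data(ddfilter, x); it is accumulated onto the first
// term in the group loop further down.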
args2.wdesc.set(ddfilter, layout, iwo_group); + args2.odesc.set(transformed_x, iwo_group); + args2.cdesc.set(dtype, + padding_common, + strides, + dilations_, + paddle::platform::AllowTF32Cudnn(), + c_group); +#ifdef PADDLE_WITH_HIP + using search2 = + paddle::operators::SearchAlgorithm; + workspace_size = std::max(workspace_size, search2::GetWorkspaceSize(args2)); + bwd_algo2 = + search2::Find(args2, false, deterministic, workspace_size, ctx); +#else + using search2 = + paddle::operators::SearchAlgorithm; + bwd_algo2 = search2::Find(args2, false, deterministic, ctx); + workspace_size = + std::max(workspace_size, search2::GetWorkspaceSize(args2, bwd_algo2)); +#endif + } + + if (dfilter) { + dfilter_ = dfilter->data(); + args3.handle = handle; + args3.idesc.set(transformed_dout, iwo_group); + args3.wdesc.set(*dfilter, layout, iwo_group); + + args3.odesc.set(transformed_ddx_channel, iwo_group); + + args3.cdesc.set(dtype, + padding_common, + strides, + dilations_, + paddle::platform::AllowTF32Cudnn(), + c_group); +#ifdef PADDLE_WITH_HIP + using search3 = + paddle::operators::SearchAlgorithm; + workspace_size = std::max(workspace_size, search3::GetWorkspaceSize(args3)); + filter_algo = + search3::Find(args3, false, deterministic, workspace_size, ctx); +#else + using search3 = + paddle::operators::SearchAlgorithm; + filter_algo = search3::Find(args3, false, deterministic, ctx); + workspace_size = + std::max(workspace_size, search3::GetWorkspaceSize(args3, filter_algo)); +#endif + } + + if (dx) { + transformed_dx_ = transformed_dx_channel.data(); + + args4.handle = handle; + args4.idesc.set(transformed_dout, iwo_group); + args4.wdesc.set(ddfilter, layout, iwo_group); + args4.odesc.set(transformed_dx_channel, iwo_group); + args4.cdesc.set(dtype, + padding_common, + strides, + dilations_, + paddle::platform::AllowTF32Cudnn(), + c_group); +#ifdef PADDLE_WITH_HIP + using search4 = + paddle::operators::SearchAlgorithm; + workspace_size = std::max(workspace_size, search4::GetWorkspaceSize(args4)); + data_algo = + search4::Find(args4, false, deterministic, workspace_size, ctx); +#else + using search4 = + paddle::operators::SearchAlgorithm; + data_algo = search4::Find(args4, false, deterministic, ctx); + workspace_size = + std::max(workspace_size, search4::GetWorkspaceSize(args4, data_algo)); +#endif + } + + int i_n, i_c, i_d, i_h, i_w; + paddle::operators::GetNCDHW(transformed_x.dims(), + GPUDNNDataLayout::kNCHW, + &i_n, + &i_c, + &i_d, + &i_h, + &i_w); + + int o_n, o_c, o_d, o_h, o_w; + paddle::operators::GetNCDHW(transformed_dout.dims(), + GPUDNNDataLayout::kNCHW, + &o_n, + &o_c, + &o_d, + &o_h, + &o_w); + + int group_offset_in = + transformed_x.numel() / transformed_x.dims()[0] / groups; + int group_offset_out = + transformed_dout.numel() / transformed_dout.dims()[0] / groups; + int group_offset_filter = filter.numel() / groups; + + paddle::operators::ScalingParamType alpha = 1.0f; + paddle::operators::ScalingParamType beta = 0.0f; + + auto wkspace_handle = ctx.cudnn_workspace_handle(); + + if (ddout) { + ddx_ = transformed_ddx.data(); + for (int i = 0; i < groups; i++) { +#ifdef PADDLE_WITH_HIP + wkspace_handle.RunFunc( + [&](void* workspace_ptr) { + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenConvolutionBackwardData( + handle, + &alpha, + args1.odesc.desc(), + ddx_ + i * group_offset_in, + args1.wdesc.desc(), + filter_ + i * group_offset_filter, + args1.cdesc.desc(), + bwd_algo1, + &beta, + args1.idesc.desc(), + transformed_ddout_channel_ + i * group_offset_out, + workspace_ptr, + workspace_size)); 
+ }, + workspace_size); +#else // PADDLE_WITH_HIP + wkspace_handle.RunFunc( + [&](void* workspace_ptr) { + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionBackwardData( + handle, + &alpha, + args1.wdesc.desc(), + filter_ + i * group_offset_filter, + args1.odesc.desc(), + ddx_ + i * group_offset_in, + args1.cdesc.desc(), + bwd_algo1, + workspace_ptr, + workspace_size, + &beta, + args1.idesc.desc(), + transformed_ddout_channel_ + i * group_offset_out)); + }, + workspace_size); +#endif // PADDLE_WITH_HIP + } + + for (int i = 0; i < groups; i++) { +#ifdef PADDLE_WITH_HIP + // MIOPEN ONLY support beta to be 0.0f + DenseTensor conv_x_ddfilter(dout.type()); + conv_x_ddfilter.Resize(transformed_ddout_channel.dims()); + T* conv_x_ddfilter_data = ctx.template Alloc(&conv_x_ddfilter); + wkspace_handle.RunFunc( + [&](void* workspace_ptr) { + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenConvolutionBackwardData( + handle, + &alpha, + args2.odesc.desc(), + x_ + i * group_offset_in, + args2.wdesc.desc(), + ddfilter_ + i * group_offset_filter, + args2.cdesc.desc(), + bwd_algo2, + &beta, + args2.idesc.desc(), + conv_x_ddfilter_data + i * group_offset_out, + workspace_ptr, + workspace_size)); + }, + workspace_size); + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenOpTensor( + handle, + miopenTensorOpAdd, + &alpha, + args2.idesc.desc(), + transformed_ddout_channel_ + i * group_offset_out, + &alpha, + args2.idesc.desc(), + conv_x_ddfilter_data + i * group_offset_out, + &beta, + args2.idesc.desc(), + transformed_ddout_channel_ + i * group_offset_out)); +#else // PADDLE_WITH_HIP + wkspace_handle.RunFunc( + [&](void* workspace_ptr) { + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionBackwardData( + handle, + &alpha, + args2.wdesc.desc(), + ddfilter_ + i * group_offset_filter, + args2.odesc.desc(), + x_ + i * group_offset_in, + args2.cdesc.desc(), + bwd_algo2, + workspace_ptr, + workspace_size, + &alpha, + args2.idesc.desc(), + transformed_ddout_channel_ + i * group_offset_out)); + }, + workspace_size); +#endif // PADDLE_WITH_HIP + } + + if ((!is_sys_pad) && (!channel_last)) { + if (strides.size() == 2U) { + funcs::Slice( + ctx, &transformed_ddout_channel, ddout, starts, ends, axes); + } else if (!is_sys_pad && strides.size() == 3U) { + funcs::Slice( + ctx, &transformed_ddout_channel, ddout, starts, ends, axes); + } + } else if ((!is_sys_pad) && (channel_last)) { + if (strides.size() == 2U) { + funcs::Slice(ctx, + &transformed_ddout_channel, + &transformed_ddout_channel, + starts, + ends, + axes); + } else if (!is_sys_pad && strides.size() == 3U) { + funcs::Slice(ctx, + &transformed_ddout_channel, + &transformed_ddout_channel, + starts, + ends, + axes); + } + + TransToChannelLast(ctx, &transformed_ddout_channel, ddout); + } + } + + T* transformed_dout_channel_ = transformed_dout.data(); + if (dfilter) { + ddx_ = transformed_ddx_channel.data(); + for (int i = 0; i < groups; i++) { +#ifdef PADDLE_WITH_HIP + wkspace_handle.RunFunc( + [&](void* workspace_ptr) { + PADDLE_ENFORCE_GPU_SUCCESS( + dynload::miopenConvolutionBackwardWeights( + handle, + &alpha, + args3.odesc.desc(), + ddx_ + i * group_offset_in, + args3.idesc.desc(), + transformed_dout_channel_ + i * group_offset_out, + args3.cdesc.desc(), + filter_algo, + &beta, + args3.wdesc.desc(), + dfilter_ + i * group_offset_filter, + workspace_ptr, + workspace_size)); + }, + workspace_size); +#else // PADDLE_WITH_HIP + wkspace_handle.RunFunc( + [&](void* workspace_ptr) { + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionBackwardFilter( + handle, + &alpha, + 
args3.idesc.desc(), + transformed_dout_channel_ + i * group_offset_out, + args3.odesc.desc(), + ddx_ + i * group_offset_in, + args3.cdesc.desc(), + filter_algo, + workspace_ptr, + workspace_size, + &beta, + args3.wdesc.desc(), + dfilter_ + i * group_offset_filter)); + }, + workspace_size); +#endif // PADDLE_WITH_HIP + } + } + + if (dx) { + ddfilter_ = ddfilter.data(); + for (int i = 0; i < groups; i++) { +#ifdef PADDLE_WITH_HIP + wkspace_handle.RunFunc( + [&](void* workspace_ptr) { + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenConvolutionForward( + handle, + &alpha, + args4.idesc.desc(), + transformed_dout_channel_ + i * group_offset_out, + args4.wdesc.desc(), + ddfilter_ + i * group_offset_filter, + args4.cdesc.desc(), + data_algo, + &beta, + args4.odesc.desc(), + transformed_dx_ + i * group_offset_in, + workspace_ptr, + workspace_size)); + }, + workspace_size); +#else // PADDLE_WITH_HIP + wkspace_handle.RunFunc( + [&](void* workspace_ptr) { + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionForward( + handle, + &alpha, + args4.idesc.desc(), + transformed_dout_channel_ + i * group_offset_out, + args4.wdesc.desc(), + ddfilter_ + i * group_offset_filter, + args4.cdesc.desc(), + data_algo, + workspace_ptr, + workspace_size, + &beta, + args4.odesc.desc(), + transformed_dx_ + i * group_offset_in)); + }, + workspace_size); +#endif // PADDLE_WITH_HIP + } + if (channel_last) { + TransToChannelLast(ctx, &transformed_dx_channel, dx); + } + } +} + +template +void Conv3dTransposeGradGPUDNNKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const DenseTensor& dout, + const std::vector& strides, + const std::vector& paddings_, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations_, + const std::string& data_format, + DenseTensor* dx, + DenseTensor* dfilter) { + ConvTransposeGradRawGPUDNNKernel(ctx, + x, + filter, + dout, + strides, + paddings_, + padding_algorithm, + groups, + dilations_, + data_format, + dx, + dfilter); +} + +} // namespace phi + +using float16 = phi::dtype::float16; + +#ifdef PADDLE_WITH_HIP +// MIOPEN do not support double +PD_REGISTER_KERNEL(conv2d_transpose_grad, + GPUDNN, + ALL_LAYOUT, + phi::Conv2dTransposeGradGPUDNNKernel, + float, + float16) {} +PD_REGISTER_KERNEL(conv2d_transpose_grad_grad, + GPUDNN, + ALL_LAYOUT, + phi::Conv2dTransposeDoubleGradGPUDNNKernel, + float, + float16) {} +PD_REGISTER_KERNEL(conv3d_transpose_grad, + GPUDNN, + ALL_LAYOUT, + phi::Conv3dTransposeGradGPUDNNKernel, + float, + float16) {} +#else +PD_REGISTER_KERNEL(conv2d_transpose_grad, + GPUDNN, + ALL_LAYOUT, + phi::Conv2dTransposeGradGPUDNNKernel, + float, + double, + float16) {} +PD_REGISTER_KERNEL(conv2d_transpose_grad_grad, + GPUDNN, + ALL_LAYOUT, + phi::Conv2dTransposeDoubleGradGPUDNNKernel, + float, + double, + float16) {} +PD_REGISTER_KERNEL(conv3d_transpose_grad, + GPUDNN, + ALL_LAYOUT, + phi::Conv3dTransposeGradGPUDNNKernel, + float, + double, + float16) {} +#endif diff --git a/paddle/phi/kernels/gpudnn/conv_transpose_kernel.cu b/paddle/phi/kernels/gpudnn/conv_transpose_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..5de2df4a70c88e5ead803493555438ae675cf45e --- /dev/null +++ b/paddle/phi/kernels/gpudnn/conv_transpose_kernel.cu @@ -0,0 +1,381 @@ +/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/phi/kernels/conv_transpose_kernel.h" + +#include +#include "paddle/phi/backends/dynload/cudnn.h" +#include "paddle/phi/common/float16.h" +#include "paddle/phi/core/ddim.h" +#include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/kernels/cpu/conv_util.h" +#include "paddle/phi/kernels/funcs/padding.h" +#include "paddle/phi/kernels/funcs/slice.h" +#include "paddle/phi/kernels/transpose_kernel.h" + +#ifdef PADDLE_WITH_HIP +#include "paddle/fluid/operators/conv_miopen_helper.h" +#include "paddle/fluid/platform/device/gpu/rocm/miopen_helper.h" +#else +#include "paddle/fluid/operators/conv_cudnn_helper.h" +#include "paddle/fluid/platform/device/gpu/cuda/cudnn_helper.h" +#endif + +namespace phi { + +using GPUDNNDataLayout = paddle::platform::DataLayout; + +template +void ConvTransposeRawGPUDNNKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const std::vector& strides, + const std::vector& paddings, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + DenseTensor* out) { + std::vector paddings_ = paddings; + std::vector dilations_ = + dilations; // cudnn v5 does not support dilations + const T* filter_data = filter.data(); + const GPUDNNDataLayout data_layout = + (data_format != "NHWC" ? 
GPUDNNDataLayout::kNCHW + : GPUDNNDataLayout::kNHWC); + std::vector x_vec = vectorize(x.dims()); + std::vector out_vec = vectorize(out->dims()); + // if channel_last, transpose to channel_first + DenseTensor x_transpose; + if (data_layout == GPUDNNDataLayout::kNHWC) { + if (strides.size() == 2U) { + std::vector axis = {0, 3, 1, 2}; + for (size_t i = 0; i < axis.size(); ++i) { + x_vec[i] = x.dims()[axis[i]]; + out_vec[i] = out->dims()[axis[i]]; + } + x_transpose = Transpose(ctx, x, axis); + } else if (strides.size() == 3U) { + std::vector axis = {0, 4, 1, 2, 3}; + for (size_t i = 0; i < axis.size(); ++i) { + x_vec[i] = x.dims()[axis[i]]; + out_vec[i] = out->dims()[axis[i]]; + } + x_transpose = Transpose(ctx, x, axis); + } + } else { + x_transpose = x; + } + + // update padding and dilation + auto x_dims = x_transpose.dims(); + auto filter_dims = filter.dims(); + DDim x_data_dims; + x_data_dims = slice_ddim(x_dims, 2, x_dims.size()); + DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size()); + std::vector ksize = vectorize(filter_data_dims); + UpdatePaddingAndDilation( + &paddings_, &dilations_, padding_algorithm, x_data_dims, strides, ksize); + + int data_dim = strides.size(); // 2d or 3d + bool is_sys_pad = funcs::IsSymmetricPadding(paddings_, data_dim); + + std::vector x_pad(x_dims.size() * 2, 0); + DenseTensor transformed_x; + std::vector padding_common(data_dim, 0); + if (!is_sys_pad) { + std::vector padding_diff(data_dim); + std::vector new_x_shape_vec(data_dim + 2); + new_x_shape_vec[0] = x_dims[0]; + new_x_shape_vec[1] = x_dims[1]; + + for (size_t i = 0; i < data_dim; ++i) { + padding_diff[i] = std::abs(paddings_[2 * i] - paddings_[2 * i + 1]); + padding_common[i] = std::min(paddings_[2 * i], paddings_[2 * i + 1]); + new_x_shape_vec[i + 2] = x_dims[i + 2] + padding_diff[i]; + x_pad[2 * i + 4] = paddings_[2 * i] - padding_common[i]; + x_pad[2 * i + 4 + 1] = paddings_[2 * i + 1] - padding_common[i]; + } + DDim new_x_shape(make_ddim(new_x_shape_vec)); + transformed_x.Resize(new_x_shape); + ctx.template Alloc(&transformed_x); + + const int rank = x_dims.size(); + T pad_value(0.0); + switch (rank) { + case 4: { + funcs::PadFunction( + ctx, x_pad, x_transpose, pad_value, &transformed_x); + } break; + case 5: { + funcs::PadFunction( + ctx, x_pad, x_transpose, pad_value, &transformed_x); + } break; + default: + PADDLE_THROW(errors::InvalidArgument( + "Op(ConvTranspose) only supports 4-D or 5-D x DenseTensor.")); + } + } else { + transformed_x = x_transpose; + if (paddings_.size() == data_dim) { + for (size_t i = 0; i < data_dim; ++i) { + padding_common[i] = paddings_[i]; + } + } else { + for (size_t i = 0; i < data_dim; ++i) { + padding_common[i] = paddings_[2 * i]; + } + } + } + + std::vector starts(data_dim, 0); + std::vector ends(data_dim, 0); + std::vector axes(data_dim, 0); + for (size_t i = 0; i < data_dim; ++i) { + starts[i] = x_pad[2 * i + 4] * (strides[i] + 1); + ends[i] = starts[i] + out_vec[i + 2]; + axes[i] = i + 2; + } + + const T* x_data = transformed_x.data(); + x_vec = vectorize(transformed_x.dims()); + + std::vector transformed_out_vec = out_vec; + for (size_t i = 0; i < data_dim; ++i) { + transformed_out_vec[i + 2] = + out_vec[i + 2] + (x_pad[2 * i + 4] + x_pad[2 * i + 5]) * strides[i] - + 2 * padding_common[i] + paddings_[2 * i] + paddings_[2 * i + 1]; + } + + DenseTensor transformed_out; + if (!is_sys_pad) { + transformed_out.Resize(make_ddim(transformed_out_vec)); + ctx.template Alloc(&transformed_out); + } else { + ctx.template Alloc(out); + 
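// cuDNN accepts only one padding value per spatial dimension. Asymmetric
// paddings were therefore reduced above to the common (minimum) value, with
// the difference applied by explicitly padding the input; the surplus output
// is sliced away after the convolution. With symmetric padding none of that
// is needed, so the result can be written straight into out's buffer.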
transformed_out.ShareDataWith(*out); + transformed_out.Resize(make_ddim(transformed_out_vec)); + } + T* transformed_out_data = transformed_out.data(); + + GPUDNNDataLayout layout; + + int iwo_groups = groups; + int c_groups = 1; +#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1) + iwo_groups = 1; + c_groups = groups; + groups = 1; +#endif + + if (strides.size() == 2U) { + layout = GPUDNNDataLayout::kNCHW; + } else { + layout = GPUDNNDataLayout::kNCDHW; + } + + size_t workspace_size = 0; +#ifdef PADDLE_WITH_HIP + miopenConvBwdDataAlgorithm_t algo{}; +#else + cudnnConvolutionBwdDataAlgo_t algo{}; +#endif + // ------------------- cudnn conv algorithm --------------------- + auto handle = ctx.cudnn_handle(); + auto layout_tensor = paddle::platform::GetCudnnTensorFormat(layout); + bool deterministic = FLAGS_cudnn_deterministic; + + auto dtype = paddle::platform::CudnnDataType::type; + // ------------------- cudnn descriptors --------------------- + paddle::operators::ConvArgs args{&transformed_out, + &filter, + &transformed_x, + strides, + padding_common, + dilations_, + dtype}; + args.handle = handle; + args.idesc.set(transformed_out, iwo_groups); + args.wdesc.set(filter, layout_tensor, iwo_groups); + args.odesc.set(transformed_x, iwo_groups); + args.cdesc.set(dtype, + padding_common, + strides, + dilations_, + paddle::platform::AllowTF32Cudnn(), + c_groups); + +#ifdef PADDLE_WITH_HIP + using search = + paddle::operators::SearchAlgorithm; + workspace_size = std::max(workspace_size, search::GetWorkspaceSize(args)); + algo = search::Find(args, false, deterministic, workspace_size, ctx); +#else + using search = + paddle::operators::SearchAlgorithm; + algo = search::Find(args, false, deterministic, ctx); + workspace_size = + std::max(workspace_size, search::GetWorkspaceSize(args, algo)); +#endif + + // ------------------- cudnn conv transpose forward --------------------- + int x_offset = transformed_x.numel() / transformed_x.dims()[0] / groups; + int out_offset = transformed_out.numel() / transformed_out.dims()[0] / groups; + int filter_offset = filter.numel() / groups; + paddle::operators::ScalingParamType alpha = 1.0f; + paddle::operators::ScalingParamType beta = 0.0f; + auto workspace_handle = ctx.cudnn_workspace_handle(); + for (int g = 0; g < groups; g++) { +#ifdef PADDLE_WITH_HIP + auto cudnn_func = [&](void* cudnn_workspace) { + PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenConvolutionBackwardData( + handle, + &alpha, + args.odesc.desc(), + x_data + x_offset * g, + args.wdesc.desc(), + filter_data + filter_offset * g, + args.cdesc.desc(), + algo, + &beta, + args.idesc.desc(), + transformed_out_data + out_offset * g, + cudnn_workspace, + workspace_size)); + }; +#else // PADDLE_WITH_HIP + auto cudnn_func = [&](void* cudnn_workspace) { + PADDLE_ENFORCE_GPU_SUCCESS(dynload::cudnnConvolutionBackwardData( + handle, + &alpha, + args.wdesc.desc(), + filter_data + filter_offset * g, + args.odesc.desc(), + x_data + x_offset * g, + args.cdesc.desc(), + algo, + cudnn_workspace, + workspace_size, + &beta, + args.idesc.desc(), + transformed_out_data + out_offset * g)); + }; +#endif // PADDLE_WITH_HIP + workspace_handle.RunFunc(cudnn_func, workspace_size); + } + if (!is_sys_pad && strides.size() == 2U) { + funcs::Slice(ctx, &transformed_out, out, starts, ends, axes); + } else if (!is_sys_pad && strides.size() == 3U) { + funcs::Slice(ctx, &transformed_out, out, starts, ends, axes); + } + + if (data_layout == GPUDNNDataLayout::kNHWC) { + DenseTensor out_transpose; + DenseTensor out_nchw; + 
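// Wrap the NCHW result in out_nchw without copying, transpose it back to the
// requested NHWC layout, and return the transposed tensor through out.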
out_nchw.ShareDataWith(*out); + out_nchw.Resize(make_ddim(out_vec)); + + if (strides.size() == 2U) { + out_transpose = Transpose(ctx, out_nchw, {0, 2, 3, 1}); + } else if (strides.size() == 3U) { + out_transpose = Transpose(ctx, out_nchw, {0, 2, 3, 4, 1}); + } + *out = out_transpose; + } +} + +template +void Conv2dTransposeGPUDNNKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + DenseTensor* out) { + ConvTransposeRawGPUDNNKernel(ctx, + x, + filter, + strides, + paddings, + padding_algorithm, + groups, + dilations, + data_format, + out); +} + +template +void Conv3dTransposeGPUDNNKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + DenseTensor* out) { + ConvTransposeRawGPUDNNKernel(ctx, + x, + filter, + strides, + paddings, + padding_algorithm, + groups, + dilations, + data_format, + out); +} + +} // namespace phi + +using float16 = phi::dtype::float16; + +#ifdef PADDLE_WITH_HIP +// MIOPEN do not support double +PD_REGISTER_KERNEL(conv2d_transpose, + GPUDNN, + ALL_LAYOUT, + phi::Conv2dTransposeGPUDNNKernel, + float, + float16) {} +PD_REGISTER_KERNEL(conv3d_transpose, + GPUDNN, + ALL_LAYOUT, + phi::Conv3dTransposeGPUDNNKernel, + float, + float16) {} +#else +PD_REGISTER_KERNEL(conv2d_transpose, + GPUDNN, + ALL_LAYOUT, + phi::Conv2dTransposeGPUDNNKernel, + float, + double, + float16) {} +PD_REGISTER_KERNEL(conv3d_transpose, + GPUDNN, + ALL_LAYOUT, + phi::Conv3dTransposeGPUDNNKernel, + float, + double, + float16) {} +#endif diff --git a/paddle/phi/kernels/impl/conv_transpose_grad_kernel_impl.h b/paddle/phi/kernels/impl/conv_transpose_grad_kernel_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..d4fd952a670012900b2152a9236c5c6a2861c6c7 --- /dev/null +++ b/paddle/phi/kernels/impl/conv_transpose_grad_kernel_impl.h @@ -0,0 +1,364 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
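// This header holds the layout-agnostic reference implementation of the
// conv_transpose gradients (im2col/vol2col + GEMM); it is the fallback that
// the non-cuDNN GPU kernels above include, and it mirrors the forward path in
// conv_transpose_kernel_impl.h. A rough sketch of the per-sample shape
// algebra for the 2-D NCHW case, following the comments in the code below
// (groups omitted for brevity):
//
//   col     = im2col(dout)  : (o_c * k_h * k_w, i_h * i_w)
//   dx      = filter * col  : (i_c, o_c * k_h * k_w) x (o_c * k_h * k_w, i_h * i_w)
//                               -> (i_c, i_h * i_w)
//   dfilter = x * col^T     : (i_c, i_h * i_w) x (i_h * i_w, o_c * k_h * k_w)
//                               -> (i_c, o_c * k_h * k_w)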
+ +#pragma once + +#include "paddle/phi/kernels/conv_transpose_grad_kernel.h" + +#include "paddle/fluid/operators/math/im2col.h" +#include "paddle/fluid/operators/math/vol2col.h" +#include "paddle/phi/common/layout.h" +#include "paddle/phi/core/ddim.h" +#include "paddle/phi/kernels/cpu/conv_util.h" +#include "paddle/phi/kernels/funcs/blas/blas.h" +#include "paddle/phi/kernels/funcs/concat_and_split_functor.h" +#include "paddle/phi/kernels/funcs/slice.h" + +namespace phi { + +template +void ConvTransposeGradRawKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const DenseTensor& dout, + const std::vector& strides, + const std::vector& paddings, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + DenseTensor* dx, + DenseTensor* dfilter) { + const DataLayout data_layout = + paddle::framework::StringToDataLayout(data_format); + // For filter, we do not use const pointer because we will do reshape, + // but we should avoid modifying its value. + DenseTensor filter_ = filter; + + if ((!dx) && (!dfilter)) { + return; + } + + std::vector paddings_ = paddings; + std::vector dilations_ = dilations; + + auto x_dims = x.dims(); + auto filter_dims = filter_.dims(); + auto dout_dims = dout.dims(); + const int batch_size = static_cast(x.dims()[0]); + + DDim in_data_dims; + if (data_layout != DataLayout::kNHWC) { + in_data_dims = slice_ddim(x_dims, 2, x_dims.size()); + } else { + in_data_dims = slice_ddim(x_dims, 1, x_dims.size() - 1); + } + DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size()); + std::vector ksize = vectorize(filter_data_dims); + UpdatePaddingAndDilation( + &paddings_, &dilations_, padding_algorithm, in_data_dims, strides, ksize); + + // x_shape_vec: {n, c, h, w} or {n, c, d, h, w} for channel_first + // x_shape_vec: {n, h, w, c} or {n, d, h, w, c} for channel_last + std::vector x_shape_vec = vectorize(x.dims()); + // filter_shape_vec: {i_c, o_c, k_h, k_w} or {i_c, o_c, k_d, k_h, k_w} + std::vector filter_shape_vec = vectorize(filter_.dims()); + + // use col_shape in the im2col and col2im (or vol2col and col2vol) + // calculation + // col_shape_vec: {o_c, k_h, k_w, h, w} or {o_c, k_d, k_h, k_w, d, h, w} for + size_t data_dim = filter_shape_vec.size() - 2; + std::vector col_shape_vec(1 + 2 * data_dim); + if (data_layout != DataLayout::kNHWC) { + col_shape_vec[0] = dout_dims[1]; + for (size_t j = 0; j < data_dim; ++j) { + col_shape_vec[j + 1] = filter_shape_vec[j + 2]; + col_shape_vec[j + 1 + data_dim] = x_shape_vec[j + 2]; + } + } else { + col_shape_vec[0] = dout_dims[dout_dims.size() - 1]; + for (size_t j = 0; j < data_dim; ++j) { + col_shape_vec[j + 1] = filter_shape_vec[j + 2]; + col_shape_vec[j + 1 + data_dim] = x_shape_vec[j + 1]; + } + } + DDim col_shape(make_ddim(col_shape_vec)); + + // use col_matrix_shape in the gemm calculation + // size: (o_c * k_h * k_w, h * w) or (o_c * k_d * k_h * k_w, d * h * w) + DDim col_matrix_shape = flatten_to_2d(col_shape, data_dim + 1); + + // output size: (o_c, o_h, o_w) or (o_c, o_d, o_h, o_w) for channel_first + // output size: (o_h, o_w, o_c) or (o_d, o_h, o_w, o_c) for channel_last + DDim output_shape = slice_ddim(dout.dims(), 1, dout.dims().size()); + + // x matrix size: (i_c, h * w) or (i_c, d * h * w) for channel_first + // x matrix size: (h * w, i_c) or (d * h * w, i_c) for channel_last + DDim x_matrix_shape; + if (data_layout != DataLayout::kNHWC) { + x_matrix_shape = {x_dims[1], col_matrix_shape[1]}; + } else { + 
x_matrix_shape = {col_matrix_shape[1], x_dims[x_dims.size() - 1]}; + } + + // filter size: (i_c, o_c/g * k_h * k_w) or (i_c, o_c/g * k_d * k_h * k_w) + DDim filter_matrix_shape; + if (data_layout != DataLayout::kNHWC) { + filter_matrix_shape = {x_dims[1], col_matrix_shape[0] / groups}; + } else { + filter_matrix_shape = {x_dims[x_dims.size() - 1], + col_matrix_shape[0] / groups}; + } + filter_.Resize(filter_matrix_shape); + + int in_step = (data_layout != DataLayout::kNHWC + ? static_cast(x_dims[1]) / groups + : static_cast(x_dims[x_dims.size() - 1]) / groups); + int col_step = static_cast(col_matrix_shape[0]) / groups; + + // convolution transpose grad on x: + // im2col + gemm (similar to conv-forward) + // x need to compute gradient + auto blas = funcs::GetBlas(ctx); + if (dx || dfilter) { + DenseTensor col; + col.Resize(col_shape); + ctx.template Alloc(&col); + // col_matrix shares the same piece of data with col, + // but will be reshaped into a two-dimensional matrix shape + // to call the matrix multiplication interface. + DenseTensor col_matrix; + col_matrix.ShareDataWith(col); + col_matrix.Resize(col_matrix_shape); + + DenseTensor dfilter_; + funcs::SetConstant set_zero; + + paddle::operators::math:: + Im2ColFunctor + im2col; + paddle::operators::math::Vol2ColFunctor vol2col; + funcs::ConcatFunctor concat_functor; + + if (dx) { + ctx.template Alloc(dx); + set_zero(ctx, dx, static_cast(0)); + } + if (dfilter) { // dfilter_ size (i_c, o_c/g, k_h, k_w) + ctx.template Alloc(dfilter); + set_zero(ctx, dfilter, static_cast(0)); + dfilter_ = *dfilter; + dfilter_.Resize(filter_matrix_shape); + } + + size_t D = x.dims().size(); + for (int i = 0; i < batch_size; i++) { + // batch with size (o_c, o_h, o_w) or (o_c, o_d, o_h, o_w) for + // channel_first + // batch with size (o_h, o_w, o_c) or (o_d, o_h, o_w, o_c) for + // channel_last + DenseTensor dout_batch = dout.Slice(i, i + 1).Resize(output_shape); + + if (data_dim == 2U) { + // im2col: dy -> col matrix + // from (o_c, o_h, o_w) to (o_c * k_h * k_w, i_h * i_w) for + // channel_first + // from (o_h, o_w, o_c) to (o_c * k_h * k_w, i_h * i_w) for + // channel_last + im2col(ctx, + dout_batch, + dilations_, + strides, + std::vector{ + paddings_[0], paddings_[2], paddings_[1], paddings_[3]}, + &col, + data_layout); + } else if (data_dim == 3U) { + // vol2col: dy -> col_matrix + // from (o_c, o_d, o_h, o_w) to (o_c * k_d * k_h * k_w, i_d * i_h * + // i_w) for channel_first + // from (o_d, o_h, o_w, o_c) to (i_d * i_h * i_w, o_c * k_d * k_h * + // k_w) for channel_last + vol2col( + ctx, dout_batch, dilations_, strides, paddings_, &col, data_layout); + } + if (dx) { + // batch with size (i_c, i_h, i_w) or (i_h, i_w, i_c) + DenseTensor dx_batch = dx->Slice(i, i + 1).Resize(x_matrix_shape); + + // gemm: dx = filter * dy + // (i_c, o_c * k_h * k_w) * (o_c * k_h * k_w, i_h * i_w) -> (i_c, i_h + // * i_w) + // or + // (i_c, o_c * k_d * k_h * k_w) * (o_c * k_d * k_h * k_w, i_d * i_h * + // i_w) -> (i_c, + // i_d, i_h, i_w) + // gemm: dx = dy^T * filter^T for channel_last + + std::vector dx_batch_vec; + for (int g = 0; g < groups; g++) { + // dx_slice: (i_c/g, i_h * i_w) or (i_c/g, i_d * i_h * i_w) + // for channel_first + // dx_slice: (i_h * i_w, i_c/g) or (i_d * i_h * i_w, i_c/g) + // for channel_last + // filter_slice: (i_c/g, o_c/g * k_h * k_w) + DenseTensor filter_slice = + filter_.Slice(g * in_step, (g + 1) * in_step); + // col_matrix_slice: (o_c/g * k_h * k_w, h * w) or (o_c/g * k_d * + // k_h * k_w, d * h * w) + DenseTensor col_matrix_slice = + 
col_matrix.Slice(g * col_step, (g + 1) * col_step); + if (data_layout != DataLayout::kNHWC) { + DenseTensor dx_slice = + dx_batch.Slice(g * in_step, (g + 1) * in_step); + blas.MatMul(filter_slice, + false, + col_matrix_slice, + false, + static_cast(1.0), + &dx_slice, + static_cast(0.0)); + } else { + DenseTensor dx_slice; + funcs::Slice( + ctx, &dx_batch, &dx_slice, g * in_step, (g + 1) * in_step, 1); + blas.MatMul(col_matrix_slice, + true, + filter_slice, + true, + static_cast(1.0), + &dx_slice, + static_cast(0.0)); + DDim dx_slice_shape; + if (data_dim == 2U) { + dx_slice_shape = {x_dims[1], x_dims[2], in_step}; + } else { + dx_slice_shape = {x_dims[1], x_dims[2], x_dims[3], in_step}; + } + dx_slice = dx_slice.Resize(dx_slice_shape); + dx_batch_vec.push_back(dx_slice); + } + } + if (data_layout == DataLayout::kNHWC) { + concat_functor(ctx, dx_batch_vec, static_cast(D - 2), &dx_batch); + } + } + if (dfilter) { + // x batch: (i_c, i_h * i_w) or (i_h, i_w * i_c) + DenseTensor in_batch = x.Slice(i, i + 1).Resize(x_matrix_shape); + // gemm: d_filter = x * dy^T + // (i_c, i_h * i_w) * (i_h * i_w, o_c * k_h * k_w) -> (i_c, o_c * k_h + // * k_w) + // or + // (i_c, i_d * i_h * i_w) * (i_d * i_h * i_w, o_c * k_d * k_h * k_w) + // -> (i_c, o_c * k_d * + // k_h * k_w) + // gemm: d_filter = x^T * dy^T for channel_last + + for (int g = 0; g < groups; g++) { + DenseTensor dfilter_slice = + dfilter_.Slice(g * in_step, (g + 1) * in_step); + DenseTensor col_matrix_slice = + col_matrix.Slice(g * col_step, (g + 1) * col_step); + if (data_layout != DataLayout::kNHWC) { + DenseTensor in_batch_slice = + in_batch.Slice(g * in_step, (g + 1) * in_step); + blas.MatMul(in_batch_slice, + false, + col_matrix_slice, + true, + static_cast(1.0), + &dfilter_slice, + static_cast(1.0)); + } else { + DenseTensor in_batch_slice; + funcs::Slice(ctx, + &in_batch, + &in_batch_slice, + g * in_step, + (g + 1) * in_step, + 1); + blas.MatMul(in_batch_slice, + true, + col_matrix_slice, + true, + static_cast(1.0), + &dfilter_slice, + static_cast(1.0)); + } + } + } + } + } +} + +template +void Conv2dTransposeGradKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const DenseTensor& dout, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + DenseTensor* dx, + DenseTensor* dfilter) { + ConvTransposeGradRawKernel(ctx, + x, + filter, + dout, + strides, + paddings, + padding_algorithm, + groups, + dilations, + data_format, + dx, + dfilter); +} + +template +void Conv3dTransposeGradKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const DenseTensor& dout, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const std::vector& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + DenseTensor* dx, + DenseTensor* dfilter) { + ConvTransposeGradRawKernel(ctx, + x, + filter, + dout, + strides, + paddings, + padding_algorithm, + groups, + dilations, + data_format, + dx, + dfilter); +} + +} // namespace phi diff --git a/paddle/phi/kernels/impl/conv_transpose_kernel_impl.h b/paddle/phi/kernels/impl/conv_transpose_kernel_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..ee2faf761fe3263c892248ca2f243f7f86d7d038 --- /dev/null +++ 
b/paddle/phi/kernels/impl/conv_transpose_kernel_impl.h @@ -0,0 +1,278 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/phi/kernels/conv_transpose_kernel.h" + +#include "paddle/fluid/operators/math/im2col.h" +#include "paddle/fluid/operators/math/vol2col.h" +#include "paddle/phi/common/layout.h" +#include "paddle/phi/core/ddim.h" +#include "paddle/phi/kernels/cpu/conv_util.h" +#include "paddle/phi/kernels/funcs/blas/blas.h" +#include "paddle/phi/kernels/funcs/concat_and_split_functor.h" +#include "paddle/phi/kernels/funcs/slice.h" + +namespace phi { + +template +void ConvTransposeRawKernel(const Context& ctx, + const DenseTensor& x, + const DenseTensor& filter, + const std::vector& strides, + const std::vector& paddings, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + DenseTensor* out) { + const DataLayout data_layout = + paddle::framework::StringToDataLayout(data_format); + // The filter will be reshaped, so it should not be constant + DenseTensor filter_ = filter; + std::vector paddings_ = paddings; + std::vector dilations_ = dilations; + + auto x_dims = x.dims(); + auto filter_dims = filter_.dims(); + auto out_dims = out->dims(); + const int batch_size = static_cast(x.dims()[0]); + + DDim in_data_dims; + if (data_layout != DataLayout::kNHWC) { + in_data_dims = slice_ddim(x_dims, 2, x_dims.size()); + } else { + in_data_dims = slice_ddim(x_dims, 1, x_dims.size() - 1); + } + DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size()); + std::vector ksize = vectorize(filter_data_dims); + UpdatePaddingAndDilation( + &paddings_, &dilations_, padding_algorithm, in_data_dims, strides, ksize); + + // x_shape_vec: {n, c, h, w} or {n, c, d, h, w} for channel_first + // x_shape_vec: {n, h, w, c} or {n, d, h, w, c} for channel_last + std::vector x_shape_vec = vectorize(x.dims()); + // filter_shape_vec: {k_o, k_i, k_h, k_w} or {k_o, k_i, k_d, k_h, k_w} + std::vector filter_shape_vec = vectorize(filter_.dims()); + + // use col_shape in the im2col and col2im (or vol2col and col2vol) + // calculation + // col_shape_vec: {o_c/g, k_h, k_w, h, w} or {o_c/g, k_d, k_h, k_w, d, h, w} + size_t data_dim = filter_shape_vec.size() - 2; + std::vector col_shape_vec(1 + 2 * data_dim); + if (data_layout != DataLayout::kNHWC) { + col_shape_vec[0] = out_dims[1] / groups; + for (size_t j = 0; j < data_dim; ++j) { + col_shape_vec[j + 1] = filter_shape_vec[j + 2]; + col_shape_vec[j + 1 + data_dim] = x_shape_vec[j + 2]; + } + } else { + col_shape_vec[0] = out_dims[out_dims.size() - 1] / groups; + for (size_t j = 0; j < data_dim; ++j) { + col_shape_vec[j + 1] = filter_shape_vec[j + 2]; + col_shape_vec[j + 1 + data_dim] = x_shape_vec[j + 1]; + } + } + DDim col_shape(make_ddim(col_shape_vec)); + + // use col_matrix_shape in the gemm calculation + // size: (o_c/g * k_h * k_w, h * w) or (o_c/g * k_d * k_h * k_w, d * h * w) + DDim 
+  DDim col_matrix_shape = flatten_to_2d(col_shape, data_dim + 1);
+
+  DenseTensor col;
+  col.Resize(col_shape);
+  ctx.template Alloc<T>(&col);
+  // col_matrix shares the same piece of data with col,
+  // but will be reshaped into a two-dimensional matrix shape
+  // to call the matrix multiplication interface.
+  DenseTensor col_matrix;
+  col_matrix.ShareDataWith(col);
+  col_matrix.Resize(col_matrix_shape);
+
+  // out size: (o_c, o_h, o_w) or (o_c, o_d, o_h, o_w) for channel_first
+  // out size: (o_h, o_w, o_c) or (o_d, o_h, o_w, o_c) for channel_last
+  DDim out_shape = slice_ddim(out->dims(), 1, out->dims().size());
+
+  // x matrix size: (i_c, h * w) or (i_c, d * h * w) for channel_first
+  // x matrix size: (h * w, i_c) or (d * h * w, i_c) for channel_last
+  DDim x_matrix_shape;
+  if (data_layout != DataLayout::kNHWC) {
+    x_matrix_shape = {x_dims[1], col_matrix_shape[1]};
+  } else {
+    x_matrix_shape = {col_matrix_shape[1], x_dims[x_dims.size() - 1]};
+  }
+
+  // filter size: (i_c, o_c/g * k_h * k_w) or (i_c, o_c/g * k_d * k_h * k_w)
+  DDim filter_matrix_shape;
+  if (data_layout != DataLayout::kNHWC) {
+    filter_matrix_shape = {x_dims[1], col_matrix_shape[0]};
+  } else {
+    filter_matrix_shape = {x_dims[x_dims.size() - 1], col_matrix_shape[0]};
+  }
+  filter_.Resize(filter_matrix_shape);
+
+  ctx.template Alloc<T>(out);
+
+  funcs::SetConstant<Context, T> set_zero;
+
+  auto blas = funcs::GetBlas<Context, T>(ctx);
+  set_zero(ctx, out, static_cast<T>(0));
+
+  int in_step = (data_layout != DataLayout::kNHWC
+                     ? static_cast<int>(x_dims[1]) / groups
+                     : static_cast<int>(x_dims[x_dims.size() - 1]) / groups);
+
+  int out_step =
+      (data_layout != DataLayout::kNHWC
+           ? static_cast<int>(out_dims[1]) / groups
+           : static_cast<int>(out_dims[out_dims.size() - 1]) / groups);
+  paddle::operators::math::
+      Col2ImFunctor<paddle::operators::math::ColFormat::kCFO, Context, T>
+          col2im;
+  paddle::operators::math::Col2VolFunctor<Context, T> col2vol;
+  funcs::ConcatFunctor<Context, T> concat_functor;
+
+  // convolution transpose: gemm + col2im or col2vol (similar to conv-backward
+  // on x)
+  size_t D = x.dims().size();
+  for (int i = 0; i < batch_size; i++) {
+    // batch with size (i_c, h * w) or (i_c, d * h * w) for channel_first
+    // batch with size (h * w, i_c) or (d * h * w, i_c) for channel_last
+    DenseTensor x_batch = x.Slice(i, i + 1).Resize(x_matrix_shape);
+
+    // out size: (o_c, o_h, o_w) or (o_c, o_d, o_h, o_w) for channel_first
+    // out size: (o_h, o_w, o_c) or (o_d, o_h, o_w, o_c) for channel_last
+    DenseTensor out_batch = out->Slice(i, i + 1).Resize(out_shape);
+
+    std::vector<DenseTensor> out_batch_vec;
+    for (int g = 0; g < groups; g++) {
+      int64_t start = g * in_step;
+      int64_t end = (g + 1) * in_step;
+      int axes = (data_layout != DataLayout::kNHWC ? 0 : 1);
+      DenseTensor filter_slice = filter_.Slice(g * in_step, (g + 1) * in_step);
+      DenseTensor in_slice, out_slice;
+
+      // col_matrix = filter_slice^T * x_slice
+      // of shape (o_c/g * k_h * k_w, h * w)
+      // or (o_c/g * k_d * k_h * k_w, d * h * w)
+      if (data_layout != DataLayout::kNHWC) {
+        in_slice = x_batch.Slice(g * in_step, (g + 1) * in_step);
+        out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step);
+        blas.MatMul(filter_slice,
+                    true,
+                    in_slice,
+                    false,
+                    static_cast<T>(1.0),
+                    &col_matrix,
+                    static_cast<T>(0.0));
+      } else {
+        funcs::Slice<Context, T, 2>(ctx, &x_batch, &in_slice, start, end, axes);
+        start = g * out_step;
+        end = (g + 1) * out_step;
+        axes = D - 2;
+        if (D == 4U) {
+          funcs::Slice<Context, T, 3>(
+              ctx, &out_batch, &out_slice, start, end, axes);
+        } else if (D == 5U) {
+          funcs::Slice<Context, T, 4>(
+              ctx, &out_batch, &out_slice, start, end, axes);
+        }
+        blas.MatMul(filter_slice,
+                    true,
+                    in_slice,
+                    true,
+                    static_cast<T>(1.0),
+                    &col_matrix,
+                    static_cast<T>(0.0));
+      }
+
+      if (data_dim == 2U) {
+        // col2im: col_matrix -> out from (o_c/g * k_h * k_w, h * w) to (o_c/g,
+        // o_h, o_w) or (o_h, o_w, o_c/g)
+        col2im(ctx,
+               col,
+               dilations_,
+               strides,
+               std::vector<int>{
+                   paddings_[0], paddings_[2], paddings_[1], paddings_[3]},
+               &out_slice,
+               data_layout);
+      } else if (data_dim == 3U) {
+        // col2vol: col_matrix -> out from (o_c/g * k_d * k_h * k_w, d * h * w)
+        // to (o_c/g, o_d, o_h, o_w) or (o_d, o_h, o_w, o_c/g)
+        col2vol(
+            ctx, col, dilations_, strides, paddings_, &out_slice, data_layout);
+      }
+      if (data_layout == DataLayout::kNHWC) {
+        out_batch_vec.push_back(out_slice);
+      }
+    }
+    if (data_layout == DataLayout::kNHWC) {
+      concat_functor(ctx, out_batch_vec, static_cast<int>(D - 2), &out_batch);
+    }
+  }
+}
+
+template <typename T, typename Context>
+void Conv2dTransposeKernel(const Context& ctx,
+                           const DenseTensor& x,
+                           const DenseTensor& filter,
+                           const std::vector<int>& strides,
+                           const std::vector<int>& paddings,
+                           const std::vector<int>& output_padding,
+                           const std::vector<int>& output_size,
+                           const std::string& padding_algorithm,
+                           int groups,
+                           const std::vector<int>& dilations,
+                           const std::string& data_format,
+                           DenseTensor* out) {
+  ConvTransposeRawKernel<T, Context>(ctx,
+                                     x,
+                                     filter,
+                                     strides,
+                                     paddings,
+                                     padding_algorithm,
+                                     groups,
+                                     dilations,
+                                     data_format,
+                                     out);
+}
+
+template <typename T, typename Context>
+void Conv3dTransposeKernel(const Context& ctx,
+                           const DenseTensor& x,
+                           const DenseTensor& filter,
+                           const std::vector<int>& strides,
+                           const std::vector<int>& paddings,
+                           const std::vector<int>& output_padding,
+                           const std::vector<int>& output_size,
+                           const std::string& padding_algorithm,
+                           int groups,
+                           const std::vector<int>& dilations,
+                           const std::string& data_format,
+                           DenseTensor* out) {
+  ConvTransposeRawKernel<T, Context>(ctx,
+                                     x,
+                                     filter,
+                                     strides,
+                                     paddings,
+                                     padding_algorithm,
+                                     groups,
+                                     dilations,
+                                     data_format,
+                                     out);
+}
+
+} // namespace phi
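Note: the forward path above is "reshape, GEMM, then col2im/col2vol scatter-add". The snippet below is an illustrative, self-contained sketch of that pipeline for a single sample, a single group, and NCHW layout, checked against the direct scatter-add definition of a transposed convolution. All sizes, names, and the plain-vector tensors are made up for illustration; this is not Paddle code and not part of the patch.

// Sketch only: gemm + col2im for conv_transpose, verified against the
// direct definition; filter layout follows (i_c, o_c, k_h, k_w).
#include <cassert>
#include <cmath>
#include <vector>

int main() {
  const int IC = 2, OC = 3, IH = 3, IW = 3, KH = 3, KW = 3;
  const int stride = 2, pad = 1, dil = 1;
  const int OH = (IH - 1) * stride - 2 * pad + dil * (KH - 1) + 1;  // = 5
  const int OW = (IW - 1) * stride - 2 * pad + dil * (KW - 1) + 1;  // = 5

  // x viewed as (IC, IH*IW), filter viewed as (IC, OC*KH*KW) -- the reshapes
  // the kernel applies before the GEMM.
  std::vector<double> x(IC * IH * IW), w(IC * OC * KH * KW);
  for (size_t i = 0; i < x.size(); ++i) x[i] = 0.5 + 0.01 * i;
  for (size_t i = 0; i < w.size(); ++i) w[i] = std::cos(0.1 * i);

  // GEMM: col(OC*KH*KW, IH*IW) = filter^T(OC*KH*KW, IC) * x(IC, IH*IW).
  std::vector<double> col(OC * KH * KW * IH * IW, 0.0);
  for (int r = 0; r < OC * KH * KW; ++r)
    for (int s = 0; s < IH * IW; ++s)
      for (int ic = 0; ic < IC; ++ic)
        col[r * IH * IW + s] += w[ic * OC * KH * KW + r] * x[ic * IH * IW + s];

  // col2im: scatter-add every column entry into its output pixel.
  std::vector<double> out(OC * OH * OW, 0.0);
  for (int oc = 0; oc < OC; ++oc)
    for (int kh = 0; kh < KH; ++kh)
      for (int kw = 0; kw < KW; ++kw)
        for (int ih = 0; ih < IH; ++ih)
          for (int iw = 0; iw < IW; ++iw) {
            int oh = ih * stride - pad + kh * dil;
            int ow = iw * stride - pad + kw * dil;
            if (oh < 0 || oh >= OH || ow < 0 || ow >= OW) continue;
            int r = (oc * KH + kh) * KW + kw;
            out[(oc * OH + oh) * OW + ow] += col[r * IH * IW + ih * IW + iw];
          }

  // Reference: direct scatter-add definition of the transposed convolution.
  std::vector<double> ref(OC * OH * OW, 0.0);
  for (int ic = 0; ic < IC; ++ic)
    for (int oc = 0; oc < OC; ++oc)
      for (int ih = 0; ih < IH; ++ih)
        for (int iw = 0; iw < IW; ++iw)
          for (int kh = 0; kh < KH; ++kh)
            for (int kw = 0; kw < KW; ++kw) {
              int oh = ih * stride - pad + kh * dil;
              int ow = iw * stride - pad + kw * dil;
              if (oh < 0 || oh >= OH || ow < 0 || ow >= OW) continue;
              ref[(oc * OH + oh) * OW + ow] +=
                  x[ic * IH * IW + ih * IW + iw] *
                  w[(ic * OC + oc) * KH * KW + kh * KW + kw];
            }

  for (size_t i = 0; i < out.size(); ++i)
    assert(std::fabs(out[i] - ref[i]) < 1e-9);
  return 0;
}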
diff --git a/paddle/phi/ops/compat/conv_transpose_sig.cc b/paddle/phi/ops/compat/conv_transpose_sig.cc
new file mode 100644
index 0000000000000000000000000000000000000000..8697168b8274736ef0eb2db58135283928d3611c
--- /dev/null
+++ b/paddle/phi/ops/compat/conv_transpose_sig.cc
@@ -0,0 +1,141 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/phi/core/compat/op_utils.h"
+
+namespace phi {
+
+KernelSignature Conv2dTransposeOpArgumentMapping(
+    const ArgumentMappingContext& ctx) {
+  return KernelSignature("conv2d_transpose",
+                         {"Input", "Filter"},
+                         {"strides",
+                          "paddings",
+                          "output_padding",
+                          "output_size",
+                          "padding_algorithm",
+                          "groups",
+                          "dilations",
+                          "data_format"},
+                         {"Output"});
+}
+
+KernelSignature Conv2dTransposeGradOpArgumentMapping(
+    const ArgumentMappingContext& ctx) {
+  return KernelSignature("conv2d_transpose_grad",
+                         {"Input", "Filter", GradVarName("Output")},
+                         {"strides",
+                          "paddings",
+                          "output_padding",
+                          "output_size",
+                          "padding_algorithm",
+                          "groups",
+                          "dilations",
+                          "data_format"},
+                         {GradVarName("Input"), GradVarName("Filter")});
+}
+
+KernelSignature Conv2dTransposeDoubleGradOpArgumentMapping(
+    const ArgumentMappingContext& ctx) {
+  return KernelSignature("conv2d_transpose_grad_grad",
+                         {"Input", "Filter", "DOutput", "DDInput", "DDFilter"},
+                         {"strides",
+                          "paddings",
+                          "output_padding",
+                          "output_size",
+                          "padding_algorithm",
+                          "groups",
+                          "dilations",
+                          "data_format"},
+                         {"DInput", "DFilter", "DDOutput"});
+}
+
+KernelSignature Conv3dTransposeOpArgumentMapping(
+    const ArgumentMappingContext& ctx) {
+  return KernelSignature("conv3d_transpose",
+                         {"Input", "Filter"},
+                         {"strides",
+                          "paddings",
+                          "output_padding",
+                          "output_size",
+                          "padding_algorithm",
+                          "groups",
+                          "dilations",
+                          "data_format"},
+                         {"Output"});
+}
+
+KernelSignature Conv3dTransposeGradOpArgumentMapping(
+    const ArgumentMappingContext& ctx) {
+  return KernelSignature("conv3d_transpose_grad",
+                         {"Input", "Filter", GradVarName("Output")},
+                         {"strides",
+                          "paddings",
+                          "output_padding",
+                          "output_size",
+                          "padding_algorithm",
+                          "groups",
+                          "dilations",
+                          "data_format"},
+                         {GradVarName("Input"), GradVarName("Filter")});
+}
+
+KernelSignature DepthwiseConv2dTransposeOpArgumentMapping(
+    const ArgumentMappingContext& ctx) {
+  return KernelSignature("depthwise_conv2d_transpose",
+                         {"Input", "Filter"},
+                         {"strides",
+                          "paddings",
+                          "output_padding",
+                          "output_size",
+                          "padding_algorithm",
+                          "groups",
+                          "dilations",
+                          "data_format"},
+                         {"Output"});
+}
+
+KernelSignature DepthwiseConv2dTransposeGradOpArgumentMapping(
+    const ArgumentMappingContext& ctx) {
+  return KernelSignature("depthwise_conv2d_transpose_grad",
+                         {"Input", "Filter", GradVarName("Output")},
+                         {"strides",
+                          "paddings",
+                          "output_padding",
+                          "output_size",
+                          "padding_algorithm",
+                          "groups",
+                          "dilations",
+                          "data_format"},
+                         {GradVarName("Input"), GradVarName("Filter")});
+}
+
+} // namespace phi
+
+PD_REGISTER_ARG_MAPPING_FN(conv2d_transpose,
+                           phi::Conv2dTransposeOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(conv2d_transpose_grad,
+                           phi::Conv2dTransposeGradOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(conv2d_transpose_grad_grad,
+                           phi::Conv2dTransposeDoubleGradOpArgumentMapping);
+
+PD_REGISTER_ARG_MAPPING_FN(conv3d_transpose,
+                           phi::Conv3dTransposeOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(conv3d_transpose_grad,
+                           phi::Conv3dTransposeGradOpArgumentMapping);
+
+PD_REGISTER_ARG_MAPPING_FN(depthwise_conv2d_transpose,
+                           phi::DepthwiseConv2dTransposeOpArgumentMapping);
+PD_REGISTER_ARG_MAPPING_FN(depthwise_conv2d_transpose_grad,
+                           phi::DepthwiseConv2dTransposeGradOpArgumentMapping);
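Note: each PD_REGISTER_ARG_MAPPING_FN entry above records how a fluid op's named inputs, attributes, and outputs line up, in order, with the parameter list of the corresponding phi kernel. The snippet below is only a stand-in illustration of the data such a mapping carries; ToySignature is a hypothetical struct, not phi's actual KernelSignature type.

// Sketch only: the conv2d_transpose mapping expressed as plain data.
#include <iostream>
#include <string>
#include <vector>

struct ToySignature {
  std::string kernel_name;
  std::vector<std::string> inputs;   // fluid op input names, in kernel order
  std::vector<std::string> attrs;    // fluid op attribute names, in kernel order
  std::vector<std::string> outputs;  // fluid op output names, in kernel order
};

int main() {
  ToySignature conv2d_transpose_sig{
      "conv2d_transpose",
      {"Input", "Filter"},
      {"strides", "paddings", "output_padding", "output_size",
       "padding_algorithm", "groups", "dilations", "data_format"},
      {"Output"}};
  std::cout << conv2d_transpose_sig.kernel_name << ": "
            << conv2d_transpose_sig.inputs.size() << " inputs, "
            << conv2d_transpose_sig.attrs.size() << " attributes, "
            << conv2d_transpose_sig.outputs.size() << " output\n";
  return 0;
}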