From fe169bf1a31e5ffd4e6c22c3509e23305bd5867a Mon Sep 17 00:00:00 2001 From: WangZhen <23097963+0x45f@users.noreply.github.com> Date: Wed, 7 Sep 2022 15:48:37 +0800 Subject: [PATCH] [OpAttr]Adapt tensor output_size for conv2d_transpose and depthwise_conv2d_transpose (#45620) Adapt tensor output_size for conv2d_transpose and depthwise_conv2d_transpose --- paddle/fluid/operators/conv_transpose_op.cc | 11 +- paddle/phi/api/yaml/legacy_api.yaml | 8 +- paddle/phi/api/yaml/legacy_backward.yaml | 16 +- paddle/phi/infermeta/backward.cc | 18 +- paddle/phi/infermeta/backward.h | 16 +- paddle/phi/infermeta/binary.cc | 28 +++ paddle/phi/infermeta/binary.h | 13 ++ .../phi/kernels/conv_transpose_grad_kernel.h | 7 +- paddle/phi/kernels/conv_transpose_kernel.h | 5 +- .../kernels/cpu/conv_transpose_grad_kernel.cc | 2 +- .../phi/kernels/cpu/conv_transpose_kernel.cc | 2 +- .../kernels/gpu/conv_transpose_grad_kernel.cu | 4 +- .../phi/kernels/gpu/conv_transpose_kernel.cu | 2 +- .../gpudnn/conv_transpose_grad_kernel.cu | 4 +- .../kernels/gpudnn/conv_transpose_kernel.cu | 2 +- .../impl/conv_transpose_grad_kernel_impl.h | 2 +- .../kernels/impl/conv_transpose_kernel_impl.h | 2 +- .../kernels/xpu/conv_transpose_grad_kernel.cc | 2 +- .../phi/kernels/xpu/conv_transpose_kernel.cc | 2 +- python/paddle/fluid/dygraph/nn.py | 23 ++- python/paddle/fluid/layers/nn.py | 44 ++++- .../unittests/test_conv2d_transpose_op.py | 175 +++++++++++++++++- python/paddle/nn/functional/conv.py | 25 ++- 23 files changed, 359 insertions(+), 54 deletions(-) diff --git a/paddle/fluid/operators/conv_transpose_op.cc b/paddle/fluid/operators/conv_transpose_op.cc index 56875c2d43..5cc991d8f1 100644 --- a/paddle/fluid/operators/conv_transpose_op.cc +++ b/paddle/fluid/operators/conv_transpose_op.cc @@ -121,7 +121,8 @@ void Conv2DTransposeOpMaker::Make() { AddAttr>("output_size", "(vector default: []), the " "size of the output tensor") - .SetDefault({}); + .SetDefault({}) + .SupportTensor(); AddAttr("groups", "(int 
default:1), the groups number of the convolution " "transpose operator. ") @@ -398,10 +399,10 @@ namespace ops = paddle::operators; // conv2d_transpose DECLARE_INFER_SHAPE_FUNCTOR(conv2d_transpose, Conv2dTranposeInferShapeFunctor, - PD_INFER_META(phi::ConvTransposeInferMeta)); + PD_INFER_META(phi::Conv2dTransposeInferMeta)); DECLARE_INFER_SHAPE_FUNCTOR(conv2d_transpose_grad, Conv2dTranposeGradInferShapeFunctor, - PD_INFER_META(phi::ConvTransposeGradInferMeta)); + PD_INFER_META(phi::Conv2dTransposeGradInferMeta)); DECLARE_INFER_SHAPE_FUNCTOR( conv2d_transpose_grad_grad, Conv2dTranposeDoubleGradInferShapeFunctor, @@ -443,10 +444,10 @@ REGISTER_OPERATOR(conv3d_transpose_grad, // depthwise conv2d_transpose DECLARE_INFER_SHAPE_FUNCTOR(depthwise_conv2d_transpose, DepthWiseConv2dTranposeInferShapeFunctor, - PD_INFER_META(phi::ConvTransposeInferMeta)); + PD_INFER_META(phi::Conv2dTransposeInferMeta)); DECLARE_INFER_SHAPE_FUNCTOR(depthwise_conv2d_transpose_grad, DepthWiseConv2dTranposeGradInferShapeFunctor, - PD_INFER_META(phi::ConvTransposeGradInferMeta)); + PD_INFER_META(phi::Conv2dTransposeGradInferMeta)); REGISTER_OPERATOR(depthwise_conv2d_transpose, ops::ConvTransposeOp, diff --git a/paddle/phi/api/yaml/legacy_api.yaml b/paddle/phi/api/yaml/legacy_api.yaml index 1599dba981..d48e664858 100755 --- a/paddle/phi/api/yaml/legacy_api.yaml +++ b/paddle/phi/api/yaml/legacy_api.yaml @@ -541,10 +541,10 @@ backward : conv2d_grad - api : conv2d_transpose - args : (Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) + args : (Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, IntArray output_size, str padding_algorithm, int groups, int[] dilations, str data_format) output : Tensor(out) infer_meta : - func : ConvTransposeInferMeta + func : Conv2dTransposeInferMeta kernel : func : conv2d_transpose use_gpudnn : true @@ -665,10 +665,10 @@ backward 
: depthwise_conv2d_grad - api : depthwise_conv2d_transpose - args : (Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) + args : (Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, IntArray output_size, str padding_algorithm, int groups, int[] dilations, str data_format) output : Tensor(out) infer_meta : - func : ConvTransposeInferMeta + func : Conv2dTransposeInferMeta kernel : func : depthwise_conv2d_transpose backward : depthwise_conv2d_transpose_grad diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml index 27f8b3f491..27738c49ba 100755 --- a/paddle/phi/api/yaml/legacy_backward.yaml +++ b/paddle/phi/api/yaml/legacy_backward.yaml @@ -483,8 +483,8 @@ optional : grad_input_grad, grad_filter_grad - backward_api : conv2d_transpose_double_grad - forward : conv2d_transpose_grad(Tensor x, Tensor filter, Tensor grad_out, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(grad_x), Tensor(grad_filter) - args : (Tensor x, Tensor filter, Tensor grad_out, Tensor grad_x_grad, Tensor grad_filter_grad, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) + forward : conv2d_transpose_grad(Tensor x, Tensor filter, Tensor grad_out, int[] strides, int[] paddings, int[] output_padding, IntArray output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(grad_x), Tensor(grad_filter) + args : (Tensor x, Tensor filter, Tensor grad_out, Tensor grad_x_grad, Tensor grad_filter_grad, int[] strides, int[] paddings, int[] output_padding, IntArray output_size, str padding_algorithm, int groups, int[] dilations, str data_format) output : Tensor(x_grad), Tensor(filter_grad), Tensor(grad_out_grad) 
infer_meta : func : Conv2dTransposeDoubleGradInferMeta @@ -493,11 +493,11 @@ use_gpudnn : true - backward_api : conv2d_transpose_grad - forward : conv2d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out) - args : (Tensor x, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) + forward : conv2d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, IntArray output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out) + args : (Tensor x, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, int[] output_padding, IntArray output_size, str padding_algorithm, int groups, int[] dilations, str data_format) output : Tensor(x_grad), Tensor(filter_grad) infer_meta : - func : ConvTransposeGradInferMeta + func : Conv2dTransposeGradInferMeta kernel : func : conv2d_transpose_grad use_gpudnn : true @@ -635,11 +635,11 @@ optional : grad_input_grad, grad_filter_grad - backward_api : depthwise_conv2d_transpose_grad - forward : depthwise_conv2d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out) - args : (Tensor x, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) + forward : depthwise_conv2d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, IntArray output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out) + args : (Tensor x, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, int[] output_padding, IntArray 
output_size, str padding_algorithm, int groups, int[] dilations, str data_format) output : Tensor(x_grad), Tensor(filter_grad) infer_meta : - func : ConvTransposeGradInferMeta + func : Conv2dTransposeGradInferMeta kernel : func : depthwise_conv2d_transpose_grad diff --git a/paddle/phi/infermeta/backward.cc b/paddle/phi/infermeta/backward.cc index 82d83d4950..2194435af3 100644 --- a/paddle/phi/infermeta/backward.cc +++ b/paddle/phi/infermeta/backward.cc @@ -143,6 +143,22 @@ void ConvTransposeGradInferMeta(const MetaTensor& x, GeneralBinaryGradInferMeta(x, filter, dx, dfilter); } +void Conv2dTransposeGradInferMeta(const MetaTensor& x, + const MetaTensor& filter, + const MetaTensor& dout, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const IntArray& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + MetaTensor* dx, + MetaTensor* dfilter) { + GeneralBinaryGradInferMeta(x, filter, dx, dfilter); +} + void Conv2dTransposeDoubleGradInferMeta(const MetaTensor& x, const MetaTensor& filter, const MetaTensor& dout, @@ -151,7 +167,7 @@ void Conv2dTransposeDoubleGradInferMeta(const MetaTensor& x, const std::vector& strides, const std::vector& paddings, const std::vector& output_padding, - const std::vector& output_size, + const IntArray& output_size, const std::string& padding_algorithm, int groups, const std::vector& dilations, diff --git a/paddle/phi/infermeta/backward.h b/paddle/phi/infermeta/backward.h index 0930358ad8..3e7cfa3ad8 100644 --- a/paddle/phi/infermeta/backward.h +++ b/paddle/phi/infermeta/backward.h @@ -76,6 +76,20 @@ void ConvTransposeGradInferMeta(const MetaTensor& x, MetaTensor* dx, MetaTensor* dfilter); +void Conv2dTransposeGradInferMeta(const MetaTensor& x, + const MetaTensor& filter, + const MetaTensor& dout, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const 
IntArray& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + MetaTensor* dx, + MetaTensor* dfilter); + void Conv2dTransposeDoubleGradInferMeta(const MetaTensor& x, const MetaTensor& filter, const MetaTensor& dout, @@ -84,7 +98,7 @@ void Conv2dTransposeDoubleGradInferMeta(const MetaTensor& x, const std::vector& strides, const std::vector& paddings, const std::vector& output_padding, - const std::vector& output_size, + const IntArray& output_size, const std::string& padding_algorithm, int groups, const std::vector& dilations, diff --git a/paddle/phi/infermeta/binary.cc b/paddle/phi/infermeta/binary.cc index b0759bf68c..7f3c91181a 100644 --- a/paddle/phi/infermeta/binary.cc +++ b/paddle/phi/infermeta/binary.cc @@ -800,6 +800,34 @@ void ConvTransposeInferMeta(const MetaTensor& x, out->set_dtype(x.dtype()); } +void Conv2dTransposeInferMeta(const MetaTensor& x, + const MetaTensor& filter, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const IntArray& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + MetaTensor* out, + MetaConfig config) { + std::vector vec_output_size(output_size.GetData().begin(), + output_size.GetData().end()); + ConvTransposeInferMeta(x, + filter, + strides, + paddings, + output_padding, + vec_output_size, + padding_algorithm, + groups, + dilations, + data_format, + out, + config); +} + void CrossInferMeta(const MetaTensor& x, const MetaTensor& y, int axis, diff --git a/paddle/phi/infermeta/binary.h b/paddle/phi/infermeta/binary.h index 9dcf498776..e91470d32b 100644 --- a/paddle/phi/infermeta/binary.h +++ b/paddle/phi/infermeta/binary.h @@ -119,6 +119,19 @@ void ConvTransposeInferMeta(const MetaTensor& x, MetaTensor* out, MetaConfig config = MetaConfig()); +void Conv2dTransposeInferMeta(const MetaTensor& x, + const MetaTensor& 
filter, + const std::vector& strides, + const std::vector& paddings, + const std::vector& output_padding, + const IntArray& output_size, + const std::string& padding_algorithm, + int groups, + const std::vector& dilations, + const std::string& data_format, + MetaTensor* out, + MetaConfig config = MetaConfig()); + void CrossInferMeta(const MetaTensor& x, const MetaTensor& y, int axis, diff --git a/paddle/phi/kernels/conv_transpose_grad_kernel.h b/paddle/phi/kernels/conv_transpose_grad_kernel.h index 00d5fb51f0..c137a1914e 100644 --- a/paddle/phi/kernels/conv_transpose_grad_kernel.h +++ b/paddle/phi/kernels/conv_transpose_grad_kernel.h @@ -17,6 +17,7 @@ #include #include +#include "paddle/phi/common/int_array.h" #include "paddle/phi/core/dense_tensor.h" namespace phi { @@ -29,7 +30,7 @@ void Conv2dTransposeGradKernel(const Context& ctx, const std::vector& strides, const std::vector& paddings, const std::vector& output_padding, - const std::vector& output_size, + const IntArray& output_size, const std::string& padding_algorithm, int groups, const std::vector& dilations, @@ -47,7 +48,7 @@ void Conv2dTransposeDoubleGradKernel(const Context& ctx, const std::vector& strides, const std::vector& paddings, const std::vector& output_padding, - const std::vector& output_size, + const IntArray& output_size, const std::string& padding_algorithm, int groups, const std::vector& dilations, @@ -80,7 +81,7 @@ void DepthwiseConv2dTransposeGradKernel(const Context& ctx, const std::vector& strides, const std::vector& paddings, const std::vector& output_padding, - const std::vector& output_size, + const IntArray& output_size, const std::string& padding_algorithm, int groups, const std::vector& dilations, diff --git a/paddle/phi/kernels/conv_transpose_kernel.h b/paddle/phi/kernels/conv_transpose_kernel.h index e39617e0e7..bb82b971b4 100644 --- a/paddle/phi/kernels/conv_transpose_kernel.h +++ b/paddle/phi/kernels/conv_transpose_kernel.h @@ -17,6 +17,7 @@ #include #include +#include 
"paddle/phi/common/int_array.h" #include "paddle/phi/core/dense_tensor.h" namespace phi { @@ -28,7 +29,7 @@ void Conv2dTransposeKernel(const Context& ctx, const std::vector& strides, const std::vector& paddings, const std::vector& output_padding, - const std::vector& output_size, + const IntArray& output_size, const std::string& padding_algorithm, int groups, const std::vector& dilations, @@ -56,7 +57,7 @@ void DepthwiseConv2dTransposeKernel(const Context& ctx, const std::vector& strides, const std::vector& paddings, const std::vector& output_padding, - const std::vector& output_size, + const IntArray& output_size, const std::string& padding_algorithm, int groups, const std::vector& dilations, diff --git a/paddle/phi/kernels/cpu/conv_transpose_grad_kernel.cc b/paddle/phi/kernels/cpu/conv_transpose_grad_kernel.cc index 17fe44dea3..e502fd7d1f 100644 --- a/paddle/phi/kernels/cpu/conv_transpose_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/conv_transpose_grad_kernel.cc @@ -27,7 +27,7 @@ void DepthwiseConv2dTransposeGradKernel(const Context& ctx, const std::vector& strides, const std::vector& paddings, const std::vector& output_padding, - const std::vector& output_size, + const IntArray& output_size, const std::string& padding_algorithm, int groups, const std::vector& dilations, diff --git a/paddle/phi/kernels/cpu/conv_transpose_kernel.cc b/paddle/phi/kernels/cpu/conv_transpose_kernel.cc index ad9a5933f2..5b35b02e76 100644 --- a/paddle/phi/kernels/cpu/conv_transpose_kernel.cc +++ b/paddle/phi/kernels/cpu/conv_transpose_kernel.cc @@ -26,7 +26,7 @@ void DepthwiseConv2dTransposeKernel(const Context& ctx, const std::vector& strides, const std::vector& paddings, const std::vector& output_padding, - const std::vector& output_size, + const IntArray& output_size, const std::string& padding_algorithm, int groups, const std::vector& dilations, diff --git a/paddle/phi/kernels/gpu/conv_transpose_grad_kernel.cu b/paddle/phi/kernels/gpu/conv_transpose_grad_kernel.cu index 
0f7498a16d..12f56dc6cc 100644 --- a/paddle/phi/kernels/gpu/conv_transpose_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/conv_transpose_grad_kernel.cu @@ -34,7 +34,7 @@ void Conv2dTransposeDoubleGradKernel(const Context& ctx, const std::vector& strides, const std::vector& paddings, const std::vector& output_padding, - const std::vector& output_size, + const IntArray& output_size, const std::string& padding_algorithm, int groups, const std::vector& dilations, @@ -64,7 +64,7 @@ void DepthwiseConv2dTransposeGradKernel(const Context& ctx, const std::vector& strides, const std::vector& paddings, const std::vector& output_padding, - const std::vector& output_size, + const IntArray& output_size, const std::string& padding_algorithm, int groups, const std::vector& dilations, diff --git a/paddle/phi/kernels/gpu/conv_transpose_kernel.cu b/paddle/phi/kernels/gpu/conv_transpose_kernel.cu index 94d0d536a8..2d60b61017 100644 --- a/paddle/phi/kernels/gpu/conv_transpose_kernel.cu +++ b/paddle/phi/kernels/gpu/conv_transpose_kernel.cu @@ -31,7 +31,7 @@ void DepthwiseConv2dTransposeKernel(const Context& ctx, const std::vector& strides, const std::vector& paddings, const std::vector& output_padding, - const std::vector& output_size, + const IntArray& output_size, const std::string& padding_algorithm, int groups, const std::vector& dilations, diff --git a/paddle/phi/kernels/gpudnn/conv_transpose_grad_kernel.cu b/paddle/phi/kernels/gpudnn/conv_transpose_grad_kernel.cu index 626ef79d56..3acb1604f4 100644 --- a/paddle/phi/kernels/gpudnn/conv_transpose_grad_kernel.cu +++ b/paddle/phi/kernels/gpudnn/conv_transpose_grad_kernel.cu @@ -383,7 +383,7 @@ void Conv2dTransposeGradGPUDNNKernel(const Context& ctx, const std::vector& strides, const std::vector& paddings_, const std::vector& output_padding, - const std::vector& output_size, + const IntArray& output_size, const std::string& padding_algorithm, int groups, const std::vector& dilations_, @@ -422,7 +422,7 @@ void 
Conv2dTransposeDoubleGradGPUDNNKernel( const std::vector& strides, const std::vector& paddings, const std::vector& output_padding, - const std::vector& output_size, + const IntArray& output_size, const std::string& padding_algorithm, int groups, const std::vector& dilations, diff --git a/paddle/phi/kernels/gpudnn/conv_transpose_kernel.cu b/paddle/phi/kernels/gpudnn/conv_transpose_kernel.cu index 2289541da1..6fc1e2eff1 100644 --- a/paddle/phi/kernels/gpudnn/conv_transpose_kernel.cu +++ b/paddle/phi/kernels/gpudnn/conv_transpose_kernel.cu @@ -308,7 +308,7 @@ void Conv2dTransposeGPUDNNKernel(const Context& ctx, const std::vector& strides, const std::vector& paddings, const std::vector& output_padding, - const std::vector& output_size, + const IntArray& output_size, const std::string& padding_algorithm, int groups, const std::vector& dilations, diff --git a/paddle/phi/kernels/impl/conv_transpose_grad_kernel_impl.h b/paddle/phi/kernels/impl/conv_transpose_grad_kernel_impl.h index 3fbaf2b2d4..d4c2e96fbf 100644 --- a/paddle/phi/kernels/impl/conv_transpose_grad_kernel_impl.h +++ b/paddle/phi/kernels/impl/conv_transpose_grad_kernel_impl.h @@ -310,7 +310,7 @@ void Conv2dTransposeGradKernel(const Context& ctx, const std::vector& strides, const std::vector& paddings, const std::vector& output_padding, - const std::vector& output_size, + const IntArray& output_size, const std::string& padding_algorithm, int groups, const std::vector& dilations, diff --git a/paddle/phi/kernels/impl/conv_transpose_kernel_impl.h b/paddle/phi/kernels/impl/conv_transpose_kernel_impl.h index a76545716a..17d9eed70e 100644 --- a/paddle/phi/kernels/impl/conv_transpose_kernel_impl.h +++ b/paddle/phi/kernels/impl/conv_transpose_kernel_impl.h @@ -231,7 +231,7 @@ void Conv2dTransposeKernel(const Context& ctx, const std::vector& strides, const std::vector& paddings, const std::vector& output_padding, - const std::vector& output_size, + const IntArray& output_size, const std::string& padding_algorithm, int 
groups, const std::vector& dilations, diff --git a/paddle/phi/kernels/xpu/conv_transpose_grad_kernel.cc b/paddle/phi/kernels/xpu/conv_transpose_grad_kernel.cc index 49061069b8..9db36ace02 100644 --- a/paddle/phi/kernels/xpu/conv_transpose_grad_kernel.cc +++ b/paddle/phi/kernels/xpu/conv_transpose_grad_kernel.cc @@ -27,7 +27,7 @@ void Conv2dTransposeGradKernel(const Context& ctx, const std::vector& strides, const std::vector& paddings, const std::vector& output_padding, - const std::vector& output_size, + const IntArray& output_size, const std::string& padding_algorithm, int groups, const std::vector& dilations, diff --git a/paddle/phi/kernels/xpu/conv_transpose_kernel.cc b/paddle/phi/kernels/xpu/conv_transpose_kernel.cc index 3fcd4b4a32..ca0c0084d3 100644 --- a/paddle/phi/kernels/xpu/conv_transpose_kernel.cc +++ b/paddle/phi/kernels/xpu/conv_transpose_kernel.cc @@ -42,7 +42,7 @@ void Conv2dTransposeKernel(const Context& ctx, const std::vector& strides, const std::vector& paddings, const std::vector& output_padding, - const std::vector& output_size, + const IntArray& output_size, const std::string& padding_algorithm, int groups, const std::vector& dilations, diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py index 136a08fb09..e0262fb113 100644 --- a/python/paddle/fluid/dygraph/nn.py +++ b/python/paddle/fluid/dygraph/nn.py @@ -2717,12 +2717,29 @@ class Conv2DTranspose(layers.Layer): if self._output_size is None: self._output_size = [] - elif isinstance(self._output_size, list) or isinstance( - self._output_size, int): + elif isinstance(self._output_size, list): + if utils._contain_var(self._output_size): + self._output_size = utils._convert_to_tensor_list( + self._output_size) + else: + self._output_size = utils.convert_to_list( + self._output_size, 2, 'output_size') + elif isinstance(self._output_size, int): self._output_size = utils.convert_to_list(self._output_size, 2, 'output_size') + elif isinstance(self._output_size, Variable): + 
check_dtype(self._output_size.dtype, 'output_size', ['int32', 'int64'], 'Conv2DTranspose') if len(self._output_size.shape) == 1 and ( self._output_size.shape[0] == 1 or self._output_size.shape[0] == 2): if self._output_size.shape[0] == 1: self._output_size = [self._output_size, self._output_size] else: raise ValueError( "output_size must contain one or two integers.") else: - raise ValueError("output_size should be list or int") + raise ValueError("output_size should be list or int or Tensor") self._padding = utils.convert_to_list(self._padding, 2, 'padding') self._groups = 1 if self._groups is None else self._groups filter_shape = [self._num_channels, self._num_filters // self._groups diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index b5615ee1bb..b4330f1c4a 100755 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -4216,11 +4216,42 @@ def conv2d_transpose(input, padding = _update_padding(padding, data_format) + if output_size is None: + output_size = [] + elif isinstance(output_size, (list, tuple)): + if utils._contain_var(output_size): + output_size = utils._convert_to_tensor_list(output_size) + else: + output_size = utils.convert_to_list(output_size, 2, 'output_size') + elif isinstance(output_size, int): + output_size = utils.convert_to_list(output_size, 2, 'output_size') + elif isinstance(output_size, Variable): + check_dtype(output_size.dtype, 'output_size', ['int32', 'int64'], + 'conv2d_transpose') + if len(output_size.shape) == 1 and (output_size.shape[0] == 1 + or output_size.shape[0] == 2): + if output_size.shape[0] == 1: + output_size = [output_size, output_size] + else: + raise ValueError("output_size must contain one or two integers.") + else: + raise ValueError( + "output_size should be int, list[int] or tuple[int] or Tensor") + if filter_size is None: - if output_size is None: + if output_size == []: raise ValueError("output_size must be set when filter_size is None")
- if isinstance(output_size, int): - output_size = [output_size, output_size] + if not _non_static_mode(): + if isinstance(output_size, + Variable) or utils._contain_var(output_size): + raise ValueError( + "filter_size should not be None when output_size is Variable or contain Variable in static mode." + ) + else: + output_size = utils.convert_shape_to_list(output_size) + if len(output_size) == 1: + output_size = utils.convert_to_list(output_size[0], 2, + 'output_size') h_in = input.shape[2] if data_format == 'NCHW' else input.shape[1] w_in = input.shape[3] if data_format == 'NCHW' else input.shape[2] @@ -4237,13 +4268,6 @@ def conv2d_transpose(input, if len(padding) == 4 and utils._is_symmetric_padding(padding, 2): padding = [padding[0], padding[2]] - if output_size is None: - output_size = [] - elif isinstance(output_size, (list, tuple, int)): - output_size = utils.convert_to_list(output_size, 2, 'output_size') - else: - raise ValueError("output_size should be int, list[int] or tuple[int]") - if groups is None: groups = 1 elif groups <= 0: diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py index c10d71baf3..c2fff92ffe 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py @@ -14,6 +14,7 @@ from __future__ import print_function +import os import unittest import numpy as np @@ -23,7 +24,9 @@ import paddle.nn as nn paddle.enable_static() import paddle.fluid.core as core import paddle.fluid as fluid -from paddle.fluid.tests.unittests.op_test import OpTest +from paddle.fluid import Program, program_guard +from test_attribute_var import UnittestBase +from op_test import OpTest def conv2dtranspose_forward_naive(input_, filter_, attrs): @@ -974,5 +977,175 @@ class TestConv2DTransposeRepr(unittest.TestCase): paddle.enable_static() +class TestTensorOutputSize1(UnittestBase): + + def 
init_info(self): + self.shapes = [[2, 3, 8, 8]] + self.save_path = os.path.join(self.temp_dir.name, self.path_prefix()) + + def path_prefix(self): + return 'conv2d_transpose_tensor_output_size1' + + def var_prefix(self): + return "Vars[" + + def call_func(self, x): + w_var = paddle.randn((3, 6, 3, 3), dtype='float32') + output_size = paddle.assign([17]) + out = paddle.paddle.nn.functional.conv2d_transpose( + x, w_var, stride=2, output_size=output_size) + return out + + def test_static(self): + main_prog = Program() + starup_prog = Program() + with program_guard(main_prog, starup_prog): + fc = paddle.nn.Linear(8, 8) + x = paddle.randn([2, 3, 8, 8]) + x.stop_gradient = False + feat = fc(x) + out = self.call_func(feat) + + sgd = paddle.optimizer.SGD() + sgd.minimize(paddle.mean(out)) + self.assertTrue(self.var_prefix() in str(main_prog)) + + exe = paddle.static.Executor() + exe.run(starup_prog) + res = exe.run(fetch_list=[feat, out]) + np.testing.assert_allclose(res[1].shape, (2, 6, 17, 17)) + + paddle.static.save_inference_model(self.save_path, [x], [feat, out], + exe) + # Test for Inference Predictor + infer_outs = self.infer_prog() + np.testing.assert_allclose(infer_outs[1].shape, (2, 6, 17, 17)) + + +class TestTensorOutputSize2(TestTensorOutputSize1): + + def path_prefix(self): + return 'conv2d_transpose_tensor_output_size2' + + def call_func(self, x): + w_var = paddle.randn((3, 6, 3, 3), dtype='float32') + output_size = [17, paddle.assign([17])] + out = paddle.paddle.nn.functional.conv2d_transpose( + x, w_var, stride=2, output_size=output_size) + return out + + +class TestTensorOutputSize3(TestTensorOutputSize1): + + def path_prefix(self): + return 'conv2d_transpose_tensor_output_size3' + + def call_func(self, x): + w_var = paddle.randn((3, 6, 3, 3), dtype='float32') + output_size = paddle.assign([17]) + out = paddle.fluid.layers.conv2d_transpose(x, + num_filters=6, + output_size=output_size, + filter_size=3, + stride=2) + return out + + +class 
TestTensorOutputSize4(TestTensorOutputSize1): + + def path_prefix(self): + return 'conv2d_transpose_tensor_output_size4' + + def call_func(self, x): + output_size = [17, paddle.assign([17])] + out = paddle.fluid.layers.conv2d_transpose(x, + num_filters=6, + output_size=output_size, + filter_size=3, + stride=2) + return out + + +class TestTensorOutputSize5(TestTensorOutputSize1): + + def path_prefix(self): + return 'conv2d_transpose_tensor_output_size5' + + def call_func(self, x): + w_var = paddle.randn((3, 6, 3, 3), dtype='float32') + output_size = [17, paddle.assign([17])] + conv2d_trans = paddle.fluid.dygraph.Conv2DTranspose( + num_channels=3, + num_filters=6, + filter_size=3, + output_size=output_size, + stride=2) + out = conv2d_trans(x) + return out + + +class TestTensorOutputSize6(TestTensorOutputSize1): + + def path_prefix(self): + return 'conv2d_transpose_tensor_output_size6' + + def var_prefix(self): + return "Var[" + + def call_func(self, x): + w_var = paddle.randn((3, 6, 3, 3), dtype='float32') + output_size = paddle.assign([17, 17]) + conv2d_trans = paddle.fluid.dygraph.Conv2DTranspose( + num_channels=3, + num_filters=6, + filter_size=3, + output_size=output_size, + stride=2) + out = conv2d_trans(x) + return out + + +class TestTensorOutputSize7(TestTensorOutputSize1): + + def path_prefix(self): + return 'conv2d_transpose_tensor_output_size7' + + def var_prefix(self): + return "" + + def call_func(self, x): + w_var = paddle.randn((3, 6, 3, 3), dtype='float32') + output_size = 17 + conv2d_trans = paddle.fluid.dygraph.Conv2DTranspose( + num_channels=3, + num_filters=6, + filter_size=3, + output_size=output_size, + stride=2) + out = conv2d_trans(x) + return out + + +class TestTensorOutputSize8(TestTensorOutputSize1): + + def path_prefix(self): + return 'conv2d_transpose_tensor_output_size8' + + def var_prefix(self): + return "" + + def call_func(self, x): + w_var = paddle.randn((3, 6, 3, 3), dtype='float32') + output_size = [17, 17] + conv2d_trans = 
paddle.fluid.dygraph.Conv2DTranspose( + num_channels=3, + num_filters=6, + filter_size=3, + output_size=output_size, + stride=2) + out = conv2d_trans(x) + return out + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/nn/functional/conv.py b/python/paddle/nn/functional/conv.py index 1f2ddb98bf..06784f5d13 100644 --- a/python/paddle/nn/functional/conv.py +++ b/python/paddle/nn/functional/conv.py @@ -17,8 +17,8 @@ import numpy as np from ...device import get_cudnn_version from ...static import Variable from ...fluid import dygraph_utils -from ...fluid.layers.utils import convert_to_list, _is_symmetric_padding -from ...fluid.data_feeder import check_variable_and_dtype +from ...fluid.layers.utils import convert_to_list, _is_symmetric_padding, _contain_var, _convert_to_tensor_list +from ...fluid.data_feeder import check_variable_and_dtype, check_dtype from ...framework import ParamAttr from ...fluid.layer_helper import LayerHelper from ...tensor.manipulation import unsqueeze, squeeze @@ -35,6 +35,7 @@ from paddle.device import is_compiled_with_rocm from paddle.fluid.framework import _global_flags from paddle.fluid.framework import _in_legacy_dygraph from paddle.fluid.framework import in_dygraph_mode +from paddle.fluid.framework import _non_static_mode __all__ = [] @@ -1133,11 +1134,27 @@ def conv2d_transpose(x, if output_padding != 0: raise ValueError('output_padding option is mutually exclusive with ' 'output_size') - if isinstance(output_size, (list, tuple, int)): + if isinstance(output_size, (list, tuple)): + if _contain_var(output_size): + output_size = _convert_to_tensor_list(output_size) + else: + output_size = convert_to_list(output_size, 2, 'output_size') + elif isinstance(output_size, int): output_size = convert_to_list(output_size, 2, 'output_size') + elif isinstance(output_size, Variable): + check_dtype(output_size.dtype, 'output_size', ['int32', 'int64'], + 'conv2d_transpose') + if len(output_size.shape) == 1 and (output_size.shape[0] 
== 1 + or output_size.shape[0] == 2): + if output_size.shape[0] == 1: + output_size = [output_size, output_size] + else: + raise ValueError( + "output_size must contain one or two integers.") else: raise ValueError( - "output_size should be int, or list, tuple of ints") + "output_size should be int or Tensor or list, tuple of ints or Tensor" + ) if output_padding == 0: output_padding = [] -- GitLab