未验证 提交 fe169bf1 编写于 作者: W WangZhen 提交者: GitHub

[OpAttr]Adapt tensor output_size for conv2d_transpose and depthwise_conv2d_transpose (#45620)

Adapt tensor output_size for conv2d_transpose and depthwise_conv2d_transpose
上级 d9a9e638
......@@ -121,7 +121,8 @@ void Conv2DTransposeOpMaker::Make() {
AddAttr<std::vector<int>>("output_size",
"(vector<int> default: []), the "
"size of the output tensor")
.SetDefault({});
.SetDefault({})
.SupportTensor();
AddAttr<int>("groups",
"(int default:1), the groups number of the convolution "
"transpose operator. ")
......@@ -398,10 +399,10 @@ namespace ops = paddle::operators;
// conv2d_transpose
DECLARE_INFER_SHAPE_FUNCTOR(conv2d_transpose,
Conv2dTranposeInferShapeFunctor,
PD_INFER_META(phi::ConvTransposeInferMeta));
PD_INFER_META(phi::Conv2dTransposeInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(conv2d_transpose_grad,
Conv2dTranposeGradInferShapeFunctor,
PD_INFER_META(phi::ConvTransposeGradInferMeta));
PD_INFER_META(phi::Conv2dTransposeGradInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(
conv2d_transpose_grad_grad,
Conv2dTranposeDoubleGradInferShapeFunctor,
......@@ -443,10 +444,10 @@ REGISTER_OPERATOR(conv3d_transpose_grad,
// depthwise conv2d_transpose
DECLARE_INFER_SHAPE_FUNCTOR(depthwise_conv2d_transpose,
DepthWiseConv2dTranposeInferShapeFunctor,
PD_INFER_META(phi::ConvTransposeInferMeta));
PD_INFER_META(phi::Conv2dTransposeInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(depthwise_conv2d_transpose_grad,
DepthWiseConv2dTranposeGradInferShapeFunctor,
PD_INFER_META(phi::ConvTransposeGradInferMeta));
PD_INFER_META(phi::Conv2dTransposeGradInferMeta));
REGISTER_OPERATOR(depthwise_conv2d_transpose,
ops::ConvTransposeOp,
......
......@@ -541,10 +541,10 @@
backward : conv2d_grad
- api : conv2d_transpose
args : (Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
args : (Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, IntArray output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
output : Tensor(out)
infer_meta :
func : ConvTransposeInferMeta
func : Conv2dTransposeInferMeta
kernel :
func : conv2d_transpose
use_gpudnn : true
......@@ -665,10 +665,10 @@
backward : depthwise_conv2d_grad
- api : depthwise_conv2d_transpose
args : (Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
args : (Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, IntArray output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
output : Tensor(out)
infer_meta :
func : ConvTransposeInferMeta
func : Conv2dTransposeInferMeta
kernel :
func : depthwise_conv2d_transpose
backward : depthwise_conv2d_transpose_grad
......
......@@ -483,8 +483,8 @@
optional : grad_input_grad, grad_filter_grad
- backward_api : conv2d_transpose_double_grad
forward : conv2d_transpose_grad(Tensor x, Tensor filter, Tensor grad_out, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(grad_x), Tensor(grad_filter)
args : (Tensor x, Tensor filter, Tensor grad_out, Tensor grad_x_grad, Tensor grad_filter_grad, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
forward : conv2d_transpose_grad(Tensor x, Tensor filter, Tensor grad_out, int[] strides, int[] paddings, int[] output_padding, IntArray output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(grad_x), Tensor(grad_filter)
args : (Tensor x, Tensor filter, Tensor grad_out, Tensor grad_x_grad, Tensor grad_filter_grad, int[] strides, int[] paddings, int[] output_padding, IntArray output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
output : Tensor(x_grad), Tensor(filter_grad), Tensor(grad_out_grad)
infer_meta :
func : Conv2dTransposeDoubleGradInferMeta
......@@ -493,11 +493,11 @@
use_gpudnn : true
- backward_api : conv2d_transpose_grad
forward : conv2d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out)
args : (Tensor x, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
forward : conv2d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, IntArray output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out)
args : (Tensor x, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, int[] output_padding, IntArray output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
output : Tensor(x_grad), Tensor(filter_grad)
infer_meta :
func : ConvTransposeGradInferMeta
func : Conv2dTransposeGradInferMeta
kernel :
func : conv2d_transpose_grad
use_gpudnn : true
......@@ -635,11 +635,11 @@
optional : grad_input_grad, grad_filter_grad
- backward_api : depthwise_conv2d_transpose_grad
forward : depthwise_conv2d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out)
args : (Tensor x, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
forward : depthwise_conv2d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, IntArray output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out)
args : (Tensor x, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, int[] output_padding, IntArray output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
output : Tensor(x_grad), Tensor(filter_grad)
infer_meta :
func : ConvTransposeGradInferMeta
func : Conv2dTransposeGradInferMeta
kernel :
func : depthwise_conv2d_transpose_grad
......
......@@ -143,6 +143,22 @@ void ConvTransposeGradInferMeta(const MetaTensor& x,
GeneralBinaryGradInferMeta(x, filter, dx, dfilter);
}
// Infer-meta for the gradient of conv2d_transpose.
//
// The convolution attributes (strides, paddings, output_size, ...) are only
// part of the signature for registration compatibility; they do not affect
// the gradient shapes. The work is delegated to GeneralBinaryGradInferMeta,
// which (presumably — confirm in its definition) shapes dx after x and
// dfilter after filter.
void Conv2dTransposeGradInferMeta(const MetaTensor& x,
                                  const MetaTensor& filter,
                                  const MetaTensor& dout,
                                  const std::vector<int>& strides,
                                  const std::vector<int>& paddings,
                                  const std::vector<int>& output_padding,
                                  const IntArray& output_size,
                                  const std::string& padding_algorithm,
                                  int groups,
                                  const std::vector<int>& dilations,
                                  const std::string& data_format,
                                  MetaTensor* dx,
                                  MetaTensor* dfilter) {
  GeneralBinaryGradInferMeta(x, filter, dx, dfilter);
}
void Conv2dTransposeDoubleGradInferMeta(const MetaTensor& x,
const MetaTensor& filter,
const MetaTensor& dout,
......@@ -151,7 +167,7 @@ void Conv2dTransposeDoubleGradInferMeta(const MetaTensor& x,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const IntArray& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
......
......@@ -76,6 +76,20 @@ void ConvTransposeGradInferMeta(const MetaTensor& x,
MetaTensor* dx,
MetaTensor* dfilter);
void Conv2dTransposeGradInferMeta(const MetaTensor& x,
const MetaTensor& filter,
const MetaTensor& dout,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& output_padding,
const IntArray& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
const std::string& data_format,
MetaTensor* dx,
MetaTensor* dfilter);
void Conv2dTransposeDoubleGradInferMeta(const MetaTensor& x,
const MetaTensor& filter,
const MetaTensor& dout,
......@@ -84,7 +98,7 @@ void Conv2dTransposeDoubleGradInferMeta(const MetaTensor& x,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const IntArray& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
......
......@@ -800,6 +800,34 @@ void ConvTransposeInferMeta(const MetaTensor& x,
out->set_dtype(x.dtype());
}
// Infer-meta for conv2d_transpose.
//
// Thin adapter over ConvTransposeInferMeta: the signature differs only in
// that `output_size` arrives as an IntArray (so it may be supplied as a
// Tensor at runtime) instead of a plain std::vector<int>. The values are
// narrowed to int32 and forwarded unchanged.
void Conv2dTransposeInferMeta(const MetaTensor& x,
                              const MetaTensor& filter,
                              const std::vector<int>& strides,
                              const std::vector<int>& paddings,
                              const std::vector<int>& output_padding,
                              const IntArray& output_size,
                              const std::string& padding_algorithm,
                              int groups,
                              const std::vector<int>& dilations,
                              const std::string& data_format,
                              MetaTensor* out,
                              MetaConfig config) {
  // Fetch the backing data once instead of calling GetData() twice.
  const auto& output_size_data = output_size.GetData();
  const std::vector<int32_t> vec_output_size(output_size_data.begin(),
                                             output_size_data.end());
  ConvTransposeInferMeta(x,
                         filter,
                         strides,
                         paddings,
                         output_padding,
                         vec_output_size,
                         padding_algorithm,
                         groups,
                         dilations,
                         data_format,
                         out,
                         config);
}
void CrossInferMeta(const MetaTensor& x,
const MetaTensor& y,
int axis,
......
......@@ -119,6 +119,19 @@ void ConvTransposeInferMeta(const MetaTensor& x,
MetaTensor* out,
MetaConfig config = MetaConfig());
void Conv2dTransposeInferMeta(const MetaTensor& x,
const MetaTensor& filter,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& output_padding,
const IntArray& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
const std::string& data_format,
MetaTensor* out,
MetaConfig config = MetaConfig());
void CrossInferMeta(const MetaTensor& x,
const MetaTensor& y,
int axis,
......
......@@ -17,6 +17,7 @@
#include <string>
#include <vector>
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
......@@ -29,7 +30,7 @@ void Conv2dTransposeGradKernel(const Context& ctx,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const IntArray& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
......@@ -47,7 +48,7 @@ void Conv2dTransposeDoubleGradKernel(const Context& ctx,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const IntArray& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
......@@ -80,7 +81,7 @@ void DepthwiseConv2dTransposeGradKernel(const Context& ctx,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const IntArray& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
......
......@@ -17,6 +17,7 @@
#include <string>
#include <vector>
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
......@@ -28,7 +29,7 @@ void Conv2dTransposeKernel(const Context& ctx,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const IntArray& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
......@@ -56,7 +57,7 @@ void DepthwiseConv2dTransposeKernel(const Context& ctx,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const IntArray& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
......
......@@ -27,7 +27,7 @@ void DepthwiseConv2dTransposeGradKernel(const Context& ctx,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const IntArray& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
......
......@@ -26,7 +26,7 @@ void DepthwiseConv2dTransposeKernel(const Context& ctx,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const IntArray& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
......
......@@ -34,7 +34,7 @@ void Conv2dTransposeDoubleGradKernel(const Context& ctx,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const IntArray& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
......@@ -64,7 +64,7 @@ void DepthwiseConv2dTransposeGradKernel(const Context& ctx,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const IntArray& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
......
......@@ -31,7 +31,7 @@ void DepthwiseConv2dTransposeKernel(const Context& ctx,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const IntArray& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
......
......@@ -383,7 +383,7 @@ void Conv2dTransposeGradGPUDNNKernel(const Context& ctx,
const std::vector<int>& strides,
const std::vector<int>& paddings_,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const IntArray& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations_,
......@@ -422,7 +422,7 @@ void Conv2dTransposeDoubleGradGPUDNNKernel(
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const IntArray& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
......
......@@ -308,7 +308,7 @@ void Conv2dTransposeGPUDNNKernel(const Context& ctx,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const IntArray& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
......
......@@ -310,7 +310,7 @@ void Conv2dTransposeGradKernel(const Context& ctx,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const IntArray& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
......
......@@ -231,7 +231,7 @@ void Conv2dTransposeKernel(const Context& ctx,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const IntArray& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
......
......@@ -27,7 +27,7 @@ void Conv2dTransposeGradKernel(const Context& ctx,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const IntArray& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
......
......@@ -42,7 +42,7 @@ void Conv2dTransposeKernel(const Context& ctx,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& output_padding,
const std::vector<int>& output_size,
const IntArray& output_size,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations,
......
......@@ -2717,12 +2717,29 @@ class Conv2DTranspose(layers.Layer):
if self._output_size is None:
self._output_size = []
elif isinstance(self._output_size, list) or isinstance(
self._output_size, int):
elif isinstance(self._output_size, list):
if utils._contain_var(self._output_size):
self._output_size = utils._convert_to_tensor_list(
self._output_size)
else:
self._output_size = utils.convert_to_list(
self._output_size, 2, 'output_size')
elif isinstance(self._output_size, int):
self._output_size = utils.convert_to_list(self._output_size, 2,
'output_size')
elif isinstance(self._output_size, Variable):
check_dtype(self._output_size.dtype, 'output_size',
['int32', 'int64'], 'Conv2DTranspose')
if len(self._output_size.shape) == 1 and (
self._output_size.shape[0] == 1
or self._output_size.shape[0] == 2):
if self._output_size.shape[0] == 1:
self._output_size = [self._output_size, self._output_size]
else:
raise ValueError(
"output_size must contain one or two integers.")
else:
raise ValueError("output_size should be list or int")
raise ValueError("output_size should be list or int or Tensor")
self._padding = utils.convert_to_list(self._padding, 2, 'padding')
self._groups = 1 if self._groups is None else self._groups
filter_shape = [self._num_channels, self._num_filters // self._groups
......
......@@ -4216,11 +4216,42 @@ def conv2d_transpose(input,
padding = _update_padding(padding, data_format)
if filter_size is None:
if output_size is None:
raise ValueError("output_size must be set when filter_size is None")
if isinstance(output_size, int):
output_size = []
elif isinstance(output_size, (list, tuple)):
if utils._contain_var(output_size):
output_size = utils._convert_to_tensor_list(output_size)
else:
output_size = utils.convert_to_list(output_size, 2, 'output_size')
elif isinstance(output_size, int):
output_size = utils.convert_to_list(output_size, 2, 'output_size')
elif isinstance(output_size, Variable):
check_dtype(output_size.dtype, 'output_size', ['int32', 'int64'],
'conv2d_transpose')
if len(output_size.shape) == 1 and (output_size.shape[0] == 1
or output_size.shape[0] == 2):
if output_size.shape[0] == 1:
output_size = [output_size, output_size]
else:
raise ValueError("output_size must contain one or two integers.")
else:
raise ValueError(
"output_size should be int, list[int] or tuple[int] or Tensor")
if filter_size is None:
if output_size is []:
raise ValueError("output_size must be set when filter_size is None")
if not _non_static_mode():
if isinstance(output_size,
Variable) or utils._contain_var(output_size):
raise ValueError(
"filter_size should not be None when output_size is Variable or contain Variable in static mode."
)
else:
output_size = utils.convert_shape_to_list(output_size)
if len(output_size) == 1:
output_size = utils.convert_to_list(output_size[0], 2,
'output_size')
h_in = input.shape[2] if data_format == 'NCHW' else input.shape[1]
w_in = input.shape[3] if data_format == 'NCHW' else input.shape[2]
......@@ -4237,13 +4268,6 @@ def conv2d_transpose(input,
if len(padding) == 4 and utils._is_symmetric_padding(padding, 2):
padding = [padding[0], padding[2]]
if output_size is None:
output_size = []
elif isinstance(output_size, (list, tuple, int)):
output_size = utils.convert_to_list(output_size, 2, 'output_size')
else:
raise ValueError("output_size should be int, list[int] or tuple[int]")
if groups is None:
groups = 1
elif groups <= 0:
......
......@@ -14,6 +14,7 @@
from __future__ import print_function
import os
import unittest
import numpy as np
......@@ -23,7 +24,9 @@ import paddle.nn as nn
paddle.enable_static()
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid.tests.unittests.op_test import OpTest
from paddle.fluid import Program, program_guard
from test_attribute_var import UnittestBase
from op_test import OpTest
def conv2dtranspose_forward_naive(input_, filter_, attrs):
......@@ -974,5 +977,175 @@ class TestConv2DTransposeRepr(unittest.TestCase):
paddle.enable_static()
class TestTensorOutputSize1(UnittestBase):
    """Tensor `output_size` for the functional conv2d_transpose API.

    Builds a static-mode program whose conv2d_transpose `output_size`
    attribute is a 1-D assigned Tensor, runs it, and checks that both the
    executor output and the reloaded inference program produce the expected
    spatial size (17x17). Subclasses override `call_func`/`path_prefix` to
    cover the other output_size forms.
    """

    def init_info(self):
        # One network input of shape [2, 3, 8, 8]; artifacts go to a temp dir.
        self.shapes = [[2, 3, 8, 8]]
        self.save_path = os.path.join(self.temp_dir.name, self.path_prefix())

    def path_prefix(self):
        return 'conv2d_transpose_tensor_output_size1'

    def var_prefix(self):
        # A Tensor-valued attribute is serialized as a list of Variables.
        return "Vars["

    def call_func(self, x):
        w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
        # output_size as a single-element Tensor; it applies to both H and W.
        output_size = paddle.assign([17])
        # Fixed: use the public API path directly instead of the redundant
        # `paddle.paddle.nn.functional` spelling from the original test.
        out = paddle.nn.functional.conv2d_transpose(
            x, w_var, stride=2, output_size=output_size)
        return out

    def test_static(self):
        main_prog = Program()
        startup_prog = Program()  # renamed from misspelled `starup_prog`
        with program_guard(main_prog, startup_prog):
            fc = paddle.nn.Linear(8, 8)
            x = paddle.randn([2, 3, 8, 8])
            x.stop_gradient = False
            feat = fc(x)
            out = self.call_func(feat)

            sgd = paddle.optimizer.SGD()
            sgd.minimize(paddle.mean(out))
            # The Tensor attribute must show up as Variable input(s).
            self.assertTrue(self.var_prefix() in str(main_prog))

            exe = paddle.static.Executor()
            exe.run(startup_prog)
            res = exe.run(fetch_list=[feat, out])
            np.testing.assert_allclose(res[1].shape, (2, 6, 17, 17))
            paddle.static.save_inference_model(self.save_path, [x],
                                               [feat, out], exe)
            # Test for Inference Predictor
            infer_outs = self.infer_prog()
            np.testing.assert_allclose(infer_outs[1].shape, (2, 6, 17, 17))
class TestTensorOutputSize2(TestTensorOutputSize1):
    # Variant: output_size as a mixed list [int, Tensor] for the functional
    # API; inherits test_static from TestTensorOutputSize1.

    def path_prefix(self):
        return 'conv2d_transpose_tensor_output_size2'

    def call_func(self, x):
        w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
        # One plain int and one assigned Tensor element.
        output_size = [17, paddle.assign([17])]
        out = paddle.paddle.nn.functional.conv2d_transpose(
            x, w_var, stride=2, output_size=output_size)
        return out
class TestTensorOutputSize3(TestTensorOutputSize1):
    """Variant: Tensor `output_size` through the legacy fluid.layers API."""

    def path_prefix(self):
        return 'conv2d_transpose_tensor_output_size3'

    def call_func(self, x):
        # The fluid layer creates its own filter parameter, so the stray
        # `w_var = paddle.randn(...)` from the original test was unused and
        # has been removed.
        output_size = paddle.assign([17])
        out = paddle.fluid.layers.conv2d_transpose(x,
                                                   num_filters=6,
                                                   output_size=output_size,
                                                   filter_size=3,
                                                   stride=2)
        return out
class TestTensorOutputSize4(TestTensorOutputSize1):
    # Variant: output_size as a mixed list [int, Tensor] through the legacy
    # fluid.layers API; inherits test_static from TestTensorOutputSize1.

    def path_prefix(self):
        return 'conv2d_transpose_tensor_output_size4'

    def call_func(self, x):
        # One plain int and one assigned Tensor element.
        output_size = [17, paddle.assign([17])]
        out = paddle.fluid.layers.conv2d_transpose(x,
                                                   num_filters=6,
                                                   output_size=output_size,
                                                   filter_size=3,
                                                   stride=2)
        return out
class TestTensorOutputSize5(TestTensorOutputSize1):
    """Variant: mixed [int, Tensor] output_size on dygraph Conv2DTranspose."""

    def path_prefix(self):
        return 'conv2d_transpose_tensor_output_size5'

    def call_func(self, x):
        # The layer owns its filter parameter, so the stray `w_var` defined
        # in the original test was unused and has been removed.
        output_size = [17, paddle.assign([17])]
        conv2d_trans = paddle.fluid.dygraph.Conv2DTranspose(
            num_channels=3,
            num_filters=6,
            filter_size=3,
            output_size=output_size,
            stride=2)
        out = conv2d_trans(x)
        return out
class TestTensorOutputSize6(TestTensorOutputSize1):
    """Variant: a single 2-element Tensor output_size on Conv2DTranspose."""

    def path_prefix(self):
        return 'conv2d_transpose_tensor_output_size6'

    def var_prefix(self):
        # A whole-Tensor attribute is serialized as a single Var.
        return "Var["

    def call_func(self, x):
        # The layer owns its filter parameter, so the stray `w_var` defined
        # in the original test was unused and has been removed.
        output_size = paddle.assign([17, 17])
        conv2d_trans = paddle.fluid.dygraph.Conv2DTranspose(
            num_channels=3,
            num_filters=6,
            filter_size=3,
            output_size=output_size,
            stride=2)
        out = conv2d_trans(x)
        return out
class TestTensorOutputSize7(TestTensorOutputSize1):
    """Variant: plain int output_size on Conv2DTranspose (no Tensor input)."""

    def path_prefix(self):
        return 'conv2d_transpose_tensor_output_size7'

    def var_prefix(self):
        # No Tensor attribute, so nothing Variable-shaped to look for.
        return ""

    def call_func(self, x):
        # The layer owns its filter parameter, so the stray `w_var` defined
        # in the original test was unused and has been removed.
        output_size = 17
        conv2d_trans = paddle.fluid.dygraph.Conv2DTranspose(
            num_channels=3,
            num_filters=6,
            filter_size=3,
            output_size=output_size,
            stride=2)
        out = conv2d_trans(x)
        return out
class TestTensorOutputSize8(TestTensorOutputSize1):
    """Variant: plain [int, int] output_size on Conv2DTranspose."""

    def path_prefix(self):
        return 'conv2d_transpose_tensor_output_size8'

    def var_prefix(self):
        # No Tensor attribute, so nothing Variable-shaped to look for.
        return ""

    def call_func(self, x):
        # The layer owns its filter parameter, so the stray `w_var` defined
        # in the original test was unused and has been removed.
        output_size = [17, 17]
        conv2d_trans = paddle.fluid.dygraph.Conv2DTranspose(
            num_channels=3,
            num_filters=6,
            filter_size=3,
            output_size=output_size,
            stride=2)
        out = conv2d_trans(x)
        return out
# Standard unittest entry point: discover and run all test cases in this file.
if __name__ == '__main__':
    unittest.main()
......@@ -17,8 +17,8 @@ import numpy as np
from ...device import get_cudnn_version
from ...static import Variable
from ...fluid import dygraph_utils
from ...fluid.layers.utils import convert_to_list, _is_symmetric_padding
from ...fluid.data_feeder import check_variable_and_dtype
from ...fluid.layers.utils import convert_to_list, _is_symmetric_padding, _contain_var, _convert_to_tensor_list
from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
from ...framework import ParamAttr
from ...fluid.layer_helper import LayerHelper
from ...tensor.manipulation import unsqueeze, squeeze
......@@ -35,6 +35,7 @@ from paddle.device import is_compiled_with_rocm
from paddle.fluid.framework import _global_flags
from paddle.fluid.framework import _in_legacy_dygraph
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.framework import _non_static_mode
__all__ = []
......@@ -1133,11 +1134,27 @@ def conv2d_transpose(x,
if output_padding != 0:
raise ValueError('output_padding option is mutually exclusive with '
'output_size')
if isinstance(output_size, (list, tuple, int)):
if isinstance(output_size, (list, tuple)):
if _contain_var(output_size):
output_size = _convert_to_tensor_list(output_size)
else:
output_size = convert_to_list(output_size, 2, 'output_size')
elif isinstance(output_size, int):
output_size = convert_to_list(output_size, 2, 'output_size')
elif isinstance(output_size, Variable):
check_dtype(output_size.dtype, 'output_size', ['int32', 'int64'],
'conv2d_transpose')
if len(output_size.shape) == 1 and (output_size.shape[0] == 1
or output_size.shape[0] == 2):
if output_size.shape[0] == 1:
output_size = [output_size, output_size]
else:
raise ValueError(
"output_size should be int, or list, tuple of ints")
"output_size must contain one or two integers.")
else:
raise ValueError(
"output_size should be int or Tensor or list, tuple of ints or Tensor"
)
if output_padding == 0:
output_padding = []
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册