Unverified commit d26ae9ad, authored by LielinJiang, committed by GitHub

Update conv_transpose api (#26427)

* update conv_transpose api
Parent c037d625
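This commit adds an output_padding attribute to the conv_transpose operators and renames the imperative-mode APIs (conv2d_transpose -> conv_transpose2d, Conv2DTranspose -> ConvTranspose2d, plus the 3D counterparts), while dropping the act and use_cudnn arguments. A minimal dygraph sketch of the updated functional call, with input/weight shapes assumed for illustration:

    import numpy as np
    import paddle.fluid as fluid
    import paddle.fluid.dygraph as dg
    import paddle.nn.functional as F

    with dg.guard(fluid.CPUPlace()):
        # NCHW input; weight laid out as (in_channels, out_channels, kh, kw)
        x = dg.to_variable(np.random.randn(2, 3, 7, 7).astype("float32"))
        w = dg.to_variable(np.random.randn(3, 6, 5, 5).astype("float32"))
        y = F.conv_transpose2d(
            x, w, None, padding=2, output_padding=1, stride=2,
            data_format="NCHW")
        # (2, 6, 14, 14); output_padding adds 1 to one side of each
        # spatial dimension
        print(y.numpy().shape)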
......@@ -37,6 +37,8 @@ void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const {
auto filter_dims = ctx->GetInputDim("Filter");
std::vector<int> output_size =
ctx->Attrs().Get<std::vector<int>>("output_size");
std::vector<int> output_padding =
ctx->Attrs().Get<std::vector<int>>("output_padding");
std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
std::vector<int> dilations = ctx->Attrs().Get<std::vector<int>>("dilations");
......@@ -78,6 +80,12 @@ void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const {
platform::errors::InvalidArgument(
"The Attr(output_size) and Attr(stride) of Op(conv_transpose) "
"should be the same."));
if (output_padding.size())
PADDLE_ENFORCE_EQ(
output_padding.size(), strides.size(),
platform::errors::InvalidArgument(
"The Attr(output_padding) and Attr(stride) of Op(conv_transpose) "
"should be the same."));
const int64_t C =
(data_layout != DataLayout::kNHWC ? in_dims[1]
......@@ -136,6 +144,27 @@ void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const {
infer_shape + strides[i]));
}
output_shape.push_back(output_size[i]);
} else if (output_padding.size()) {
if (ctx->IsRuntime()) {
PADDLE_ENFORCE_GE(
output_padding[i], 0,
platform::errors::InvalidArgument(
"output_padding of Op(ConvTransposeOp) should not be "
"less than the 0. But received output_padding = "
"[%s], whose dim %d is less than 0",
framework::make_ddim(output_padding), i));
PADDLE_ENFORCE_LT(
output_padding[i], std::max(strides[i], dilations[i]),
platform::errors::InvalidArgument(
"output_padding of Op(ConvTransposeOp) should be less "
"than either stride or dilation. But received output_size = "
"[%s], "
"whose dim %d is not less than either stride (%d) or "
"dilation (%d)",
framework::make_ddim(output_padding), i, strides[i],
dilations[i]));
}
output_shape.push_back((infer_shape + output_padding[i]));
} else {
output_shape.push_back(infer_shape);
}
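When output_size is absent but output_padding is given, each spatial dimension becomes the usual inferred size plus output_padding[i], and the runtime check requires 0 <= output_padding[i] < max(strides[i], dilations[i]). A hedged Python restatement of the per-dimension arithmetic (the helper name is illustrative, not part of the operator):

    def infer_transpose_dim(in_size, stride, pad_0, pad_1, dilation, ksize,
                            out_pad=0):
        # mirrors InferShape: transposed-conv size, then add output_padding
        assert 0 <= out_pad < max(stride, dilation)
        infer_shape = ((in_size - 1) * stride - pad_0 - pad_1
                       + dilation * (ksize - 1) + 1)
        return infer_shape + out_pad

    print(infer_transpose_dim(7, 2, 2, 2, 1, 5, out_pad=1))  # 14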
......@@ -223,10 +252,14 @@ void Conv2DTransposeOpMaker::Make() {
"The format of output tensor is X (one-dimensional) of size equal"
"to the number of output channels. Only used with MKL-DNN.")
.AsDispensable();
AddOutput("Output",
"(Tensor) The output tensor of convolution transpose operator. "
"The format of output tensor is the same as input tensor.");
AddAttr<std::vector<int>>("output_padding",
"(vector<int> default: []), Additional size added "
"to one side of each dimension in the output "
"shape")
.SetDefault({});
AddAttr<std::vector<int>>("output_size",
"(vector<int> default: []), the "
"size of the output tensor")
......@@ -338,6 +371,11 @@ void Conv3DTransposeOpMaker::Make() {
"Where N is batch size, C is "
"the number of channels, D is the depth of the feature, H is the "
"height of the feature, and W is the width of the feature.");
AddAttr<std::vector<int>>("output_padding",
"(vector<int> default: []), Additional size added "
"to one side of each dimension in the output "
"shape")
.SetDefault({});
AddAttr<std::vector<int>>("output_size",
"(vector<int> default: []), the "
"size of the output tensor")
......
......@@ -29,13 +29,12 @@ class Conv2DTransposeTestCase(unittest.TestCase):
num_filters=8,
filter_size=3,
output_size=None,
output_padding=0,
padding=0,
stride=1,
dilation=1,
groups=1,
act=None,
no_bias=False,
use_cudnn=True,
data_format="NCHW",
dtype="float32"):
super(Conv2DTransposeTestCase, self).__init__(methodName)
......@@ -45,14 +44,13 @@ class Conv2DTransposeTestCase(unittest.TestCase):
self.spartial_shape = spartial_shape
self.filter_size = filter_size
self.output_size = output_size
self.output_padding = output_padding
self.padding = padding
self.stride = stride
self.dilation = dilation
self.groups = groups
self.act = act
self.no_bias = no_bias
self.use_cudnn = use_cudnn
self.data_format = data_format
self.dtype = dtype
......@@ -93,6 +91,7 @@ class Conv2DTransposeTestCase(unittest.TestCase):
bias_attr = False
else:
bias_attr = I.NumpyArrayInitializer(self.bias)
y_var = fluid.layers.conv2d_transpose(
x_var,
self.num_filters,
......@@ -104,8 +103,6 @@ class Conv2DTransposeTestCase(unittest.TestCase):
groups=self.groups,
param_attr=weight_attr,
bias_attr=bias_attr,
use_cudnn=self.use_cudnn,
act=self.act,
data_format=self.data_format)
feed_dict = {"input": self.input}
exe = fluid.Executor(place)
......@@ -125,17 +122,22 @@ class Conv2DTransposeTestCase(unittest.TestCase):
"weight", self.weight_shape, dtype=self.dtype)
b_var = fluid.data(
"bias", (self.num_filters, ), dtype=self.dtype)
y_var = F.conv2d_transpose(
if self.output_padding != 0:
output_size = None
else:
output_size = self.output_size
y_var = F.conv_transpose2d(
x_var,
w_var,
None if self.no_bias else b_var,
output_size=self.output_size,
output_size=output_size,
padding=self.padding,
output_padding=self.output_padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
act=self.act,
use_cudnn=self.use_cudnn,
data_format=self.data_format)
feed_dict = {"input": self.input, "weight": self.weight}
if self.bias is not None:
......@@ -147,32 +149,38 @@ class Conv2DTransposeTestCase(unittest.TestCase):
def paddle_nn_layer(self):
x_var = dg.to_variable(self.input)
conv = nn.Conv2DTranspose(
if self.output_padding != 0:
output_size = None
else:
output_size = self.output_size
conv = nn.ConvTranspose2d(
self.num_channels,
self.num_filters,
self.filter_size,
output_size=self.output_size,
padding=self.padding,
output_padding=self.output_padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
act=self.act,
use_cudnn=self.use_cudnn,
data_format=self.data_format,
dtype=self.dtype)
data_format=self.data_format)
conv.weight.set_value(self.weight)
if not self.no_bias:
conv.bias.set_value(self.bias)
y_var = conv(x_var)
y_var = conv(x_var, output_size)
y_np = y_var.numpy()
return y_np
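The layer test reflects the reworked nn.ConvTranspose2d as well: output_size is no longer fixed at construction but is passed to the forward call. A self-contained dygraph sketch, with shapes assumed:

    import numpy as np
    import paddle.fluid as fluid
    import paddle.fluid.dygraph as dg
    import paddle.nn as nn

    with dg.guard(fluid.CPUPlace()):
        x = dg.to_variable(np.random.randn(2, 3, 7, 7).astype("float32"))
        conv = nn.ConvTranspose2d(3, 6, 5, padding=2, stride=2)
        y = conv(x, [14, 14])   # output_size is now supplied at call time
        print(y.numpy().shape)  # (2, 6, 14, 14)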
def _test_equivalence(self, place):
place = fluid.CPUPlace()
result1 = self.fluid_layer(place)
result2 = self.functional(place)
with dg.guard(place):
result3 = self.paddle_nn_layer()
np.testing.assert_array_almost_equal(result1, result2)
np.testing.assert_array_almost_equal(result2, result3)
......@@ -194,7 +202,7 @@ class Conv2DTransposeErrorTestCase(Conv2DTransposeTestCase):
def add_cases(suite):
suite.addTest(Conv2DTransposeTestCase(methodName='runTest', act="relu"))
suite.addTest(Conv2DTransposeTestCase(methodName='runTest'))
suite.addTest(
Conv2DTransposeTestCase(
methodName='runTest', stride=[1, 2], no_bias=True, dilation=2))
......@@ -211,9 +219,6 @@ def add_cases(suite):
suite.addTest(
Conv2DTransposeTestCase(
methodName='runTest', padding="valid"))
suite.addTest(
Conv2DTransposeTestCase(
methodName='runTest', padding='valid'))
suite.addTest(
Conv2DTransposeTestCase(
methodName='runTest', filter_size=1, padding=(2, 3)))
......@@ -240,15 +245,22 @@ def add_cases(suite):
num_filters=6,
num_channels=3,
groups=3,
use_cudnn=False,
act="sigmoid",
padding="valid"))
suite.addTest(
Conv2DTransposeTestCase(
methodName='runTest',
num_filters=6,
num_channels=3,
spartial_shape=(7, 7),
filter_size=[5, 5],
groups=1,
padding=2,
stride=2,
output_size=[14, 14],
output_padding=[1, 1], ))
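The new case is self-consistent: with a 7x7 input, kernel 5, padding 2, stride 2, and output_padding 1, each spatial dimension comes out to (7 - 1) * 2 - 2 * 2 + (5 - 1) + 1 + 1 = 14, matching output_size=[14, 14].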
def add_error_cases(suite):
suite.addTest(
Conv2DTransposeErrorTestCase(
methodName='runTest', use_cudnn="not_valid"))
suite.addTest(
Conv2DTransposeErrorTestCase(
methodName='runTest', num_channels=5, groups=2))
......
......@@ -77,8 +77,13 @@ def conv2dtranspose_forward_naive(input_, filter_, attrs):
output_size = attrs['output_size']
out_h = output_size[0] + pad_h_0 + pad_h_1
out_w = output_size[1] + pad_w_0 + pad_w_1
out = np.zeros((in_n, out_c, out_h, out_w), dtype=input_.dtype)
out_pad_h = 0
out_pad_w = 0
if 'output_padding' in attrs:
out_pad_h = attrs['output_padding'][0]
out_pad_w = attrs['output_padding'][1]
out = np.zeros(
(in_n, out_c, out_h + out_pad_h, out_w + out_pad_w), dtype=input_.dtype)
for n in range(in_n):
for i in range(in_h):
......@@ -99,7 +104,8 @@ def conv2dtranspose_forward_naive(input_, filter_, attrs):
out[n, g * f_out_c + k, i1:i2:dilations[0], j1:j2:
dilations[1]] += tmp_out
out = out[:, :, pad_h_0:out_h - pad_h_1, pad_w_0:out_w - pad_w_1]
out = out[:, :, pad_h_0:out_h - pad_h_1 + out_pad_h, pad_w_0:out_w - pad_w_1
+ out_pad_w]
if attrs['data_format'] == 'NHWC':
out = np.transpose(out, [0, 2, 3, 1])
return out
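A hedged sketch of driving the naive reference directly; the strides/paddings/dilations/groups/padding_algorithm keys are assumptions, since they are read by parts of the function outside this hunk:

    import numpy as np

    x = np.random.randn(2, 3, 7, 7).astype("float32")  # NCHW
    w = np.random.randn(3, 6, 5, 5).astype("float32")  # (in_c, out_c, kh, kw)
    attrs = {
        'strides': [2, 2], 'paddings': [2, 2], 'dilations': [1, 1],
        'groups': 1, 'padding_algorithm': 'EXPLICIT',
        'output_padding': [1, 1], 'data_format': 'NCHW',
    }
    out = conv2dtranspose_forward_naive(x, w, attrs)
    print(out.shape)  # expected (2, 6, 14, 14)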
......@@ -114,6 +120,7 @@ class TestConv2dTransposeOp(OpTest):
self.use_cudnn = False
self.use_mkldnn = False
self.output_size = None
self.output_padding = []
self.data_format = "NCHW"
self.pad = [0, 0]
self.padding_algorithm = "EXPLICIT"
......@@ -138,6 +145,9 @@ class TestConv2dTransposeOp(OpTest):
if self.output_size is not None:
self.attrs['output_size'] = self.output_size
if len(self.output_padding) > 0:
self.attrs['output_padding'] = self.output_padding
output = conv2dtranspose_forward_naive(input_, filter_,
self.attrs).astype(self.dtype)
......@@ -290,6 +300,18 @@ class TestWithEvenUpsample(TestConv2dTransposeOp):
self.filter_size = [f_c, 6, 5, 5]
class TestWithEvenUpsampleOutputPadding(TestConv2dTransposeOp):
def init_test_case(self):
self.pad = [2, 2]
self.stride = [2, 2]
self.groups = 1
self.dilations = [1, 1]
self.output_padding = [1, 1]
self.input_size = [2, 3, 7, 7] # NCHW
f_c = self.input_size[1]
self.filter_size = [f_c, 6, 5, 5]
class Test_NHWC(TestConv2dTransposeOp):
def init_test_case(self):
self.pad = [0, 0]
......@@ -375,6 +397,19 @@ class TestWithEvenUpsample_NHWC(TestConv2dTransposeOp):
self.data_format = 'NHWC'
class TestWithEvenUpsample_NHWC_output_padding(TestConv2dTransposeOp):
def init_test_case(self):
self.pad = [2, 2]
self.stride = [2, 2]
self.groups = 1
self.dilations = [1, 1]
self.output_padding = [1, 1]
self.input_size = [2, 7, 7, 3] # NHWC
f_c = self.input_size[-1]
self.filter_size = [f_c, 6, 5, 5]
self.data_format = 'NHWC'
# ------------ test_cudnn ------------
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
......
......@@ -33,9 +33,7 @@ class Conv3DTransposeTestCase(unittest.TestCase):
stride=1,
dilation=1,
groups=1,
act=None,
no_bias=False,
use_cudnn=True,
data_format="NCDHW",
dtype="float32"):
super(Conv3DTransposeTestCase, self).__init__(methodName)
......@@ -50,9 +48,7 @@ class Conv3DTransposeTestCase(unittest.TestCase):
self.stride = stride
self.dilation = dilation
self.groups = groups
self.act = act
self.no_bias = no_bias
self.use_cudnn = use_cudnn
self.data_format = data_format
self.dtype = dtype
......@@ -104,8 +100,6 @@ class Conv3DTransposeTestCase(unittest.TestCase):
groups=self.groups,
param_attr=weight_attr,
bias_attr=bias_attr,
use_cudnn=self.use_cudnn,
act=self.act,
data_format=self.data_format)
feed_dict = {"input": self.input}
exe = fluid.Executor(place)
......@@ -125,7 +119,7 @@ class Conv3DTransposeTestCase(unittest.TestCase):
"weight", self.weight_shape, dtype=self.dtype)
b_var = fluid.data(
"bias", (self.num_filters, ), dtype=self.dtype)
y_var = F.conv3d_transpose(
y_var = F.conv_transpose3d(
x_var,
w_var,
None if self.no_bias else b_var,
......@@ -134,8 +128,6 @@ class Conv3DTransposeTestCase(unittest.TestCase):
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
act=self.act,
use_cudnn=self.use_cudnn,
data_format=self.data_format)
feed_dict = {"input": self.input, "weight": self.weight}
if self.bias is not None:
......@@ -147,23 +139,19 @@ class Conv3DTransposeTestCase(unittest.TestCase):
def paddle_nn_layer(self):
x_var = dg.to_variable(self.input)
conv = nn.Conv3DTranspose(
conv = nn.ConvTranspose3d(
self.num_channels,
self.num_filters,
self.filter_size,
output_size=self.output_size,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
act=self.act,
use_cudnn=self.use_cudnn,
data_format=self.data_format,
dtype=self.dtype)
data_format=self.data_format)
conv.weight.set_value(self.weight)
if not self.no_bias:
conv.bias.set_value(self.bias)
y_var = conv(x_var)
y_var = conv(x_var, self.output_size)
y_np = y_var.numpy()
return y_np
......@@ -194,7 +182,7 @@ class Conv3DTransposeErrorTestCase(Conv3DTransposeTestCase):
def add_cases(suite):
suite.addTest(Conv3DTransposeTestCase(methodName='runTest', act="tanh"))
suite.addTest(Conv3DTransposeTestCase(methodName='runTest'))
suite.addTest(
Conv3DTransposeTestCase(
methodName='runTest', stride=[1, 2, 1], dilation=2, no_bias=True))
......@@ -240,15 +228,10 @@ def add_cases(suite):
num_filters=6,
num_channels=3,
groups=3,
use_cudnn=False,
act="sigmoid",
padding="valid"))
def add_error_cases(suite):
suite.addTest(
Conv3DTransposeErrorTestCase(
methodName='runTest', use_cudnn="not_valid"))
suite.addTest(
Conv3DTransposeErrorTestCase(
methodName='runTest', num_channels=5, groups=2))
......
......@@ -37,8 +37,6 @@ class TestFunctionalConv2D(TestCase):
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NHWC"
def prepare(self):
......@@ -90,8 +88,6 @@ class TestFunctionalConv2D(TestCase):
param_attr=I.NumpyArrayInitializer(self.weight),
bias_attr=False
if self.no_bias else I.NumpyArrayInitializer(self.bias),
use_cudnn=self.use_cudnn,
act=self.act,
data_format=self.data_format)
exe = fluid.Executor(self.place)
exe.run(start)
......@@ -115,7 +111,7 @@ class TestFunctionalConv2D(TestCase):
"weight", self.weight.shape, dtype=self.dtype)
if not self.no_bias:
bias = fluid.data("bias", self.bias.shape, dtype=self.dtype)
y = F.conv2d_transpose(
y = F.conv_transpose2d(
x,
weight,
None if self.no_bias else bias,
......@@ -124,9 +120,7 @@ class TestFunctionalConv2D(TestCase):
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
act=self.act,
data_format=self.data_format,
use_cudnn=self.use_cudnn)
data_format=self.data_format)
exe = fluid.Executor(self.place)
exe.run(start)
feed_dict = {"input": self.input, "weight": self.weight}
......@@ -140,7 +134,7 @@ class TestFunctionalConv2D(TestCase):
x = dg.to_variable(self.input)
weight = dg.to_variable(self.weight)
bias = None if self.no_bias else dg.to_variable(self.bias)
y = F.conv2d_transpose(
y = F.conv_transpose2d(
x,
weight,
bias,
......@@ -148,10 +142,8 @@ class TestFunctionalConv2D(TestCase):
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
act=self.act,
groups=self.groups,
data_format=self.data_format,
use_cudnn=self.use_cudnn)
data_format=self.data_format)
out = y.numpy()
return out
......@@ -189,8 +181,6 @@ class TestFunctionalConv2DError(TestCase):
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NHWC"
def test_exception(self):
......@@ -225,7 +215,7 @@ class TestFunctionalConv2DError(TestCase):
"weight", self.weight_shape, dtype=self.dtype)
if not self.no_bias:
bias = fluid.data("bias", self.bias_shape, dtype=self.dtype)
y = F.conv2d_transpose(
y = F.conv_transpose2d(
x,
weight,
None if self.no_bias else bias,
......@@ -234,9 +224,7 @@ class TestFunctionalConv2DError(TestCase):
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
act=self.act,
data_format=self.data_format,
use_cudnn=self.use_cudnn)
data_format=self.data_format)
class TestFunctionalConv2DCase2(TestFunctionalConv2D):
......@@ -249,8 +237,6 @@ class TestFunctionalConv2DCase2(TestFunctionalConv2D):
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NHWC"
......@@ -264,8 +250,6 @@ class TestFunctionalConv2DCase3(TestFunctionalConv2D):
self.dilation = 1
self.groups = 1
self.no_bias = True
self.act = None
self.use_cudnn = True
self.data_format = "NCHW"
......@@ -279,8 +263,6 @@ class TestFunctionalConv2DCase4(TestFunctionalConv2D):
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NHWC"
......@@ -294,8 +276,6 @@ class TestFunctionalConv2DCase5(TestFunctionalConv2D):
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NHWC"
......@@ -309,8 +289,6 @@ class TestFunctionalConv2DCase6(TestFunctionalConv2D):
self.dilation = (2, 1)
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NHWC"
......@@ -324,8 +302,6 @@ class TestFunctionalConv2DCase7(TestFunctionalConv2D):
self.dilation = 1
self.groups = 4
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = False
self.data_format = "NHWC"
......@@ -340,8 +316,6 @@ class TestFunctionalConv2DCase8(TestFunctionalConv2D):
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCHW"
......@@ -355,8 +329,6 @@ class TestFunctionalConv2DCase9(TestFunctionalConv2D):
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NHWC"
......@@ -370,8 +342,6 @@ class TestFunctionalConv2DCase10(TestFunctionalConv2D):
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCHW"
......@@ -385,8 +355,6 @@ class TestFunctionalConv2DCase11(TestFunctionalConv2D):
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCHW"
......@@ -400,8 +368,6 @@ class TestFunctionalConv2DCase12(TestFunctionalConv2D):
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCHW"
......@@ -415,8 +381,6 @@ class TestFunctionalConv2DErrorCase2(TestFunctionalConv2DError):
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NHWC"
......@@ -430,8 +394,6 @@ class TestFunctionalConv2DErrorCase3(TestFunctionalConv2DError):
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NHWC"
......@@ -445,8 +407,6 @@ class TestFunctionalConv2DErrorCase4(TestFunctionalConv2DError):
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCHW"
......@@ -460,23 +420,6 @@ class TestFunctionalConv2DErrorCase5(TestFunctionalConv2DError):
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCHW"
class TestFunctionalConv2DErrorCase6(TestFunctionalConv2DError):
def setUp(self):
self.in_channels = 4
self.out_channels = 5
self.filter_shape = 3
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = "not_valid"
self.data_format = "NCHW"
......@@ -491,8 +434,6 @@ class TestFunctionalConv2DErrorCase7(TestFunctionalConv2DError):
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCHW"
......@@ -506,8 +447,6 @@ class TestFunctionalConv2DErrorCase8(TestFunctionalConv2DError):
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "not_valid"
......@@ -521,8 +460,6 @@ class TestFunctionalConv2DErrorCase9(TestFunctionalConv2DError):
self.dilation = 1
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCHW"
......
......@@ -38,7 +38,6 @@ class TestFunctionalConv3DTranspose(TestCase):
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NDHWC"
def prepare(self):
......@@ -90,7 +89,6 @@ class TestFunctionalConv3DTranspose(TestCase):
param_attr=I.NumpyArrayInitializer(self.weight),
bias_attr=False
if self.no_bias else I.NumpyArrayInitializer(self.bias),
use_cudnn=self.use_cudnn,
act=self.act,
data_format=self.data_format)
exe = fluid.Executor(self.place)
......@@ -115,7 +113,7 @@ class TestFunctionalConv3DTranspose(TestCase):
"weight", self.weight.shape, dtype=self.dtype)
if not self.no_bias:
bias = fluid.data("bias", self.bias.shape, dtype=self.dtype)
y = F.conv3d_transpose(
y = F.conv_transpose3d(
x,
weight,
None if self.no_bias else bias,
......@@ -124,9 +122,9 @@ class TestFunctionalConv3DTranspose(TestCase):
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
act=self.act,
data_format=self.data_format,
use_cudnn=self.use_cudnn)
data_format=self.data_format)
if self.act == 'sigmoid':
y = F.sigmoid(y)
exe = fluid.Executor(self.place)
exe.run(start)
feed_dict = {"input": self.input, "weight": self.weight}
......@@ -140,7 +138,7 @@ class TestFunctionalConv3DTranspose(TestCase):
x = dg.to_variable(self.input)
weight = dg.to_variable(self.weight)
bias = None if self.no_bias else dg.to_variable(self.bias)
y = F.conv3d_transpose(
y = F.conv_transpose3d(
x,
weight,
bias,
......@@ -148,10 +146,10 @@ class TestFunctionalConv3DTranspose(TestCase):
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
act=self.act,
groups=self.groups,
data_format=self.data_format,
use_cudnn=self.use_cudnn)
data_format=self.data_format)
if self.act == 'sigmoid':
y = F.sigmoid(y)
out = y.numpy()
return out
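With act and use_cudnn removed from the functional API, the activation is composed explicitly, as the test now does. A minimal self-contained dygraph sketch, shapes assumed:

    import numpy as np
    import paddle.fluid as fluid
    import paddle.fluid.dygraph as dg
    import paddle.nn.functional as F

    with dg.guard(fluid.CPUPlace()):
        # NCDHW input; weight laid out as (in_c, out_c, kd, kh, kw)
        x = dg.to_variable(np.random.randn(2, 4, 8, 8, 8).astype("float32"))
        w = dg.to_variable(np.random.randn(4, 5, 3, 3, 3).astype("float32"))
        y = F.conv_transpose3d(x, w, None, data_format="NCDHW")
        y = F.sigmoid(y)        # activation applied as a separate op
        print(y.numpy().shape)  # (2, 5, 10, 10, 10)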
......@@ -190,7 +188,6 @@ class TestFunctionalConv3DTransposeError(TestCase):
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NDHWC"
def test_exception(self):
......@@ -225,7 +222,7 @@ class TestFunctionalConv3DTransposeError(TestCase):
"weight", self.weight_shape, dtype=self.dtype)
if not self.no_bias:
bias = fluid.data("bias", self.bias_shape, dtype=self.dtype)
y = F.conv3d_transpose(
y = F.conv_transpose3d(
x,
weight,
None if self.no_bias else bias,
......@@ -234,9 +231,9 @@ class TestFunctionalConv3DTransposeError(TestCase):
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
act=self.act,
data_format=self.data_format,
use_cudnn=self.use_cudnn)
data_format=self.data_format)
if self.act == 'sigmoid':
y = F.sigmoid(y)
class TestFunctionalConv3DTransposeCase2(TestFunctionalConv3DTranspose):
......@@ -250,7 +247,6 @@ class TestFunctionalConv3DTransposeCase2(TestFunctionalConv3DTranspose):
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCDHW"
......@@ -265,7 +261,6 @@ class TestFunctionalConv3DTransposeCase3(TestFunctionalConv3DTranspose):
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NDHWC"
......@@ -280,7 +275,6 @@ class TestFunctionalConv3DTransposeCase4(TestFunctionalConv3DTranspose):
self.groups = 2
self.no_bias = True
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NDHWC"
......@@ -295,7 +289,6 @@ class TestFunctionalConv3DTransposeCase5(TestFunctionalConv3DTranspose):
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NDHWC"
......@@ -310,7 +303,6 @@ class TestFunctionalConv3DTransposeCase6(TestFunctionalConv3DTranspose):
self.groups = 4
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = False
self.data_format = "NDHWC"
......@@ -326,7 +318,6 @@ class TestFunctionalConv3DTransposeCase7(TestFunctionalConv3DTranspose):
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCDHW"
......@@ -341,7 +332,6 @@ class TestFunctionalConv3DTransposeCase8(TestFunctionalConv3DTranspose):
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NDHWC"
......@@ -356,7 +346,6 @@ class TestFunctionalConv3DTransposeCase9(TestFunctionalConv3DTranspose):
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCDHW"
......@@ -371,7 +360,6 @@ class TestFunctionalConv3DTransposeCase10(TestFunctionalConv3DTranspose):
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCDHW"
......@@ -386,7 +374,6 @@ class TestFunctionalConv3DTransposeCase11(TestFunctionalConv3DTranspose):
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCDHW"
......@@ -402,7 +389,6 @@ class TestFunctionalConv3DTransposeErrorCase2(
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NDHWC"
......@@ -418,7 +404,6 @@ class TestFunctionalConv3DTransposeErrorCase3(
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NDHWC"
......@@ -434,7 +419,6 @@ class TestFunctionalConv3DTransposeErrorCase4(
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCDHW"
......@@ -450,23 +434,6 @@ class TestFunctionalConv3DTransposeErrorCase5(
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCDHW"
class TestFunctionalConv3DTransposeErrorCase6(
TestFunctionalConv3DTransposeError):
def setUp(self):
self.in_channels = 4
self.out_channels = 5
self.filter_shape = 3
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = "not_valid"
self.data_format = "NCDHW"
......@@ -483,7 +450,6 @@ class TestFunctionalConv3DTransposeErrorCase7(
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCDHW"
......@@ -499,7 +465,6 @@ class TestFunctionalConv3DTransposeErrorCase8(
self.groups = 1
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "not_valid"
......@@ -515,7 +480,6 @@ class TestFunctionalConv3DTransposeErrorCase9(
self.groups = 2
self.no_bias = False
self.act = "sigmoid"
self.use_cudnn = True
self.data_format = "NCDHW"
......
......@@ -94,9 +94,9 @@ from .layer.common import Dropout3D #DEFINE_ALIAS
from .layer.pooling import AdaptiveAvgPool2d #DEFINE_ALIAS
from .layer.pooling import AdaptiveAvgPool3d #DEFINE_ALIAS
from .layer.conv import Conv2D #DEFINE_ALIAS
from .layer.conv import Conv2DTranspose #DEFINE_ALIAS
from .layer.conv import ConvTranspose2d #DEFINE_ALIAS
from .layer.conv import Conv3D #DEFINE_ALIAS
from .layer.conv import Conv3DTranspose #DEFINE_ALIAS
from .layer.conv import ConvTranspose3d #DEFINE_ALIAS
# from .layer.conv import TreeConv #DEFINE_ALIAS
# from .layer.conv import Conv1D #DEFINE_ALIAS
from .layer.extension import RowConv #DEFINE_ALIAS
......
......@@ -70,9 +70,9 @@ from .common import unfold #DEFINE_ALIAS
from .common import assign #DEFINE_ALIAS
from .common import interpolate #DEFINE_ALIAS
from .conv import conv2d #DEFINE_ALIAS
from .conv import conv2d_transpose #DEFINE_ALIAS
from .conv import conv_transpose2d #DEFINE_ALIAS
from .conv import conv3d #DEFINE_ALIAS
from .conv import conv3d_transpose #DEFINE_ALIAS
from .conv import conv_transpose3d #DEFINE_ALIAS
from .extension import add_position_encoding #DEFINE_ALIAS
# from .extension import autoincreased_step_counter #DEFINE_ALIAS
from .extension import continuous_value_model #DEFINE_ALIAS
......
This diff is collapsed.
......@@ -58,9 +58,9 @@ from .common import Dropout3D #DEFINE_ALIAS
from .pooling import AdaptiveAvgPool2d #DEFINE_ALIAS
from .pooling import AdaptiveAvgPool3d #DEFINE_ALIAS
from .conv import Conv2D #DEFINE_ALIAS
from .conv import Conv2DTranspose #DEFINE_ALIAS
from .conv import ConvTranspose2d #DEFINE_ALIAS
from .conv import Conv3D #DEFINE_ALIAS
from .conv import Conv3DTranspose #DEFINE_ALIAS
from .conv import ConvTranspose3d #DEFINE_ALIAS
# from .conv import TreeConv #DEFINE_ALIAS
# from .conv import Conv1D #DEFINE_ALIAS
from .extension import RowConv #DEFINE_ALIAS
......
This diff is collapsed.
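Net effect on the public namespace (a hedged summary, assuming the paddle.nn wiring shown in the __init__ hunks above):

    from paddle.nn import ConvTranspose2d, ConvTranspose3d
    from paddle.nn.functional import conv_transpose2d, conv_transpose3d

The old Conv2DTranspose / conv2d_transpose aliases are removed from these namespaces by this commit.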