From eb070476996bb5d26de5969b4d98892f104dcb42 Mon Sep 17 00:00:00 2001
From: chengduoZH
Date: Thu, 16 Nov 2017 19:58:49 +0800
Subject: [PATCH] add padding

---
 paddle/operators/conv_transpose_op.cc        |  7 +---
 paddle/operators/conv_transpose_op.h         |  6 +--
 paddle/operators/math/im2col.cu              |  4 +-
 .../paddle/v2/fluid/tests/test_conv2d_op.py  | 40 +++++++++++++------
 .../fluid/tests/test_conv2d_transpose_op.py  | 26 +++++++++---
 .../fluid/tests/test_conv3d_transpose_op.py  | 31 ++++++++++----
 6 files changed, 77 insertions(+), 37 deletions(-)

diff --git a/paddle/operators/conv_transpose_op.cc b/paddle/operators/conv_transpose_op.cc
index 13ac0cd54c..310e3f5c93 100644
--- a/paddle/operators/conv_transpose_op.cc
+++ b/paddle/operators/conv_transpose_op.cc
@@ -30,11 +30,6 @@ void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const {
   std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
   std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
 
-  for (size_t i = 0; i < paddings.size(); ++i) {
-    PADDLE_ENFORCE_EQ(paddings[i], 0,
-                      "No Padding allowed in conv transpose op.");
-  }
-
   PADDLE_ENFORCE(in_dims.size() == 4 || in_dims.size() == 5,
                  "ConvTransposeOp input should be 4-D or 5-D tensor.");
   PADDLE_ENFORCE_EQ(in_dims.size(), filter_dims.size(),
@@ -52,7 +47,7 @@ void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const {
 
   std::vector<int> output_shape({in_dims[0], filter_dims[1]});
   for (size_t i = 0; i < strides.size(); ++i) {
-    output_shape.push_back((in_dims[i + 2] - 1) * strides[i] +
+    output_shape.push_back((in_dims[i + 2] - 1) * strides[i] - 2 * paddings[i] +
                            filter_dims[i + 2]);
   }
   ctx->SetOutputDim("Output", framework::make_ddim(output_shape));
diff --git a/paddle/operators/conv_transpose_op.h b/paddle/operators/conv_transpose_op.h
index 4b2bd60437..ab336ad23c 100644
--- a/paddle/operators/conv_transpose_op.h
+++ b/paddle/operators/conv_transpose_op.h
@@ -62,7 +62,6 @@ class GemmConvTransposeKernel : public framework::OpKernel<T> {
     Tensor* output = context.Output<Tensor>("Output");
 
     std::vector<int> strides = context.Attr<std::vector<int>>("strides");
-    // Actually, no paddings and groups allowed in conv transpose.
     std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
     // TODO(Zhuoyuan): Paddings can be added in future.
     // groups will always be disabled in conv2dtranspose.
@@ -148,8 +147,8 @@ class GemmConvTransposeKernel : public framework::OpKernel<T> {
       } else if (filter_shape_vec.size() == 3) {
         // col2vol: col_matrix -> dy
        // from (c * k_d * k_h * k_w, d * h * w) to (c, o_d, o_h, o_w)
-        col2vol(context.device_context(), col, dilations, strides,
-                std::vector<int>{0, 0, 0}, &output_batch);
+        col2vol(context.device_context(), col, dilations, strides, paddings,
+                &output_batch);
       }
     }
   }
@@ -173,7 +172,6 @@ class GemmConvTransposeGradKernel : public framework::OpKernel<T> {
     if ((!input_grad) && (!filter_grad)) return;
 
     std::vector<int> strides = context.Attr<std::vector<int>>("strides");
-    // Actually, no paddings and groups allowed in conv transpose.
     std::vector<int> paddings = context.Attr<std::vector<int>>("paddings");
 
     const int batch_size = static_cast<int>(input->dims()[0]);
diff --git a/paddle/operators/math/im2col.cu b/paddle/operators/math/im2col.cu
index 347df7a0ff..bf78942439 100644
--- a/paddle/operators/math/im2col.cu
+++ b/paddle/operators/math/im2col.cu
@@ -119,8 +119,8 @@ __global__ void col2im(int n, const T* data_col, int im_height, int im_width,
   if (index < n) {
     T val = 0;
-    int w = index % im_width;
-    int h = (index / im_width) % im_height;
+    int w = index % im_width + padding_width;
+    int h = (index / im_width) % im_height + padding_height;
     int c = index / (im_width * im_height);
 
     // compute the start and end of the output
diff --git a/python/paddle/v2/fluid/tests/test_conv2d_op.py b/python/paddle/v2/fluid/tests/test_conv2d_op.py
index 907b52c405..2240dc73cd 100644
--- a/python/paddle/v2/fluid/tests/test_conv2d_op.py
+++ b/python/paddle/v2/fluid/tests/test_conv2d_op.py
@@ -110,13 +110,30 @@ class TestConv2dOp(OpTest):
         self.op_type = "conv2d"
 
 
+class TestWithPad(TestConv2dOp):
+    def init_test_case(self):
+        self.pad = [1, 1]
+        self.stride = [1, 1]
+        self.input_size = [2, 3, 5, 5]  # NCHW
+        assert np.mod(self.input_size[1], self.groups) == 0
+        f_c = self.input_size[1] / self.groups
+        self.filter_size = [6, f_c, 3, 3]
+
+
+class TestWithStride(TestConv2dOp):
+    def init_test_case(self):
+        self.pad = [1, 1]
+        self.stride = [2, 2]
+        self.input_size = [2, 3, 6, 6]  # NCHW
+        assert np.mod(self.input_size[1], self.groups) == 0
+        f_c = self.input_size[1] / self.groups
+        self.filter_size = [6, f_c, 3, 3]
+
+
 class TestWithGroup(TestConv2dOp):
     def init_group(self):
         self.groups = 3
 
-    def init_op_type(self):
-        self.op_type = "conv2d"
-
 
 class TestWith1x1(TestConv2dOp):
     def init_test_case(self):
@@ -127,15 +144,9 @@ class TestWith1x1(TestConv2dOp):
         f_c = self.input_size[1] / self.groups
         self.filter_size = [6, f_c, 1, 1]
 
-    def init_dilation(self):
-        self.dilations = [1, 1]
-
     def init_group(self):
         self.groups = 3
 
-    def init_op_type(self):
-        self.op_type = "conv2d"
-
 
 class TestWithDilation(TestConv2dOp):
     def init_test_case(self):
@@ -152,14 +163,19 @@ class TestWithDilation(TestConv2dOp):
     def init_group(self):
         self.groups = 3
 
+
+#----------------Conv2dCudnn----------------
+class TestCudnn(TestConv2dOp):
     def init_op_type(self):
-        self.op_type = "conv2d"
+        self.op_type = "conv_cudnn"
 
 
-#----------------Conv2dCudnn----------------
+class TestCudnnWithPad(TestWithPad):
+    def init_op_type(self):
+        self.op_type = "conv_cudnn"
 
 
-class TestCudnn(TestConv2dOp):
+class TestCudnnWithStride(TestWithStride):
     def init_op_type(self):
         self.op_type = "conv_cudnn"
diff --git a/python/paddle/v2/fluid/tests/test_conv2d_transpose_op.py b/python/paddle/v2/fluid/tests/test_conv2d_transpose_op.py
index 54349c018c..d7b1f2f2a3 100644
--- a/python/paddle/v2/fluid/tests/test_conv2d_transpose_op.py
+++ b/python/paddle/v2/fluid/tests/test_conv2d_transpose_op.py
@@ -4,9 +4,7 @@ from op_test import OpTest
 
 
 def conv2dtranspose_forward_naive(input_, filter_, conv2dtranspose_param):
-    # [2, 3, 5, 5]
     in_n, in_c, in_h, in_w = input_.shape
-    # [3, 6, 3, 3]
     f_c, out_c, f_h, f_w = filter_.shape
     assert in_c == f_c
 
@@ -29,6 +27,7 @@ def conv2dtranspose_forward_naive(input_, filter_, conv2dtranspose_param):
                 j1, j2 = j * stride[0], j * stride[0] + f_w
                 out[n, k, i1:i2, j1:j2] += tmp_out
 
+    out = out[:, :, pad[0]:out_h - pad[0], pad[1]:out_w - pad[1]]
     return out
 
 
@@ -36,8 +35,6 @@ class TestConv2dTransposeOp(OpTest):
     def setUp(self):
         # init as conv transpose
         self.init_op_type()
-
-        # [2, 3, 5, 5] -> kernel [3, 6, 3, 3] -> output [2, 6, 7, 7]
         self.init_test_case()
 
         conv2dtranspose_param = {'stride': self.stride, 'pad': self.pad}
@@ -55,7 +52,6 @@ class TestConv2dTransposeOp(OpTest):
         self.outputs = {'Output': output}
 
     def test_check_output(self):
-        print 'check output here for', self.op_type
         self.check_output()
 
     def test_check_grad_no_input(self):
@@ -88,6 +84,26 @@ class TestConv2dTransposeOp(OpTest):
         self.op_type = "conv2d_transpose"
 
 
+class TestWithPad(TestConv2dTransposeOp):
+    def init_test_case(self):
+        self.pad = [1, 1]
+        self.stride = [1, 1]
+        self.dilations = [1, 1]
+        self.input_size = [2, 3, 5, 5]  # NCHW
+        f_c = self.input_size[1]
+        self.filter_size = [f_c, 6, 3, 3]
+
+
+class TestWithStride(TestConv2dTransposeOp):
+    def init_test_case(self):
+        self.pad = [1, 1]
+        self.stride = [2, 2]
+        self.dilations = [1, 1]
+        self.input_size = [2, 3, 5, 5]  # NCHW
+        f_c = self.input_size[1]
+        self.filter_size = [f_c, 6, 3, 3]
+
+
 # ------------ test_cudnn ------------
 class TestCudnn(TestConv2dTransposeOp):
     def init_op_type(self):
diff --git a/python/paddle/v2/fluid/tests/test_conv3d_transpose_op.py b/python/paddle/v2/fluid/tests/test_conv3d_transpose_op.py
index 132fe79314..59a32c4082 100644
--- a/python/paddle/v2/fluid/tests/test_conv3d_transpose_op.py
+++ b/python/paddle/v2/fluid/tests/test_conv3d_transpose_op.py
@@ -4,9 +4,7 @@ from op_test import OpTest
 
 
 def conv3dtranspose_forward_naive(input_, filter_, conv3dtranspose_param):
-    # [2, 3, 5, 5, 5]
     in_n, in_c, in_d, in_h, in_w = input_.shape
-    # [3, 6, 3, 3, 3]
     f_c, out_c, f_d, f_h, f_w = filter_.shape
     assert in_c == f_c
 
@@ -14,7 +12,6 @@ def conv3dtranspose_forward_naive(input_, filter_, conv3dtranspose_param):
     out_d = (in_d - 1) * stride[0] + f_d
     out_h = (in_h - 1) * stride[1] + f_h
     out_w = (in_w - 1) * stride[2] + f_w
-
     out = np.zeros((in_n, out_c, out_d, out_h, out_w))
 
     for n in range(in_n):
@@ -33,6 +30,8 @@ def conv3dtranspose_forward_naive(input_, filter_, conv3dtranspose_param):
                     j1, j2 = j * stride[2], j * stride[2] + f_w
                     out[n, k, d1:d2, i1:i2, j1:j2] += tmp_out
 
+    out = out[:, :, pad[0]:out_d - pad[0], pad[1]:out_h - pad[1], pad[2]:out_w -
+              pad[2]]
     return out
 
 
@@ -40,8 +39,6 @@ class TestConv3dTransposeOp(OpTest):
     def setUp(self):
         # init as conv transpose
         self.init_op_type()
-
-        # [2, 3, 5, 5, 5] -> kernel [3, 6, 3, 3, 3] -> output [2, 6, 7, 7, 7]
         self.init_test_case()
 
         conv3dtranspose_param = {'stride': self.stride, 'pad': self.pad}
@@ -49,7 +46,6 @@ class TestConv3dTransposeOp(OpTest):
         filter_ = np.random.random(self.filter_size).astype("float32")
         output = conv3dtranspose_forward_naive(
             input_, filter_, conv3dtranspose_param).astype("float32")
-        # print 'deconv output py', output, output.shape
 
         self.inputs = {'Input': input_, 'Filter': filter_}
         self.attrs = {
@@ -60,7 +56,6 @@ class TestConv3dTransposeOp(OpTest):
         self.outputs = {'Output': output}
 
     def test_check_output(self):
-        print 'check output here'
         self.check_output()
 
     def test_check_grad(self):
@@ -85,7 +80,7 @@ class TestConv3dTransposeOp(OpTest):
         self.pad = [0, 0, 0]
         self.stride = [1, 1, 1]
         self.dilations = [1, 1, 1]
-        self.input_size = [2, 3, 5, 5, 5]  # NCHW
+        self.input_size = [2, 3, 5, 5, 5]  # NCDHW
         f_c = self.input_size[1]
         self.filter_size = [f_c, 6, 3, 3, 3]
 
@@ -93,5 +88,25 @@ class TestConv3dTransposeOp(OpTest):
         self.op_type = "conv3d_transpose"
 
 
+class TestWithPad(TestConv3dTransposeOp):
+    def init_test_case(self):
+        self.pad = [1, 1, 1]
+        self.stride = [1, 1, 1]
+        self.dilations = [1, 1, 1]
+        self.input_size = [2, 3, 5, 5, 5]  # NCDHW
+        f_c = self.input_size[1]
+        self.filter_size = [f_c, 6, 3, 3, 3]
+
+
+class TestWithStride(TestConv3dTransposeOp):
+    def init_test_case(self):
+        self.pad = [1, 1, 1]
+        self.stride = [2, 2, 2]
+        self.dilations = [1, 1, 1]
+        self.input_size = [2, 3, 5, 5, 5]  # NCDHW
+        f_c = self.input_size[1]
+        self.filter_size = [f_c, 6, 3, 3, 3]
+
+
 if __name__ == '__main__':
     unittest.main()
--
GitLab
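
Note on the new InferShape rule: with padding, each spatial dimension of a transposed convolution grows as (in - 1) * stride - 2 * pad + kernel. The following is a minimal sketch replaying the patched formula against the sizes used in the new TestWithPad and TestWithStride cases; the helper name out_size is chosen here for illustration and is not part of the patch.

def out_size(in_size, kernel, stride, pad):
    # Mirrors the patched line in conv_transpose_op.cc:
    #   (in_dims[i + 2] - 1) * strides[i] - 2 * paddings[i] + filter_dims[i + 2]
    return (in_size - 1) * stride - 2 * pad + kernel

# TestWithPad (2-D transpose): 5x5 input, 3x3 kernel, stride 1, pad 1 -> 5x5.
assert out_size(5, 3, 1, 1) == 5
# TestWithStride: same input and kernel, stride 2, pad 1 -> 9x9.
assert out_size(5, 3, 2, 1) == 9
# The old pad-free rule is the special case pad = 0: 5 -> 7.
assert out_size(5, 3, 1, 0) == 7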
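
The updated naive references handle padding by first computing the pad-free transposed-convolution output and then cropping pad elements from each spatial border; this is equivalent because padding the forward convolution's input removes exactly that border from its transpose. A minimal 2-D sketch of the cropping step, assuming numpy (the array contents are placeholders):

import numpy as np


def crop_pad_2d(out, pad):
    # Same trimming as the line added to conv2dtranspose_forward_naive:
    #   out = out[:, :, pad[0]:out_h - pad[0], pad[1]:out_w - pad[1]]
    _, _, out_h, out_w = out.shape
    return out[:, :, pad[0]:out_h - pad[0], pad[1]:out_w - pad[1]]


full = np.zeros((2, 6, 7, 7))  # pad-free output of a 5x5 input with a 3x3 kernel
assert crop_pad_2d(full, [1, 1]).shape == (2, 6, 5, 5)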
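
In the CUDA col2im change, adding padding_width/padding_height shifts each image coordinate into the padded frame before the window bounds (elided from the hunk behind the "compute the start and end of the output" comment) locate the contributing columns. The sketch below is an assumption about that elided logic, based on the conventional Caffe-style col2im window arithmetic; it is illustrative Python, not code from the patch.

def contributing_cols(h, w, pad, stride, k, col_h, col_w):
    # Shift into the padded coordinate frame, as the patched lines now do.
    h, w = h + pad, w + pad
    # Assumed shape of the elided start/end computation.
    h_start = 0 if h < k else (h - k) // stride + 1
    h_end = min(h // stride + 1, col_h)
    w_start = 0 if w < k else (w - k) // stride + 1
    w_end = min(w // stride + 1, col_w)
    return (h_start, h_end), (w_start, w_end)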