From 26a6e27afe907a905a71cd8456ce2af37ea39bab Mon Sep 17 00:00:00 2001
From: liym27 <33742067+liym27@users.noreply.github.com>
Date: Fri, 8 Nov 2019 13:04:23 +0800
Subject: [PATCH] fix bug in pool/conv/conv_transpose: UpdatePaddingAndDilation,
 _get_padding_with_SAME and conv2dtranspose_forward_naive. (#20997)

* fix bugs in pool/conv/conv_transpose:
  1. use stride[i], not stride[0], for each dimension in
     UpdatePaddingAndDilation;
  2. fix the helper _get_padding_with_SAME in
     test_conv/conv_transpose_op.py;
  3. fix the output-window computation in
     conv2dtranspose_forward_naive.
  test=develop

* change the tests so that the data differs across dimensions; identical
  sizes and strides in every dimension would mask these per-dimension
  bugs. test=develop
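A minimal standalone sketch of the per-dimension SAME-padding rule that
fixes 1 and 2 implement (a hypothetical helper for illustration, not the
patched source; data_dims, ksize and strides are per-spatial-dimension
lists):

    # Hypothetical illustration of SAME padding: each dimension must be
    # divided by its own stride, not by strides[0].
    def same_padding(data_dims, ksize, strides):
        paddings = []
        for i in range(len(data_dims)):
            out_size = (data_dims[i] + strides[i] - 1) // strides[i]
            pad_sum = max((out_size - 1) * strides[i] + ksize[i] - data_dims[i], 0)
            pad_0 = pad_sum // 2
            paddings += [pad_0, pad_sum - pad_0]
        return paddings

    # same_padding([5, 5], [3, 3], [1, 2]) -> [1, 1, 1, 1];
    # dividing by strides[0] everywhere would instead give [1, 1, 4, 4].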
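Likewise, a simplified, runnable sketch of the scatter loop that fix 3
corrects (illustrative only: single channel, no groups, no dilation);
each spatial axis must advance by its own stride and use its own kernel
extent:

    import numpy as np

    def conv2d_transpose_naive(x, w, stride):
        # Hypothetical reduced form of conv2dtranspose_forward_naive.
        in_h, in_w = x.shape
        f_h, f_w = w.shape
        out = np.zeros(((in_h - 1) * stride[0] + f_h,
                        (in_w - 1) * stride[1] + f_w))
        for i in range(in_h):
            for j in range(in_w):
                i1, i2 = i * stride[0], i * stride[0] + f_h  # rows: stride[0]
                j1, j2 = j * stride[1], j * stride[1] + f_w  # cols: stride[1], not stride[0]
                out[i1:i2, j1:j2] += x[i, j] * w
        return out

    # conv2d_transpose_naive(np.ones((2, 2)), np.ones((3, 3)), [1, 2])
    # yields a 4 x 5 output; using stride[0] for the columns would place
    # the kernel windows at the wrong offsets whenever strides differ.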
---
 paddle/fluid/operators/conv_op.h              |   4 +-
 paddle/fluid/operators/conv_transpose_op.h    |  42 +------
 paddle/fluid/operators/pool_op.h              |   6 +-
 .../fluid/tests/unittests/test_conv2d_op.py   |  10 +-
 .../unittests/test_conv2d_transpose_op.py     |  20 ++--
 .../fluid/tests/unittests/test_conv3d_op.py   |  31 ++++-
 .../unittests/test_conv3d_transpose_op.py     |  26 ++--
 .../fluid/tests/unittests/test_pool2d_op.py   |  14 +++
 .../fluid/tests/unittests/test_pool3d_op.py   | 111 +++++++++---------
 9 files changed, 123 insertions(+), 141 deletions(-)

diff --git a/paddle/fluid/operators/conv_op.h b/paddle/fluid/operators/conv_op.h
index c1b8d868b29..bb6a805147c 100644
--- a/paddle/fluid/operators/conv_op.h
+++ b/paddle/fluid/operators/conv_op.h
@@ -83,10 +83,10 @@ inline void UpdatePaddingAndDilation(std::vector<int>* paddings,
         "Paddings size should be the same or twice as the input data size.");
   }
 
-  // when padding_desc is "VALID" or "SAME"
+  // when padding_algorithm is "VALID" or "SAME"
   if (padding_algorithm == "SAME") {
     for (size_t i = 0; i < data_dims.size(); ++i) {
-      int out_size = (data_dims[i] + strides[i] - 1) / strides[0];
+      int out_size = (data_dims[i] + strides[i] - 1) / strides[i];
       int pad_sum =
           std::max((out_size - 1) * strides[i] + ksize[i] - data_shape[i], 0);
       int pad_0 = pad_sum / 2;
diff --git a/paddle/fluid/operators/conv_transpose_op.h b/paddle/fluid/operators/conv_transpose_op.h
index fa3bb84b06e..9e1b4f3e362 100644
--- a/paddle/fluid/operators/conv_transpose_op.h
+++ b/paddle/fluid/operators/conv_transpose_op.h
@@ -18,6 +18,7 @@ limitations under the License. */
 #include <vector>
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/operators/conv_op.h"
 #include "paddle/fluid/operators/math/blas.h"
 #include "paddle/fluid/operators/math/concat_and_split.h"
 #include "paddle/fluid/operators/math/depthwise_conv.h"
@@ -77,47 +78,6 @@ static void Slice(const framework::ExecutionContext& context,
   Slice(context, input, out, begin_vec, end_vec, axes_vec);
 }
 
-inline void UpdatePaddingAndDilation(std::vector<int>* paddings,
-                                     std::vector<int>* dilation,
-                                     const std::string padding_algorithm,
-                                     const framework::DDim data_dims,
-                                     const std::vector<int>& strides,
-                                     const std::vector<int>& ksize) {
-  // set padding size == data_dims.size() * 2
-  auto data_shape = framework::vectorize<int>(data_dims);
-  if (paddings->size() == data_dims.size()) {
-    for (size_t i = 0; i < data_dims.size(); ++i) {
-      int copy_pad = *(paddings->begin() + 2 * i);
-      paddings->insert(paddings->begin() + 2 * i + 1, copy_pad);
-    }
-  } else {
-    PADDLE_ENFORCE_EQ(
-        data_dims.size() * 2, paddings->size(),
-        "Paddings size should be the same or twice as the input data size.");
-  }
-
-  // when padding_algorithm is "VALID" or "SAME"
-  if (padding_algorithm == "SAME") {
-    for (size_t i = 0; i < data_dims.size(); ++i) {
-      int out_size = (data_dims[i] + strides[i] - 1) / strides[0];
-      int pad_sum =
-          std::max((out_size - 1) * strides[i] + ksize[i] - data_shape[i], 0);
-      int pad_0 = pad_sum / 2;
-      int pad_1 = pad_sum - pad_0;
-      *(paddings->begin() + i * 2) = pad_0;
-      *(paddings->begin() + i * 2 + 1) = pad_1;
-
-      // dilation
-      *(dilation->begin() + i) = 1;
-    }
-
-  } else if (padding_algorithm == "VALID") {
-    for (auto it = paddings->begin(); it != paddings->end(); it++) {
-      *it = 0;
-    }
-  }
-}
-
 // Define Op classes in .h file so that other conv transpose
 // operator implementations can reuse the code.
 class Conv2DTransposeOpMaker : public framework::OpProtoAndCheckerMaker {
diff --git a/paddle/fluid/operators/pool_op.h b/paddle/fluid/operators/pool_op.h
index eec989611ce..3b6246cc887 100644
--- a/paddle/fluid/operators/pool_op.h
+++ b/paddle/fluid/operators/pool_op.h
@@ -66,7 +66,7 @@ inline void UpdatePadding(std::vector<int>* paddings, const bool global_pooling,
   // set padding size == data_dims.size() * 2
   auto data_shape = framework::vectorize<int>(data_dims);
   if (paddings->size() == data_dims.size()) {
-    for (int i = 0; i < data_dims.size(); ++i) {
+    for (size_t i = 0; i < data_dims.size(); ++i) {
       int copy_pad = *(paddings->begin() + 2 * i);
       paddings->insert(paddings->begin() + 2 * i + 1, copy_pad);
     }
@@ -76,10 +76,10 @@ inline void UpdatePadding(std::vector<int>* paddings, const bool global_pooling,
       "Paddings size should be the same or twice as the pooling size.");
   }
 
-  // when padding_desc is "VALID" or "SAME"
+  // when padding_algorithm is "VALID" or "SAME"
   if (padding_algorithm == "SAME") {
     for (int i = 0; i < data_dims.size(); ++i) {
-      int out_size = (data_dims[i] + strides[i] - 1) / strides[0];
+      int out_size = (data_dims[i] + strides[i] - 1) / strides[i];
       int pad_sum =
           std::max((out_size - 1) * strides[i] + ksize[i] - data_shape[i], 0);
       int pad_0 = pad_sum / 2;
diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_op.py
index e7388f415aa..b5fc10e8683 100644
--- a/python/paddle/fluid/tests/unittests/test_conv2d_op.py
+++ b/python/paddle/fluid/tests/unittests/test_conv2d_op.py
@@ -73,11 +73,7 @@ def conv2d_forward_naive(input,
         pad = [0, 0, 0, 0]
     elif padding_algorithm == "SAME":
         dilation = [1, 1]
-        input_data_shape = []
-        if data_format == "NCHW":
-            input_data_shape = input.shape[2:4]
-        elif data_format == "NHWC":
-            input_data_shape = input.shape[1:3]
+        input_data_shape = input.shape[2:4]
         pad = _get_padding_with_SAME(input_data_shape, ksize, stride)
 
     pad_h_0, pad_h_1 = pad[0], pad[0]
@@ -775,11 +771,11 @@ class TestConv2dOp_v2(OpTest):
 
     def init_test_case(self):
         self.pad = [0, 0]
-        self.stride = [1, 1]
+        self.stride = [1, 2]
         self.input_size = [2, 3, 5, 5]  # NCHW
         assert np.mod(self.input_size[1], self.groups) == 0
         f_c = self.input_size[1] // self.groups
-        self.filter_size = [6, f_c, 3, 3]
+        self.filter_size = [6, f_c, 4, 3]
 
     def init_dilation(self):
         self.dilations = [1, 1]
diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py
index 08eb559d957..5e0a3d96836 100644
--- a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py
+++ b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py
@@ -59,12 +59,8 @@ def conv2dtranspose_forward_naive(input_, filter_, attrs):
     if padding_algorithm == "VALID":
         pad = [0, 0, 0, 0]
     elif padding_algorithm == "SAME":
-        dilation = [1, 1]
-        input_data_shape = []
-        if attrs['data_format'] == "NCHW":
-            input_data_shape = input_.shape[2:4]
-        elif attrs['data_format'] == "NHWC":
-            input_data_shape = input_.shape[1:3]
+        dilations = [1, 1]
+        input_data_shape = input_.shape[2:4]
         pad = _get_padding_with_SAME(input_data_shape, ksize, stride)
 
     pad_h_0, pad_h_1 = pad[0], pad[0]
@@ -99,7 +95,7 @@ def conv2dtranspose_forward_naive(input_, filter_, attrs):
                     filter_[g * sub_in_c:(g + 1) * sub_in_c, k, :, :],
                     axis=0)
                 i1, i2 = i * stride[0], i * stride[0] + d_bolck_h
-                j1, j2 = j * stride[0], j * stride[0] + d_bolck_h
+                j1, j2 = j * stride[1], j * stride[1] + d_bolck_w
                 out[n, g * f_out_c + k, i1:i2:dilations[0], j1:j2:
                     dilations[1]] += tmp_out
 
@@ -231,12 +227,12 @@ class TestWithAsymmetricPad(TestConv2dTransposeOp):
 
 class TestWithSAMEPad(TestConv2dTransposeOp):
     def init_test_case(self):
-        self.stride = [1, 1]
-        self.dilations = [1, 1]
+        self.stride = [2, 1]
+        self.dilations = [1, 2]
         self.groups = 1
-        self.input_size = [2, 3, 5, 5]  # NCHW
+        self.input_size = [2, 3, 6, 5]  # NCHW
         f_c = self.input_size[1]
-        self.filter_size = [f_c, 6, 3, 3]
+        self.filter_size = [f_c, 6, 4, 3]
         self.padding_algorithm = 'SAME'
 
 
@@ -429,7 +425,7 @@ class TestCUDNNWithAsymmetricPad(TestWithAsymmetricPad):
 class TestCUDNNWithSAMEPad(TestWithSAMEPad):
     def init_test_case(self):
         self.pad = [1, 0, 1, 2]
-        self.stride = [1, 1]
+        self.stride = [1, 2]
         self.groups = 1
         self.dilations = [1, 1]
         self.input_size = [2, 3, 5, 5]  # NCHW
diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_op.py b/python/paddle/fluid/tests/unittests/test_conv3d_op.py
index 015d3caaa9a..6d5255cf831 100644
--- a/python/paddle/fluid/tests/unittests/test_conv3d_op.py
+++ b/python/paddle/fluid/tests/unittests/test_conv3d_op.py
@@ -75,11 +75,7 @@ def conv3d_forward_naive(input,
         pad = [0, 0, 0, 0, 0, 0]
     elif padding_algorithm == "SAME":
         dilation = [1, 1, 1]
-        input_data_shape = []
-        if data_format == "NCDHW":
-            input_data_shape = input.shape[2:5]
-        elif data_format == "NDHWC":
-            input_data_shape = input.shape[1:4]
+        input_data_shape = input.shape[2:5]
         pad = _get_padding_with_SAME(input_data_shape, ksize, stride)
 
     pad_d_0, pad_d_1 = pad[0], pad[0]
@@ -597,11 +593,36 @@ class TestConv3dOp_2(OpTest):
 
 
 class TestConv3dOp_AsyPadding(TestConv3dOp_2):
+    def init_test_case(self):
+        self.stride = [1, 1, 2]
+        self.input_size = [2, 3, 4, 4, 4]  # NCDHW
+        assert np.mod(self.input_size[1], self.groups) == 0
+        f_c = self.input_size[1] // self.groups
+        self.filter_size = [6, f_c, 3, 3, 3]
+
     def init_paddings(self):
         self.pad = [1, 0, 1, 0, 0, 2]
         self.padding_algorithm = "EXPLICIT"
 
 
+class TestConv3dOp_DiffDataInDiffDim(TestConv3dOp_2):
+    def init_test_case(self):
+        self.stride = [1, 1, 2]
+        self.input_size = [2, 3, 4, 5, 5]  # NCDHW
+        assert np.mod(self.input_size[1], self.groups) == 0
+        f_c = self.input_size[1] // self.groups
+        self.filter_size = [6, f_c, 3, 4, 3]
+
+    def init_paddings(self):
+        self.pad = [1, 0, 1, 0, 0, 2]
+        self.padding_algorithm = "EXPLICIT"
+
+
+create_test_padding_SAME_class(TestConv3dOp_DiffDataInDiffDim)
+create_test_padding_VALID_class(TestConv3dOp_DiffDataInDiffDim)
+create_test_channel_last_class(TestConv3dOp_DiffDataInDiffDim)
+
+
 class TestCase1_AsyPadding(TestConv3dOp_2):
     def init_test_case(self):
         self.stride = [1, 1, 1]
diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py b/python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py
index f90ca27c09e..d44bd1e449e 100644
--- a/python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py
+++ b/python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py
@@ -58,12 +58,8 @@ def conv3dtranspose_forward_naive(input_, filter_, attrs):
     if padding_algorithm == "VALID":
         pad = [0, 0, 0, 0, 0, 0]
     elif padding_algorithm == "SAME":
-        dilation = [1, 1, 1]
-        input_data_shape = []
-        if attrs['data_format'] == "NCHW":
-            input_data_shape = input_.shape[2:5]
-        elif attrs['data_format'] == "NHWC":
-            input_data_shape = input_.shape[1:4]
+        dilations = [1, 1, 1]
+        input_data_shape = input_.shape[2:5]
         pad = _get_padding_with_SAME(input_data_shape, ksize, stride)
 
     pad_d_0, pad_d_1 = pad[0], pad[0]
@@ -226,23 +222,23 @@ class TestWithAsymmetricPad(TestConv3dTransposeOp):
 
 class TestWithSAMEPad(TestConv3dTransposeOp):
     def init_test_case(self):
-        self.stride = [1, 1, 1]
-        self.dilations = [1, 1, 1]
+        self.stride = [1, 1, 2]
+        self.dilations = [1, 2, 1]
         self.groups = 1
-        self.input_size = [2, 3, 5, 5, 5]  # NCDHW
+        self.input_size = [2, 3, 5, 5, 6]  # NCDHW
         f_c = self.input_size[1]
-        self.filter_size = [f_c, 6, 3, 3, 3]
+        self.filter_size = [f_c, 6, 3, 3, 4]
         self.padding_algorithm = 'SAME'
 
 
 class TestWithVALIDPad(TestConv3dTransposeOp):
     def init_test_case(self):
-        self.stride = [1, 1, 1]
+        self.stride = [2, 1, 1]
         self.dilations = [1, 1, 1]
         self.groups = 1
         self.input_size = [2, 3, 5, 5, 5]  # NCDHW
         f_c = self.input_size[1]
-        self.filter_size = [f_c, 6, 3, 3, 3]
+        self.filter_size = [f_c, 6, 3, 4, 3]
         self.padding_algorithm = 'VALID'
 
 
@@ -398,12 +394,12 @@ class TestCUDNNWithAsymmetricPad(TestWithAsymmetricPad):
                      "core is not compiled with CUDA")
 class TestCUDNNWithSAMEPad(TestWithSAMEPad):
     def init_test_case(self):
-        self.stride = [1, 1, 1]
-        self.dilations = [1, 1, 1]
+        self.stride = [1, 1, 2]
+        self.dilations = [1, 2, 1]
         self.groups = 1
         self.input_size = [2, 3, 5, 5, 5]  # NCDHW
         f_c = self.input_size[1]
-        self.filter_size = [f_c, 6, 3, 3, 3]
+        self.filter_size = [f_c, 6, 3, 4, 3]
         self.padding_algorithm = 'SAME'
 
     def init_op_type(self):
diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_op.py b/python/paddle/fluid/tests/unittests/test_pool2d_op.py
index 3f66d41244e..67c81121741 100644
--- a/python/paddle/fluid/tests/unittests/test_pool2d_op.py
+++ b/python/paddle/fluid/tests/unittests/test_pool2d_op.py
@@ -950,6 +950,20 @@ create_test_cudnn_padding_VALID_class(TestCase4_channel_last)
 create_test_cudnn_padding_VALID_class(TestCase5_channel_last)
 
 
+class TestCase1_strides(TestCase1):
+    def init_test_case(self):
+        self.ksize = [3, 3]
+        self.strides = [1, 2]
+
+    def init_shape(self):
+        self.shape = [2, 3, 4, 5]
+
+
+create_test_cudnn_class(TestCase1_strides)
+create_test_padding_SAME_class(TestCase1_strides)
+create_test_cudnn_padding_SAME_class(TestCase1_strides)
+
+
 # ----- test API
 class TestPool2dAPI(OpTest):
     def test_api(self):
diff --git a/python/paddle/fluid/tests/unittests/test_pool3d_op.py b/python/paddle/fluid/tests/unittests/test_pool3d_op.py
index 4865e9d5ab1..488f6d2593e 100644
--- a/python/paddle/fluid/tests/unittests/test_pool3d_op.py
+++ b/python/paddle/fluid/tests/unittests/test_pool3d_op.py
@@ -211,6 +211,8 @@ class TestPool3d_Op(OpTest):
         self.init_kernel_type()
         self.dtype = np.float32
         self.init_test_case()
+        self.padding_algorithm = "EXPLICIT"
+        self.init_paddings()
         self.init_global_pool()
         self.init_kernel_type()
         self.init_pool_type()
@@ -224,7 +226,7 @@ class TestPool3d_Op(OpTest):
         output = pool3D_forward_naive(
             input, self.ksize, self.strides, self.paddings, self.global_pool,
             self.ceil_mode, self.exclusive, self.adaptive, self.data_format,
-            self.pool_type).astype(self.dtype)
+            self.pool_type, self.padding_algorithm).astype(self.dtype)
 
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)}
 
@@ -238,7 +240,8 @@ class TestPool3d_Op(OpTest):
             'ceil_mode': self.ceil_mode,
             'data_format': self.data_format,
             'exclusive': self.exclusive,
-            'adaptive': self.adaptive
+            'adaptive': self.adaptive,
+            "padding_algorithm": self.padding_algorithm,
         }
 
         self.outputs = {'Out': output}
@@ -267,16 +270,18 @@ class TestPool3d_Op(OpTest):
         self.data_format = "NCDHW"
 
     def init_shape(self):
-        self.shape = [2, 3, 5, 5, 5]
+        self.shape = [2, 3, 5, 6, 5]
 
     def init_test_case(self):
-        self.ksize = [3, 3, 3]
-        self.strides = [1, 1, 1]
+        self.ksize = [2, 3, 1]
+        self.strides = [2, 2, 3]
+
+    def init_paddings(self):
         self.paddings = [0, 0, 0]
+        self.padding_algorithm = "EXPLICIT"
 
     def init_kernel_type(self):
         self.use_cudnn = False
-        #pass
 
     def init_pool_type(self):
         self.pool_type = "avg"
@@ -301,6 +306,8 @@ class TestCase1(TestPool3d_Op):
     def init_test_case(self):
         self.ksize = [3, 3, 3]
         self.strides = [1, 1, 1]
+
+    def init_paddings(self):
         self.paddings = [0, 0, 0]
 
     def init_pool_type(self):
@@ -312,11 +319,13 @@ class TestCase1(TestPool3d_Op):
 
 class TestCase2(TestPool3d_Op):
     def init_shape(self):
-        self.shape = [2, 3, 7, 7, 7]
+        self.shape = [2, 3, 6, 7, 7]
 
     def init_test_case(self):
-        self.ksize = [3, 3, 3]
-        self.strides = [1, 1, 1]
+        self.ksize = [3, 3, 4]
+        self.strides = [1, 3, 2]
+
+    def init_paddings(self):
         self.paddings = [1, 1, 1]
 
     def init_pool_type(self):
@@ -446,32 +455,36 @@ class TestAvgPoolAdaptive(TestCase1):
 
 
 #-------test pool3d with asymmetric padding------
-
-
 class TestPool3d_Op_AsyPadding(TestPool3d_Op):
     def init_test_case(self):
-        self.ksize = [3, 3, 3]
-        self.strides = [1, 1, 1]
+        self.ksize = [3, 4, 3]
+        self.strides = [1, 1, 2]
+
+    def init_paddings(self):
         self.paddings = [0, 0, 0, 2, 3, 0]
 
     def init_shape(self):
-        self.shape = [2, 3, 5, 5, 5]
+        self.shape = [2, 3, 5, 5, 6]
 
 
 class TestCase1_AsyPadding(TestCase1):
     def init_test_case(self):
-        self.ksize = [3, 3, 3]
-        self.strides = [1, 1, 1]
+        self.ksize = [3, 3, 4]
+        self.strides = [1, 1, 2]
+
+    def init_paddings(self):
         self.paddings = [1, 0, 2, 1, 2, 1]
 
     def init_shape(self):
-        self.shape = [2, 3, 7, 7, 7]
+        self.shape = [2, 3, 7, 7, 6]
 
 
 class TestCase2_AsyPadding(TestCase2):
     def init_test_case(self):
         self.ksize = [3, 3, 3]
         self.strides = [1, 1, 1]
+
+    def init_paddings(self):
         self.paddings = [1, 2, 1, 1, 1, 0]
 
     def init_shape(self):
@@ -482,6 +495,8 @@ class TestCase3_AsyPadding(TestCase3):
     def init_test_case(self):
         self.ksize = [3, 3, 3]
         self.strides = [1, 1, 1]
+
+    def init_paddings(self):
         self.paddings = [1, 0, 0, 0, 1, 0]
 
     def init_shape(self):
@@ -492,6 +507,8 @@ class TestCase4_AsyPadding(TestCase4):
     def init_test_case(self):
         self.ksize = [3, 3, 3]
         self.strides = [1, 1, 1]
+
+    def init_paddings(self):
         self.paddings = [1, 0, 2, 1, 2, 1]
 
     def init_shape(self):
@@ -500,9 +517,10 @@ class TestCase4_AsyPadding(TestCase4):
 
 class TestCase5_AsyPadding(TestCase5):
     def init_test_case(self):
-        self.shape = [2, 7, 7, 7, 3]
         self.ksize = [3, 3, 3]
         self.strides = [1, 1, 1]
+
+    def init_paddings(self):
         self.paddings = [1, 2, 1, 1, 1, 0]
 
     def init_shape(self):
@@ -534,14 +552,9 @@ class TestAvgInclude_AsyPadding(TestCase2):
     def init_exclusive(self):
         self.exclusive = False
 
-    def init_test_case(self):
-        self.ksize = [3, 3, 3]
-        self.strides = [1, 1, 1]
+    def init_paddings(self):
         self.paddings = [1, 2, 1, 1, 1, 0]
 
-    def init_shape(self):
-        self.shape = [2, 3, 7, 7, 7]
-
 
 @unittest.skipIf(not core.is_compiled_with_cuda(),
                  "core is not compiled with CUDA")
@@ -552,9 +565,7 @@ class TestCUDNNAvgInclude_AsyPadding(TestCase2):
     def init_exclusive(self):
        self.exclusive = False
 
-    def init_test_case(self):
-        self.ksize = [3, 3, 3]
-        self.strides = [1, 1, 1]
+    def init_paddings(self):
         self.paddings = [1, 0, 0, 0, 0, 0]
 
     def init_shape(self):
@@ -565,14 +576,9 @@ class TestAvgPoolAdaptive_AsyPadding(TestCase1):
     def init_adaptive(self):
         self.adaptive = True
 
-    def init_test_case(self):
-        self.ksize = [3, 3, 3]
-        self.strides = [1, 1, 1]
+    def init_paddings(self):
         self.paddings = [1, 0, 2, 1, 2, 1]
 
-    def init_shape(self):
-        self.shape = [2, 3, 7, 7, 7]
-
 
 # ------------ test channel_last --------------
 class TestPool3d_channel_last(TestPool3d_Op):
@@ -580,7 +586,7 @@ class TestPool3d_channel_last(TestPool3d_Op):
         self.data_format = "NDHWC"
 
     def init_shape(self):
-        self.shape = [2, 5, 5, 5, 3]
+        self.shape = [2, 5, 5, 6, 3]
 
 
 class TestCase1_channel_last(TestCase1):
@@ -596,7 +602,7 @@ class TestCase2_channel_last(TestCase2):
         self.data_format = "NDHWC"
 
     def init_shape(self):
-        self.shape = [2, 7, 7, 7, 3]
+        self.shape = [2, 7, 7, 5, 3]
 
 
 class TestCase3_channel_last(TestCase3):
@@ -604,7 +610,7 @@ class TestCase3_channel_last(TestCase3):
         self.data_format = "NDHWC"
 
     def init_shape(self):
-        self.shape = [2, 5, 5, 5, 3]
+        self.shape = [2, 5, 6, 5, 3]
 
 
 class TestCase4_channel_last(TestCase4):
@@ -612,7 +618,7 @@ class TestCase4_channel_last(TestCase4):
         self.data_format = "NDHWC"
 
     def init_shape(self):
-        self.shape = [2, 7, 7, 7, 3]
+        self.shape = [2, 7, 6, 7, 3]
 
 
 class TestCase5_channel_last(TestCase5):
@@ -690,7 +696,7 @@ class TestPool3d_Op_AsyPadding_channel_last(TestPool3d_Op_AsyPadding):
         self.data_format = "NDHWC"
 
     def init_shape(self):
-        self.shape = [2, 5, 5, 5, 3]
+        self.shape = [2, 5, 5, 6, 3]
 
 
 class TestCase1_AsyPadding_channel_last(TestCase1_AsyPadding):
@@ -698,7 +704,7 @@ class TestCase1_AsyPadding_channel_last(TestCase1_AsyPadding):
         self.data_format = "NDHWC"
 
     def init_shape(self):
-        self.shape = [2, 7, 7, 7, 3]
+        self.shape = [2, 7, 6, 8, 3]
 
 
 class TestCase2_AsyPadding_channel_last(TestCase2_AsyPadding):
@@ -706,7 +712,7 @@ class TestCase2_AsyPadding_channel_last(TestCase2_AsyPadding):
         self.data_format = "NDHWC"
 
     def init_shape(self):
-        self.shape = [2, 7, 7, 7, 3]
+        self.shape = [2, 6, 8, 7, 3]
 
 
 class TestCase3_AsyPadding_channel_last(TestCase3_AsyPadding):
@@ -714,7 +720,7 @@ class TestCase3_AsyPadding_channel_last(TestCase3_AsyPadding):
         self.data_format = "NDHWC"
 
     def init_shape(self):
-        self.shape = [2, 5, 5, 5, 3]
+        self.shape = [2, 5, 7, 5, 3]
 
 
 class TestCase4_AsyPadding_channel_last(TestCase4_AsyPadding):
@@ -722,7 +728,7 @@ class TestCase4_AsyPadding_channel_last(TestCase4_AsyPadding):
         self.data_format = "NDHWC"
 
     def init_shape(self):
-        self.shape = [2, 7, 7, 7, 3]
+        self.shape = [2, 6, 7, 7, 3]
 
 
 class TestCase5_AsyPadding_channel_last(TestCase5_AsyPadding):
@@ -730,7 +736,7 @@ class TestCase5_AsyPadding_channel_last(TestCase5_AsyPadding):
         self.data_format = "NDHWC"
 
     def init_shape(self):
-        self.shape = [2, 7, 7, 7, 3]
+        self.shape = [2, 7, 8, 6, 3]
 
 
 create_test_cudnn_class(TestPool3d_Op_AsyPadding_channel_last)
@@ -751,9 +757,6 @@ class TestAvgInclude_AsyPadding_channel_last(TestAvgInclude_AsyPadding):
     def init_data_format(self):
         self.data_format = "NDHWC"
 
-    def init_shape(self):
-        self.shape = [2, 7, 7, 7, 3]
-
 
 @unittest.skipIf(not core.is_compiled_with_cuda(),
                  "core is not compiled with CUDA")
@@ -762,9 +765,6 @@ class TestCUDNNAvgInclude_AsyPadding_channel_last(
     def init_data_format(self):
         self.data_format = "NDHWC"
 
-    def init_shape(self):
-        self.shape = [2, 5, 5, 5, 3]
-
 
 class TestAvgPoolAdaptive_AsyPadding_channel_last(
         TestAvgPoolAdaptive_AsyPadding):
@@ -776,12 +776,10 @@ class TestAvgPoolAdaptive_AsyPadding_channel_last(
 
 
 #test padding = SAME VALID
-
-
 def create_test_padding_SAME_class(parent):
     class TestPaddingSMAECase(parent):
         def init_paddings(self):
-            self.paddings = [0, 0]
+            self.paddings = [0, 0, 0]
             self.padding_algorithm = "SAME"
 
     cls_name = "{0}_{1}".format(parent.__name__, "PaddingSAMEOp")
@@ -812,7 +810,7 @@ def create_test_cudnn_padding_SAME_class(parent):
             self.use_cudnn = True
 
         def init_paddings(self):
-            self.paddings = [1, 1]
+            self.paddings = [1, 1, 1]
             self.padding_algorithm = "SAME"
"SAME" cls_name = "{0}_{1}".format(parent.__name__, "CudnnPaddingSAMEOp") @@ -838,7 +836,7 @@ create_test_cudnn_padding_SAME_class(TestCase5_channel_last) def create_test_padding_VALID_class(parent): class TestPaddingVALIDCase(parent): def init_paddings(self): - self.paddings = [1, 1] + self.paddings = [1, 1, 1] self.padding_algorithm = "VALID" cls_name = "{0}_{1}".format(parent.__name__, "PaddingVALIDOp") @@ -869,7 +867,7 @@ def create_test_cudnn_padding_VALID_class(parent): self.use_cudnn = True def init_paddings(self): - self.paddings = [1, 1] + self.paddings = [1, 1, 1] self.padding_algorithm = "VALID" cls_name = "{0}_{1}".format(parent.__name__, "CudnnPaddingVALIDOp") @@ -963,6 +961,7 @@ class TestPool3dAPI(OpTest): out_7 = fluid.layers.pool3d( input=input_NDHWC, pool_size=ksize, + pool_stride=[1, 1, 2], pool_type="avg", pool_padding="SAME", use_cudnn=False, @@ -1058,7 +1057,7 @@ class TestPool3dAPI(OpTest): x=x_NDHWC, ksize=ksize, pool_type="avg", - strides=[1, 1, 1], + strides=[1, 1, 2], paddings=[10, 20], padding_algorithm="SAME", data_format="NDHWC")) -- GitLab