Unverified · Commit 7ab85396 · authored by liym27, committed by GitHub

[cherry-pick] Fix bugs in pool/conv/conv_transpose: UpdatePaddingAndDilation, _get_padding_with_SAME and conv2dtranspose_forward_naive. (#20997) (#21225)

* Fix bugs in pool/conv/conv_transpose:
    1. Use stride[i], not stride[0], in UpdatePaddingAndDilation (see the sketch below);
    2. Fix the _get_padding_with_SAME helper in test_conv/conv_transpose_op.py;
    3. Fix the computation in conv2dtranspose_forward_naive.
    test=release/1.6
Parent cdb81264
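The common thread in these fixes is the SAME-padding rule: the output size, and therefore the pad amount, must be computed per spatial dimension with that dimension's own stride. A minimal standalone sketch of the corrected rule (plain Python, hypothetical helper name, not the exact code from the diff below):

def same_padding(data_shape, ksize, stride):
    """Return [pad_before, pad_after] for each spatial dim under SAME padding."""
    pad = []
    for d, k, s in zip(data_shape, ksize, stride):
        out_size = (d + s - 1) // s  # ceil(d / s); the bug divided by stride[0] instead of s
        pad_sum = max((out_size - 1) * s + k - d, 0)
        pad_0 = pad_sum // 2
        pad.extend([pad_0, pad_sum - pad_0])
    return pad

# With unequal strides the old code diverges: for a 5x5 input, a 3x3 kernel and
# stride [1, 2], this rule gives [1, 1, 1, 1]; dividing by stride[0] would give
# out_size = 6 instead of 3 for the width and pads of [1, 1, 4, 4].
print(same_padding([5, 5], [3, 3], [1, 2]))  # -> [1, 1, 1, 1]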
@@ -83,10 +83,10 @@ inline void UpdatePaddingAndDilation(std::vector<int>* paddings,
         "Paddings size should be the same or twice as the input data size.");
   }
 
-  // when padding_desc is "VALID" or "SAME"
+  // when padding_algorithm is "VALID" or "SAME"
   if (padding_algorithm == "SAME") {
     for (size_t i = 0; i < data_dims.size(); ++i) {
-      int out_size = (data_dims[i] + strides[i] - 1) / strides[0];
+      int out_size = (data_dims[i] + strides[i] - 1) / strides[i];
       int pad_sum =
           std::max((out_size - 1) * strides[i] + ksize[i] - data_shape[i], 0);
       int pad_0 = pad_sum / 2;
...
@@ -18,6 +18,7 @@ limitations under the License. */
 #include <vector>
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/operators/conv_op.h"
 #include "paddle/fluid/operators/math/blas.h"
 #include "paddle/fluid/operators/math/concat_and_split.h"
 #include "paddle/fluid/operators/math/depthwise_conv.h"
@@ -77,47 +78,6 @@ static void Slice(const framework::ExecutionContext& context,
   Slice<DeviceContext, T, D>(context, input, out, begin_vec, end_vec, axes_vec);
 }
 
-inline void UpdatePaddingAndDilation(std::vector<int>* paddings,
-                                     std::vector<int>* dilation,
-                                     const std::string padding_algorithm,
-                                     const framework::DDim data_dims,
-                                     const std::vector<int>& strides,
-                                     const std::vector<int>& ksize) {
-  // set padding size == data_dims.size() * 2
-  auto data_shape = framework::vectorize<int>(data_dims);
-  if (paddings->size() == data_dims.size()) {
-    for (size_t i = 0; i < data_dims.size(); ++i) {
-      int copy_pad = *(paddings->begin() + 2 * i);
-      paddings->insert(paddings->begin() + 2 * i + 1, copy_pad);
-    }
-  } else {
-    PADDLE_ENFORCE_EQ(
-        data_dims.size() * 2, paddings->size(),
-        "Paddings size should be the same or twice as the input data size.");
-  }
-
-  // when padding_algorithm is "VALID" or "SAME"
-  if (padding_algorithm == "SAME") {
-    for (size_t i = 0; i < data_dims.size(); ++i) {
-      int out_size = (data_dims[i] + strides[i] - 1) / strides[0];
-      int pad_sum =
-          std::max((out_size - 1) * strides[i] + ksize[i] - data_shape[i], 0);
-      int pad_0 = pad_sum / 2;
-      int pad_1 = pad_sum - pad_0;
-      *(paddings->begin() + i * 2) = pad_0;
-      *(paddings->begin() + i * 2 + 1) = pad_1;
-      // dilation
-      *(dilation->begin() + i) = 1;
-    }
-  } else if (padding_algorithm == "VALID") {
-    for (auto it = paddings->begin(); it != paddings->end(); it++) {
-      *it = 0;
-    }
-  }
-}
-
 // Define Op classes in .h file so that other conv transpose
 // operator implementations can reuse the code.
 class Conv2DTransposeOpMaker : public framework::OpProtoAndCheckerMaker {
...
@@ -76,10 +76,10 @@ inline void UpdatePadding(std::vector<int>* paddings, const bool global_pooling,
         "Paddings size should be the same or twice as the pooling size.");
   }
 
-  // when padding_desc is "VALID" or "SAME"
+  // when padding_algorithm is "VALID" or "SAME"
   if (padding_algorithm == "SAME") {
-    for (size_t i = 0; i < data_dims.size(); ++i) {
-      int out_size = (data_dims[i] + strides[i] - 1) / strides[0];
+    for (int i = 0; i < data_dims.size(); ++i) {
+      int out_size = (data_dims[i] + strides[i] - 1) / strides[i];
       int pad_sum =
           std::max((out_size - 1) * strides[i] + ksize[i] - data_shape[i], 0);
       int pad_0 = pad_sum / 2;
...
@@ -73,11 +73,7 @@ def conv2d_forward_naive(input,
         pad = [0, 0, 0, 0]
     elif padding_algorithm == "SAME":
         dilation = [1, 1]
-        input_data_shape = []
-        if data_format == "NCHW":
-            input_data_shape = input.shape[2:4]
-        elif data_format == "NHWC":
-            input_data_shape = input.shape[1:3]
+        input_data_shape = input.shape[2:4]
         pad = _get_padding_with_SAME(input_data_shape, ksize, stride)
 
     pad_h_0, pad_h_1 = pad[0], pad[0]
@@ -775,11 +771,11 @@ class TestConv2dOp_v2(OpTest):
     def init_test_case(self):
         self.pad = [0, 0]
-        self.stride = [1, 1]
+        self.stride = [1, 2]
         self.input_size = [2, 3, 5, 5]  # NCHW
         assert np.mod(self.input_size[1], self.groups) == 0
         f_c = self.input_size[1] // self.groups
-        self.filter_size = [6, f_c, 3, 3]
+        self.filter_size = [6, f_c, 4, 3]
 
     def init_dilation(self):
         self.dilations = [1, 1]
...
@@ -59,12 +59,8 @@ def conv2dtranspose_forward_naive(input_, filter_, attrs):
     if padding_algorithm == "VALID":
         pad = [0, 0, 0, 0]
     elif padding_algorithm == "SAME":
-        dilation = [1, 1]
-        input_data_shape = []
-        if attrs['data_format'] == "NCHW":
-            input_data_shape = input_.shape[2:4]
-        elif attrs['data_format'] == "NHWC":
-            input_data_shape = input_.shape[1:3]
+        dilations = [1, 1]
+        input_data_shape = input_.shape[2:4]
         pad = _get_padding_with_SAME(input_data_shape, ksize, stride)
 
     pad_h_0, pad_h_1 = pad[0], pad[0]
@@ -99,7 +95,7 @@ def conv2dtranspose_forward_naive(input_, filter_, attrs):
                     filter_[g * sub_in_c:(g + 1) * sub_in_c, k, :, :],
                     axis=0)
                 i1, i2 = i * stride[0], i * stride[0] + d_bolck_h
-                j1, j2 = j * stride[0], j * stride[0] + d_bolck_h
+                j1, j2 = j * stride[1], j * stride[1] + d_bolck_w
                 out[n, g * f_out_c + k, i1:i2:dilations[0], j1:j2:
                     dilations[1]] += tmp_out
@@ -231,12 +227,12 @@ class TestWithAsymmetricPad(TestConv2dTransposeOp):
 class TestWithSAMEPad(TestConv2dTransposeOp):
     def init_test_case(self):
-        self.stride = [1, 1]
-        self.dilations = [1, 1]
+        self.stride = [2, 1]
+        self.dilations = [1, 2]
         self.groups = 1
-        self.input_size = [2, 3, 5, 5]  # NCHW
+        self.input_size = [2, 3, 6, 5]  # NCHW
         f_c = self.input_size[1]
-        self.filter_size = [f_c, 6, 3, 3]
+        self.filter_size = [f_c, 6, 4, 3]
         self.padding_algorithm = 'SAME'
@@ -429,7 +425,7 @@ class TestCUDNNWithAsymmetricPad(TestWithAsymmetricPad):
 class TestCUDNNWithSAMEPad(TestWithSAMEPad):
     def init_test_case(self):
         self.pad = [1, 0, 1, 2]
-        self.stride = [1, 1]
+        self.stride = [1, 2]
         self.groups = 1
         self.dilations = [1, 1]
         self.input_size = [2, 3, 5, 5]  # NCHW
...
@@ -75,11 +75,7 @@ def conv3d_forward_naive(input,
         pad = [0, 0, 0, 0, 0, 0]
     elif padding_algorithm == "SAME":
         dilation = [1, 1, 1]
-        input_data_shape = []
-        if data_format == "NCDHW":
-            input_data_shape = input.shape[2:5]
-        elif data_format == "NDHWC":
-            input_data_shape = input.shape[1:4]
+        input_data_shape = input.shape[2:5]
         pad = _get_padding_with_SAME(input_data_shape, ksize, stride)
 
     pad_d_0, pad_d_1 = pad[0], pad[0]
@@ -597,11 +593,36 @@ class TestConv3dOp_2(OpTest):
 class TestConv3dOp_AsyPadding(TestConv3dOp_2):
+    def init_test_case(self):
+        self.stride = [1, 1, 2]
+        self.input_size = [2, 3, 4, 4, 4]  # NCDHW
+        assert np.mod(self.input_size[1], self.groups) == 0
+        f_c = self.input_size[1] // self.groups
+        self.filter_size = [6, f_c, 3, 3, 3]
+
     def init_paddings(self):
         self.pad = [1, 0, 1, 0, 0, 2]
         self.padding_algorithm = "EXPLICIT"
 
 
+class TestConv3dOp_DiffDataInDiffDim(TestConv3dOp_2):
+    def init_test_case(self):
+        self.stride = [1, 1, 2]
+        self.input_size = [2, 3, 4, 5, 5]  # NCDHW
+        assert np.mod(self.input_size[1], self.groups) == 0
+        f_c = self.input_size[1] // self.groups
+        self.filter_size = [6, f_c, 3, 4, 3]
+
+    def init_paddings(self):
+        self.pad = [1, 0, 1, 0, 0, 2]
+        self.padding_algorithm = "EXPLICIT"
+
+
+create_test_padding_SAME_class(TestConv3dOp_DiffDataInDiffDim)
+create_test_padding_VALID_class(TestConv3dOp_DiffDataInDiffDim)
+create_test_channel_last_class(TestConv3dOp_DiffDataInDiffDim)
+
+
 class TestCase1_AsyPadding(TestConv3dOp_2):
     def init_test_case(self):
         self.stride = [1, 1, 1]
...
@@ -58,12 +58,8 @@ def conv3dtranspose_forward_naive(input_, filter_, attrs):
     if padding_algorithm == "VALID":
         pad = [0, 0, 0, 0, 0, 0]
     elif padding_algorithm == "SAME":
-        dilation = [1, 1, 1]
-        input_data_shape = []
-        if attrs['data_format'] == "NCHW":
-            input_data_shape = input_.shape[2:5]
-        elif attrs['data_format'] == "NHWC":
-            input_data_shape = input_.shape[1:4]
+        dilations = [1, 1, 1]
+        input_data_shape = input_.shape[2:5]
         pad = _get_padding_with_SAME(input_data_shape, ksize, stride)
 
     pad_d_0, pad_d_1 = pad[0], pad[0]
@@ -226,23 +222,23 @@ class TestWithAsymmetricPad(TestConv3dTransposeOp):
 class TestWithSAMEPad(TestConv3dTransposeOp):
     def init_test_case(self):
-        self.stride = [1, 1, 1]
-        self.dilations = [1, 1, 1]
+        self.stride = [1, 1, 2]
+        self.dilations = [1, 2, 1]
         self.groups = 1
-        self.input_size = [2, 3, 5, 5, 5]  # NCDHW
+        self.input_size = [2, 3, 5, 5, 6]  # NCDHW
         f_c = self.input_size[1]
-        self.filter_size = [f_c, 6, 3, 3, 3]
+        self.filter_size = [f_c, 6, 3, 3, 4]
         self.padding_algorithm = 'SAME'
 
 
 class TestWithVALIDPad(TestConv3dTransposeOp):
     def init_test_case(self):
-        self.stride = [1, 1, 1]
+        self.stride = [2, 1, 1]
         self.dilations = [1, 1, 1]
         self.groups = 1
         self.input_size = [2, 3, 5, 5, 5]  # NCDHW
         f_c = self.input_size[1]
-        self.filter_size = [f_c, 6, 3, 3, 3]
+        self.filter_size = [f_c, 6, 3, 4, 3]
         self.padding_algorithm = 'VALID'
@@ -398,12 +394,12 @@ class TestCUDNNWithAsymmetricPad(TestWithAsymmetricPad):
                  "core is not compiled with CUDA")
 class TestCUDNNWithSAMEPad(TestWithSAMEPad):
     def init_test_case(self):
-        self.stride = [1, 1, 1]
-        self.dilations = [1, 1, 1]
+        self.stride = [1, 1, 2]
+        self.dilations = [1, 2, 1]
         self.groups = 1
         self.input_size = [2, 3, 5, 5, 5]  # NCDHW
         f_c = self.input_size[1]
-        self.filter_size = [f_c, 6, 3, 3, 3]
+        self.filter_size = [f_c, 6, 3, 4, 3]
         self.padding_algorithm = 'SAME'
 
     def init_op_type(self):
...
@@ -950,6 +950,20 @@ create_test_cudnn_padding_VALID_class(TestCase4_channel_last)
 create_test_cudnn_padding_VALID_class(TestCase5_channel_last)
 
 
+class TestCase1_strides(TestCase1):
+    def init_test_case(self):
+        self.ksize = [3, 3]
+        self.strides = [1, 2]
+
+    def init_shape(self):
+        self.shape = [2, 3, 4, 5]
+
+
+create_test_cudnn_class(TestCase1_strides)
+create_test_padding_SAME_class(TestCase1_strides)
+create_test_cudnn_padding_SAME_class(TestCase1_strides)
+
+
 # ----- test API
 class TestPool2dAPI(OpTest):
     def test_api(self):
...
@@ -211,6 +211,8 @@ class TestPool3d_Op(OpTest):
         self.init_kernel_type()
         self.dtype = np.float32
         self.init_test_case()
+        self.padding_algorithm = "EXPLICIT"
+        self.init_paddings()
         self.init_global_pool()
         self.init_kernel_type()
         self.init_pool_type()
@@ -224,7 +226,7 @@ class TestPool3d_Op(OpTest):
         output = pool3D_forward_naive(
             input, self.ksize, self.strides, self.paddings, self.global_pool,
             self.ceil_mode, self.exclusive, self.adaptive, self.data_format,
-            self.pool_type).astype(self.dtype)
+            self.pool_type, self.padding_algorithm).astype(self.dtype)
 
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)}
@@ -238,7 +240,8 @@ class TestPool3d_Op(OpTest):
             'ceil_mode': self.ceil_mode,
             'data_format': self.data_format,
             'exclusive': self.exclusive,
-            'adaptive': self.adaptive
+            'adaptive': self.adaptive,
+            "padding_algorithm": self.padding_algorithm,
         }
 
         self.outputs = {'Out': output}
@@ -267,16 +270,18 @@ class TestPool3d_Op(OpTest):
         self.data_format = "NCDHW"
 
     def init_shape(self):
-        self.shape = [2, 3, 5, 5, 5]
+        self.shape = [2, 3, 5, 6, 5]
 
     def init_test_case(self):
-        self.ksize = [3, 3, 3]
-        self.strides = [1, 1, 1]
+        self.ksize = [2, 3, 1]
+        self.strides = [2, 2, 3]
+
+    def init_paddings(self):
         self.paddings = [0, 0, 0]
+        self.padding_algorithm = "EXPLICIT"
 
     def init_kernel_type(self):
         self.use_cudnn = False
-        #pass
 
     def init_pool_type(self):
         self.pool_type = "avg"
@@ -301,6 +306,8 @@ class TestCase1(TestPool3d_Op):
     def init_test_case(self):
         self.ksize = [3, 3, 3]
         self.strides = [1, 1, 1]
+
+    def init_paddings(self):
         self.paddings = [0, 0, 0]
 
     def init_pool_type(self):
@@ -312,11 +319,13 @@ class TestCase1(TestPool3d_Op):
 class TestCase2(TestPool3d_Op):
     def init_shape(self):
-        self.shape = [2, 3, 7, 7, 7]
+        self.shape = [2, 3, 6, 7, 7]
 
     def init_test_case(self):
-        self.ksize = [3, 3, 3]
-        self.strides = [1, 1, 1]
+        self.ksize = [3, 3, 4]
+        self.strides = [1, 3, 2]
+
+    def init_paddings(self):
         self.paddings = [1, 1, 1]
 
     def init_pool_type(self):
@@ -446,32 +455,36 @@ class TestAvgPoolAdaptive(TestCase1):
 
 #-------test pool3d with asymmetric padding------
 class TestPool3d_Op_AsyPadding(TestPool3d_Op):
     def init_test_case(self):
-        self.ksize = [3, 3, 3]
-        self.strides = [1, 1, 1]
+        self.ksize = [3, 4, 3]
+        self.strides = [1, 1, 2]
+
+    def init_paddings(self):
         self.paddings = [0, 0, 0, 2, 3, 0]
 
     def init_shape(self):
-        self.shape = [2, 3, 5, 5, 5]
+        self.shape = [2, 3, 5, 5, 6]
 
 
 class TestCase1_AsyPadding(TestCase1):
     def init_test_case(self):
-        self.ksize = [3, 3, 3]
-        self.strides = [1, 1, 1]
+        self.ksize = [3, 3, 4]
+        self.strides = [1, 1, 2]
+
+    def init_paddings(self):
         self.paddings = [1, 0, 2, 1, 2, 1]
 
     def init_shape(self):
-        self.shape = [2, 3, 7, 7, 7]
+        self.shape = [2, 3, 7, 7, 6]
 
 
 class TestCase2_AsyPadding(TestCase2):
     def init_test_case(self):
         self.ksize = [3, 3, 3]
         self.strides = [1, 1, 1]
+
+    def init_paddings(self):
         self.paddings = [1, 2, 1, 1, 1, 0]
 
     def init_shape(self):
@@ -482,6 +495,8 @@ class TestCase3_AsyPadding(TestCase3):
     def init_test_case(self):
         self.ksize = [3, 3, 3]
         self.strides = [1, 1, 1]
+
+    def init_paddings(self):
         self.paddings = [1, 0, 0, 0, 1, 0]
 
     def init_shape(self):
@@ -492,6 +507,8 @@ class TestCase4_AsyPadding(TestCase4):
     def init_test_case(self):
         self.ksize = [3, 3, 3]
         self.strides = [1, 1, 1]
+
+    def init_paddings(self):
         self.paddings = [1, 0, 2, 1, 2, 1]
 
     def init_shape(self):
@@ -500,9 +517,10 @@ class TestCase4_AsyPadding(TestCase4):
 
 class TestCase5_AsyPadding(TestCase5):
     def init_test_case(self):
-        self.shape = [2, 7, 7, 7, 3]
         self.ksize = [3, 3, 3]
         self.strides = [1, 1, 1]
+
+    def init_paddings(self):
         self.paddings = [1, 2, 1, 1, 1, 0]
 
     def init_shape(self):
@@ -534,14 +552,9 @@ class TestAvgInclude_AsyPadding(TestCase2):
     def init_exclusive(self):
         self.exclusive = False
 
-    def init_test_case(self):
-        self.ksize = [3, 3, 3]
-        self.strides = [1, 1, 1]
+    def init_paddings(self):
         self.paddings = [1, 2, 1, 1, 1, 0]
 
-    def init_shape(self):
-        self.shape = [2, 3, 7, 7, 7]
-
 
 @unittest.skipIf(not core.is_compiled_with_cuda(),
                  "core is not compiled with CUDA")
@@ -552,9 +565,7 @@ class TestCUDNNAvgInclude_AsyPadding(TestCase2):
     def init_exclusive(self):
         self.exclusive = False
 
-    def init_test_case(self):
-        self.ksize = [3, 3, 3]
-        self.strides = [1, 1, 1]
+    def init_paddings(self):
         self.paddings = [1, 0, 0, 0, 0, 0]
 
     def init_shape(self):
@@ -565,14 +576,9 @@ class TestAvgPoolAdaptive_AsyPadding(TestCase1):
     def init_adaptive(self):
        self.adaptive = True
 
-    def init_test_case(self):
-        self.ksize = [3, 3, 3]
-        self.strides = [1, 1, 1]
+    def init_paddings(self):
         self.paddings = [1, 0, 2, 1, 2, 1]
 
-    def init_shape(self):
-        self.shape = [2, 3, 7, 7, 7]
-
 
 # ------------ test channel_last --------------
 class TestPool3d_channel_last(TestPool3d_Op):
@@ -580,7 +586,7 @@ class TestPool3d_channel_last(TestPool3d_Op):
         self.data_format = "NDHWC"
 
     def init_shape(self):
-        self.shape = [2, 5, 5, 5, 3]
+        self.shape = [2, 5, 5, 6, 3]
 
 
 class TestCase1_channel_last(TestCase1):
@@ -596,7 +602,7 @@ class TestCase2_channel_last(TestCase2):
         self.data_format = "NDHWC"
 
     def init_shape(self):
-        self.shape = [2, 7, 7, 7, 3]
+        self.shape = [2, 7, 7, 5, 3]
 
 
 class TestCase3_channel_last(TestCase3):
@@ -604,7 +610,7 @@ class TestCase3_channel_last(TestCase3):
         self.data_format = "NDHWC"
 
     def init_shape(self):
-        self.shape = [2, 5, 5, 5, 3]
+        self.shape = [2, 5, 6, 5, 3]
 
 
 class TestCase4_channel_last(TestCase4):
@@ -612,7 +618,7 @@ class TestCase4_channel_last(TestCase4):
         self.data_format = "NDHWC"
 
     def init_shape(self):
-        self.shape = [2, 7, 7, 7, 3]
+        self.shape = [2, 7, 6, 7, 3]
 
 
 class TestCase5_channel_last(TestCase5):
@@ -690,7 +696,7 @@ class TestPool3d_Op_AsyPadding_channel_last(TestPool3d_Op_AsyPadding):
         self.data_format = "NDHWC"
 
     def init_shape(self):
-        self.shape = [2, 5, 5, 5, 3]
+        self.shape = [2, 5, 5, 6, 3]
 
 
 class TestCase1_AsyPadding_channel_last(TestCase1_AsyPadding):
@@ -698,7 +704,7 @@ class TestCase1_AsyPadding_channel_last(TestCase1_AsyPadding):
         self.data_format = "NDHWC"
 
     def init_shape(self):
-        self.shape = [2, 7, 7, 7, 3]
+        self.shape = [2, 7, 6, 8, 3]
 
 
 class TestCase2_AsyPadding_channel_last(TestCase2_AsyPadding):
@@ -706,7 +712,7 @@ class TestCase2_AsyPadding_channel_last(TestCase2_AsyPadding):
         self.data_format = "NDHWC"
 
     def init_shape(self):
-        self.shape = [2, 7, 7, 7, 3]
+        self.shape = [2, 6, 8, 7, 3]
 
 
 class TestCase3_AsyPadding_channel_last(TestCase3_AsyPadding):
@@ -714,7 +720,7 @@ class TestCase3_AsyPadding_channel_last(TestCase3_AsyPadding):
         self.data_format = "NDHWC"
 
     def init_shape(self):
-        self.shape = [2, 5, 5, 5, 3]
+        self.shape = [2, 5, 7, 5, 3]
 
 
 class TestCase4_AsyPadding_channel_last(TestCase4_AsyPadding):
@@ -722,7 +728,7 @@ class TestCase4_AsyPadding_channel_last(TestCase4_AsyPadding):
         self.data_format = "NDHWC"
 
     def init_shape(self):
-        self.shape = [2, 7, 7, 7, 3]
+        self.shape = [2, 6, 7, 7, 3]
 
 
 class TestCase5_AsyPadding_channel_last(TestCase5_AsyPadding):
@@ -730,7 +736,7 @@ class TestCase5_AsyPadding_channel_last(TestCase5_AsyPadding):
         self.data_format = "NDHWC"
 
     def init_shape(self):
-        self.shape = [2, 7, 7, 7, 3]
+        self.shape = [2, 7, 8, 6, 3]
 
 
 create_test_cudnn_class(TestPool3d_Op_AsyPadding_channel_last)
@@ -751,9 +757,6 @@ class TestAvgInclude_AsyPadding_channel_last(TestAvgInclude_AsyPadding):
     def init_data_format(self):
         self.data_format = "NDHWC"
 
-    def init_shape(self):
-        self.shape = [2, 7, 7, 7, 3]
-
 
 @unittest.skipIf(not core.is_compiled_with_cuda(),
                  "core is not compiled with CUDA")
@@ -762,9 +765,6 @@ class TestCUDNNAvgInclude_AsyPadding_channel_last(
     def init_data_format(self):
         self.data_format = "NDHWC"
 
-    def init_shape(self):
-        self.shape = [2, 5, 5, 5, 3]
-
 
 class TestAvgPoolAdaptive_AsyPadding_channel_last(
         TestAvgPoolAdaptive_AsyPadding):
@@ -776,12 +776,10 @@ class TestAvgPoolAdaptive_AsyPadding_channel_last(
 
 #test padding = SAME VALID
 def create_test_padding_SAME_class(parent):
     class TestPaddingSMAECase(parent):
         def init_paddings(self):
-            self.paddings = [0, 0]
+            self.paddings = [0, 0, 0]
             self.padding_algorithm = "SAME"
 
     cls_name = "{0}_{1}".format(parent.__name__, "PaddingSAMEOp")
@@ -812,7 +810,7 @@ def create_test_cudnn_padding_SAME_class(parent):
             self.use_cudnn = True
 
         def init_paddings(self):
-            self.paddings = [1, 1]
+            self.paddings = [1, 1, 1]
             self.padding_algorithm = "SAME"
 
     cls_name = "{0}_{1}".format(parent.__name__, "CudnnPaddingSAMEOp")
@@ -838,7 +836,7 @@ create_test_cudnn_padding_SAME_class(TestCase5_channel_last)
 def create_test_padding_VALID_class(parent):
     class TestPaddingVALIDCase(parent):
         def init_paddings(self):
-            self.paddings = [1, 1]
+            self.paddings = [1, 1, 1]
             self.padding_algorithm = "VALID"
 
     cls_name = "{0}_{1}".format(parent.__name__, "PaddingVALIDOp")
@@ -869,7 +867,7 @@ def create_test_cudnn_padding_VALID_class(parent):
             self.use_cudnn = True
 
         def init_paddings(self):
-            self.paddings = [1, 1]
+            self.paddings = [1, 1, 1]
             self.padding_algorithm = "VALID"
 
     cls_name = "{0}_{1}".format(parent.__name__, "CudnnPaddingVALIDOp")
@@ -963,6 +961,7 @@ class TestPool3dAPI(OpTest):
         out_7 = fluid.layers.pool3d(
             input=input_NDHWC,
             pool_size=ksize,
+            pool_stride=[1, 1, 2],
             pool_type="avg",
             pool_padding="SAME",
             use_cudnn=False,
@@ -1058,7 +1057,7 @@ class TestPool3dAPI(OpTest):
                 x=x_NDHWC,
                 ksize=ksize,
                 pool_type="avg",
-                strides=[1, 1, 1],
+                strides=[1, 1, 2],
                 paddings=[10, 20],
                 padding_algorithm="SAME",
                 data_format="NDHWC"))
...