From be6ecec46f7b2b1d661a101744ee838dffc519ea Mon Sep 17 00:00:00 2001
From: minqiyang
Date: Fri, 10 Aug 2018 00:08:24 +0800
Subject: [PATCH] Fix unittests' division issues

---
 python/paddle/fluid/layers/nn.py               | 24 +++++++++----------
 .../fluid/tests/unittests/test_conv3d_op.py    | 18 +++++++-------
 .../fluid/tests/unittests/test_infer_shape.py  | 13 +++++-----
 .../fluid/tests/unittests/test_layers.py       |  4 ++--
 .../fluid/tests/unittests/test_pool3d_op.py    | 24 +++++++++----------
 5 files changed, 42 insertions(+), 41 deletions(-)

diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index d1ae284d54e..37e860b08ce 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -550,7 +550,7 @@ def dynamic_lstmp(input,
     """
 
     helper = LayerHelper('lstmp', **locals())
-    size = size / 4
+    size = size // 4
     weight = helper.create_parameter(
         attr=helper.param_attr, shape=[proj_size, 4 * size], dtype=dtype)
     proj_weight = helper.create_parameter(
@@ -778,7 +778,7 @@ def gru_unit(input,
 
     helper = LayerHelper('gru_unit', **locals())
     dtype = helper.input_dtype()
-    size = size / 3
+    size = size // 3
 
     # create weight
     weight = helper.create_parameter(
@@ -1258,7 +1258,7 @@ def sequence_conv(input,
         outputs={"Out": pre_bias},
         attrs={
             'contextStride': filter_stride,
-            'contextStart': -int(filter_size / 2),
+            'contextStart': -int(filter_size // 2),
             'contextLength': filter_size
         })
     pre_act = helper.append_bias_op(pre_bias)
@@ -1487,7 +1487,7 @@ def conv2d(input,
     else:
         if num_channels % groups != 0:
             raise ValueError("num_channels must be divisible by groups.")
-        num_filter_channels = num_channels / groups
+        num_filter_channels = num_channels // groups
 
     filter_size = utils.convert_to_list(filter_size, 2, 'filter_size')
     stride = utils.convert_to_list(stride, 2, 'stride')
@@ -1649,7 +1649,7 @@ def conv3d(input,
     else:
         if num_channels % groups != 0:
             raise ValueError("num_channels must be divisible by groups.")
-        num_filter_channels = num_channels / groups
+        num_filter_channels = num_channels // groups
 
     filter_size = utils.convert_to_list(filter_size, 3, 'filter_size')
     stride = utils.convert_to_list(stride, 3, 'stride')
@@ -2384,16 +2384,16 @@ def conv2d_transpose(input,
         w_in = input.shape[3]
 
         filter_size_h = (output_size[0] - (h_in - 1) * stride[0] + 2 *
-                         padding[0] - 1) / dilation[0] + 1
+                         padding[0] - 1) // dilation[0] + 1
         filter_size_w = (output_size[1] - (w_in - 1) * stride[1] + 2 *
-                         padding[1] - 1) / dilation[1] + 1
+                         padding[1] - 1) // dilation[1] + 1
         filter_size = [filter_size_h, filter_size_w]
     else:
         filter_size = utils.convert_to_list(filter_size, 2,
                                             'conv2d_transpose.filter_size')
 
     groups = 1 if groups is None else groups
-    filter_shape = [input_channel, num_filters / groups] + filter_size
+    filter_shape = [input_channel, num_filters // groups] + filter_size
     img_filter = helper.create_parameter(
         dtype=input.dtype, shape=filter_shape, attr=helper.param_attr)
 
@@ -2551,18 +2551,18 @@ def conv3d_transpose(input,
         w_in = input.shape[4]
 
         filter_size_d = (output_size[0] - (d_in - 1) * stride[0] + 2 *
-                         padding[0] - 1) / dilation[0] + 1
+                         padding[0] - 1) // dilation[0] + 1
         filter_size_h = (output_size[1] - (h_in - 1) * stride[1] + 2 *
-                         padding[1] - 1) / dilation[1] + 1
+                         padding[1] - 1) // dilation[1] + 1
         filter_size_w = (output_size[2] - (w_in - 1) * stride[2] + 2 *
-                         padding[2] - 1) / dilation[2] + 1
+                         padding[2] - 1) // dilation[2] + 1
         filter_size = [filter_size_d, filter_size_h, filter_size_w]
     else:
         filter_size = utils.convert_to_list(filter_size, 3,
                                             'conv3d_transpose.filter_size')
 
     groups = 1 if groups is None else groups
-    filter_shape = [input_channel, num_filters / groups] + filter_size
+    filter_shape = [input_channel, num_filters // groups] + filter_size
     img_filter = helper.create_parameter(
         dtype=input.dtype, shape=filter_shape, attr=helper.param_attr)
 
diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_op.py b/python/paddle/fluid/tests/unittests/test_conv3d_op.py
index dd4ef7cc94e..e473ebacea1 100644
--- a/python/paddle/fluid/tests/unittests/test_conv3d_op.py
+++ b/python/paddle/fluid/tests/unittests/test_conv3d_op.py
@@ -24,14 +24,14 @@ def conv3d_forward_naive(input, filter, group, conv_param):
     out_c, f_c, f_d, f_h, f_w = filter.shape
     assert f_c * group == in_c
     assert np.mod(out_c, group) == 0
-    sub_out_c = out_c / group
+    sub_out_c = out_c // group
 
     stride, pad, dilation = conv_param['stride'], conv_param['pad'], conv_param[
         'dilations']
 
-    out_d = 1 + (in_d + 2 * pad[0] - (dilation[0] * (f_d - 1) + 1)) / stride[0]
-    out_h = 1 + (in_h + 2 * pad[1] - (dilation[1] * (f_h - 1) + 1)) / stride[1]
-    out_w = 1 + (in_w + 2 * pad[2] - (dilation[2] * (f_w - 1) + 1)) / stride[2]
+    out_d = 1 + (in_d + 2 * pad[0] - (dilation[0] * (f_d - 1) + 1)) // stride[0]
+    out_h = 1 + (in_h + 2 * pad[1] - (dilation[1] * (f_h - 1) + 1)) // stride[1]
+    out_w = 1 + (in_w + 2 * pad[2] - (dilation[2] * (f_w - 1) + 1)) // stride[2]
 
     out = np.zeros((in_n, out_c, out_d, out_h, out_w))
 
@@ -166,7 +166,7 @@ class TestConv3dOp(OpTest):
         self.stride = [1, 1, 1]
         self.input_size = [2, 3, 4, 4, 4]  # NCDHW
         assert np.mod(self.input_size[1], self.groups) == 0
-        f_c = self.input_size[1] / self.groups
+        f_c = self.input_size[1] // self.groups
         self.filter_size = [6, f_c, 3, 3, 3]
 
     def init_dilation(self):
@@ -185,7 +185,7 @@ class TestCase1(TestConv3dOp):
         self.stride = [1, 1, 1]
         self.input_size = [2, 3, 4, 4, 4]  # NCDHW
         assert np.mod(self.input_size[1], self.groups) == 0
-        f_c = self.input_size[1] / self.groups
+        f_c = self.input_size[1] // self.groups
         self.filter_size = [6, f_c, 3, 3, 3]
 
 
@@ -205,7 +205,7 @@ class TestWith1x1(TestConv3dOp):
         self.stride = [1, 1, 1]
         self.input_size = [2, 3, 4, 4, 4]  # NCHW
         assert np.mod(self.input_size[1], self.groups) == 0
-        f_c = self.input_size[1] / self.groups
+        f_c = self.input_size[1] // self.groups
         self.filter_size = [6, f_c, 1, 1, 1]
 
     def init_dilation(self):
@@ -221,7 +221,7 @@ class TestWithInput1x1Filter1x1(TestConv3dOp):
         self.stride = [1, 1, 1]
         self.input_size = [2, 3, 1, 1, 1]  # NCHW
         assert np.mod(self.input_size[1], self.groups) == 0
-        f_c = self.input_size[1] / self.groups
+        f_c = self.input_size[1] // self.groups
        self.filter_size = [6, f_c, 1, 1, 1]
 
     def init_dilation(self):
@@ -237,7 +237,7 @@ class TestWithDilation(TestConv3dOp):
         self.stride = [1, 1, 1]
         self.input_size = [2, 3, 6, 6, 6]  # NCDHW
         assert np.mod(self.input_size[1], self.groups) == 0
-        f_c = self.input_size[1] / self.groups
+        f_c = self.input_size[1] // self.groups
         self.filter_size = [6, f_c, 2, 2, 2]
 
     def init_dilation(self):
diff --git a/python/paddle/fluid/tests/unittests/test_infer_shape.py b/python/paddle/fluid/tests/unittests/test_infer_shape.py
index 699a2d42467..ede51f65503 100644
--- a/python/paddle/fluid/tests/unittests/test_infer_shape.py
+++ b/python/paddle/fluid/tests/unittests/test_infer_shape.py
@@ -14,6 +14,7 @@
 
 import unittest
 
+import six
 import paddle.fluid.core as core
 
 
@@ -27,14 +28,14 @@ class TestInferShape(unittest.TestCase):
         shape = [10, 20]
 
         # prepare input/output
-        x1 = block.var("x1")
+        x1 = block.var(six.b("x1"))
         x1.set_type(core.VarDesc.VarType.LOD_TENSOR)
         x1.set_shape(shape)
-        x2 = block.var("x2")
+        x2 = block.var(six.b("x2"))
         x2.set_type(core.VarDesc.VarType.LOD_TENSOR)
         x2.set_shape(shape)
 
-        out = block.var("out")
+        out = block.var(six.b("out"))
         out.set_type(core.VarDesc.VarType.LOD_TENSOR)
 
         # prepare the operator
@@ -57,14 +58,14 @@ class TestInferShape(unittest.TestCase):
         y_shape = [20, 30]
 
         # prepare input/output
-        x1 = block.var("x")
+        x1 = block.var(six.b("x"))
         x1.set_type(core.VarDesc.VarType.LOD_TENSOR)
         x1.set_shape(x_shape)
-        x2 = block.var("y")
+        x2 = block.var(six.b("y"))
         x2.set_type(core.VarDesc.VarType.LOD_TENSOR)
         x2.set_shape(y_shape)
 
-        out = block.var("out")
+        out = block.var(six.b("out"))
         out.set_type(core.VarDesc.VarType.LOD_TENSOR)
 
         # prepare the operator
diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index 8f2dac786d0..aae5a24f6ca 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -158,7 +158,7 @@ class TestBook(unittest.TestCase):
                 input=crf_decode,
                 label=label,
                 chunk_scheme="IOB",
-                num_chunk_types=(label_dict_len - 1) / 2)
+                num_chunk_types=(label_dict_len - 1) // 2)
             self.assertFalse(crf is None)
             self.assertFalse(crf_decode is None)
 
@@ -285,7 +285,7 @@ class TestBook(unittest.TestCase):
                     name='word_{0}'.format(i), shape=[1], dtype='int64'))
 
         dict_size = 10000
-        label_word = int(window_size / 2) + 1
+        label_word = int(window_size // 2) + 1
 
         embs = []
         for i in range(window_size):
diff --git a/python/paddle/fluid/tests/unittests/test_pool3d_op.py b/python/paddle/fluid/tests/unittests/test_pool3d_op.py
index 92c64b37921..a358c849911 100644
--- a/python/paddle/fluid/tests/unittests/test_pool3d_op.py
+++ b/python/paddle/fluid/tests/unittests/test_pool3d_op.py
@@ -29,14 +29,14 @@ def max_pool3D_forward_naive(x,
     if global_pool == 1:
         ksize = [D, H, W]
     D_out = (D - ksize[0] + 2 * paddings[0] + strides[0] - 1
-             ) / strides[0] + 1 if ceil_mode else (H - ksize[0] + 2 *
-                                                   paddings[0]) / strides[0] + 1
+             ) // strides[0] + 1 if ceil_mode else (H - ksize[0] + 2 *
+                                                    paddings[0]) // strides[0] + 1
     H_out = (H - ksize[1] + 2 * paddings[1] + strides[1] - 1
-             ) / strides[1] + 1 if ceil_mode else (W - ksize[1] + 2 *
-                                                   paddings[1]) / strides[1] + 1
+             ) // strides[1] + 1 if ceil_mode else (W - ksize[1] + 2 *
+                                                    paddings[1]) // strides[1] + 1
     W_out = (W - ksize[2] + 2 * paddings[2] + strides[2] - 1
-             ) / strides[2] + 1 if ceil_mode else (W - ksize[2] + 2 *
-                                                   paddings[2]) / strides[2] + 1
+             ) // strides[2] + 1 if ceil_mode else (W - ksize[2] + 2 *
+                                                    paddings[2]) // strides[2] + 1
     out = np.zeros((N, C, D_out, H_out, W_out))
     for k in range(D_out):
         d_start = np.max((k * strides[0] - paddings[0], 0))
@@ -63,14 +63,14 @@ def avg_pool3D_forward_naive(x,
     if global_pool == 1:
         ksize = [D, H, W]
     D_out = (D - ksize[0] + 2 * paddings[0] + strides[0] - 1
-             ) / strides[0] + 1 if ceil_mode else (H - ksize[0] + 2 *
-                                                   paddings[0]) / strides[0] + 1
+             ) // strides[0] + 1 if ceil_mode else (H - ksize[0] + 2 *
+                                                    paddings[0]) // strides[0] + 1
     H_out = (H - ksize[1] + 2 * paddings[1] + strides[1] - 1
-             ) / strides[1] + 1 if ceil_mode else (W - ksize[1] + 2 *
-                                                   paddings[1]) / strides[1] + 1
+             ) // strides[1] + 1 if ceil_mode else (W - ksize[1] + 2 *
+                                                    paddings[1]) // strides[1] + 1
     W_out = (W - ksize[2] + 2 * paddings[2] + strides[2] - 1
-             ) / strides[2] + 1 if ceil_mode else (W - ksize[2] + 2 *
-                                                   paddings[2]) / strides[2] + 1
+             ) // strides[2] + 1 if ceil_mode else (W - ksize[2] + 2 *
+                                                    paddings[2]) // strides[2] + 1
     out = np.zeros((N, C, D_out, H_out, W_out))
     for k in range(D_out):
         d_start = np.max((k * strides[0] - paddings[0], 0))
-- 
GitLab
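Note: every hunk above follows the same Python 2-to-3 porting pattern. In Python 2, / between two ints already floors; in Python 3 it returns a float, which breaks layer-size and output-shape arithmetic that must stay integral, so the affected sites switch to the floor-division operator //. The six.b() calls added in test_infer_shape.py pass byte strings as variable names, presumably because the C++ core expects bytes for names under Python 3. The snippet below is an illustrative sketch only, not part of the patch; the value of size is made up.

    # Illustrative only -- not part of the patch above.
    import six

    size = 512
    print(size / 4)     # Python 3: 128.0 (float); Python 2: 128 (int)
    print(size // 4)    # 128 (int) on both Python 2 and Python 3
    print(six.b("x1"))  # b'x1' on Python 3, 'x1' on Python 2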