From c8d877195b9763ec2da9eb480bb6858cee834359 Mon Sep 17 00:00:00 2001
From: guosheng
Date: Thu, 14 Sep 2017 01:11:31 +0800
Subject: [PATCH] Revise the reduce_op unit test accordingly

---
 paddle/operators/reduce_op.cc                 |  56 +++++----
 paddle/operators/reduce_op.cu                 |   4 +-
 paddle/operators/reduce_op.h                  |   2 +-
 .../v2/framework/tests/test_reduce_op.py      | 113 +++++++++---------
 4 files changed, 89 insertions(+), 86 deletions(-)

diff --git a/paddle/operators/reduce_op.cc b/paddle/operators/reduce_op.cc
index ea4bfc50b27..20e6319730c 100644
--- a/paddle/operators/reduce_op.cc
+++ b/paddle/operators/reduce_op.cc
@@ -30,12 +30,14 @@ class ReduceOp : public framework::OperatorWithKernel {
     auto x_dims = ctx.Input<Tensor>("X")->dims();
     auto x_rank = x_dims.size();
     PADDLE_ENFORCE_LE(x_rank, 6, "Tensors with rank at most 6 are supported");
-    int dim = static_cast<int>(ctx.Attr<int>("dim"));
+    int dim = ctx.Attr<int>("dim");
     if (dim < 0) dim = x_rank + dim;
     PADDLE_ENFORCE_LT(
         dim, x_rank,
-        "The dim should be in the range [-rank(input), rank(input)]");
-    bool keep_dim = true;  // TODO;
+        "The dim should be in the range [-rank(input), rank(input))");
+    PADDLE_ENFORCE_GE(ctx.Attr<int>("keep_dim"), 0, "keep_dim must be 0 or 1");
+    PADDLE_ENFORCE_LE(ctx.Attr<int>("keep_dim"), 1, "keep_dim must be 0 or 1");
+    bool keep_dim = ctx.Attr<int>("keep_dim") == 1;
     auto dims_vector = vectorize(x_dims);
     if (keep_dim || x_rank == 1) {
       dims_vector[dim] = 1;
@@ -59,11 +61,11 @@ class ReduceGradOp : public framework::OperatorWithKernel {
     auto x_dims = ctx.Input<Tensor>("X")->dims();
     auto x_rank = x_dims.size();
     PADDLE_ENFORCE_LE(x_rank, 6, "Tensors with rank at most 6 are supported");
-    int dim = static_cast<int>(ctx.Attr<int>("dim"));
+    int dim = ctx.Attr<int>("dim");
     if (dim < 0) dim = x_rank + dim;
     PADDLE_ENFORCE_LT(
         dim, x_rank,
-        "The dim should be in the range [-rank(input), rank(input)]");
+        "The dim should be in the range [-rank(input), rank(input))");
     auto *x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
     if (x_grad) x_grad->Resize(x_dims);
   }
@@ -84,12 +86,13 @@ The result tensor has 1 fewer dimension than the input unless `keep_dim` is true
 )DOC");
     AddAttr<int>("dim",
                  "(int, default 0) The dimension to reduce. "
-                 "Must be in the range [-rank(input), rank(input)]")
+                 "Must be in the range [-rank(input), rank(input))")
+        .SetDefault(0);
+    AddAttr<int>(
+        "keep_dim",
+        "(int, default 0) "
+        "Must be 0 or 1. If 1, retain the reduced dimension with length 1.")
         .SetDefault(0);
-    AddAttr<bool>("keep_dim",
-                  "(bool, default fasle) "
-                  "If true, retain the reduced dimension with length 1.")
-        .SetDefault(false);
   }
 };
 
@@ -108,12 +111,13 @@ The result tensor has 1 fewer dimension than the input unless `keep_dim` is true
 )DOC");
     AddAttr<int>("dim",
                  "(int, default 0) The dimension to reduce. "
-                 "Must be in the range [-rank(input), rank(input)]")
+                 "Must be in the range [-rank(input), rank(input))")
+        .SetDefault(0);
+    AddAttr<int>(
+        "keep_dim",
+        "(int, default 0) "
+        "Must be 0 or 1. If 1, retain the reduced dimension with length 1.")
         .SetDefault(0);
-    AddAttr<bool>("keep_dim",
-                  "(bool, default fasle) "
-                  "If true, retain the reduced dimension with length 1.")
-        .SetDefault(false);
   }
 };
 
@@ -132,12 +136,13 @@ The result tensor has 1 fewer dimension than the input unless `keep_dim` is true
 )DOC");
     AddAttr<int>("dim",
                  "(int, default 0) The dimension to reduce. "
-                 "Must be in the range [-rank(input), rank(input)]")
+                 "Must be in the range [-rank(input), rank(input))")
+        .SetDefault(0);
+    AddAttr<int>(
+        "keep_dim",
+        "(int, default 0) "
+        "Must be 0 or 1. If 1, retain the reduced dimension with length 1.")
         .SetDefault(0);
-    AddAttr<bool>("keep_dim",
-                  "(bool, default fasle) "
-                  "If true, retain the reduced dimension with length 1.")
-        .SetDefault(false);
   }
 };
 
@@ -156,12 +161,13 @@ The result tensor has 1 fewer dimension than the input unless `keep_dim` is true
 )DOC");
     AddAttr<int>("dim",
                  "(int, default 0) The dimension to reduce. "
-                 "Must be in the range [-rank(input), rank(input)]")
+                 "Must be in the range [-rank(input), rank(input))")
+        .SetDefault(0);
+    AddAttr<int>(
+        "keep_dim",
+        "(int, default 0) "
+        "Must be 0 or 1. If 1, retain the reduced dimension with length 1.")
        .SetDefault(0);
-    AddAttr<bool>("keep_dim",
-                  "(bool, default fasle) "
-                  "If true, retain the reduced dimension with length 1.")
-        .SetDefault(false);
   }
 };
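For reference, the shape rule the revised InferShape enforces can be stated in a few lines of numpy-style Python. This is an illustrative sketch only; the helper name infer_reduce_shape is ours, not part of the patch:

import numpy as np


def infer_reduce_shape(x_shape, dim=0, keep_dim=0):
    # Mirrors ReduceOp::InferShape above: normalize a negative dim, then
    # either keep the reduced axis with length 1 or drop it entirely.
    x_rank = len(x_shape)
    assert x_rank <= 6, "Tensors with rank at most 6 are supported"
    if dim < 0:
        dim = x_rank + dim
    assert 0 <= dim < x_rank, \
        "The dim should be in the range [-rank(input), rank(input))"
    assert keep_dim in (0, 1), "keep_dim must be 0 or 1"
    dims = list(x_shape)
    if keep_dim == 1 or x_rank == 1:
        dims[dim] = 1  # retain the reduced dimension with length 1
    else:
        del dims[dim]  # squeeze the reduced dimension away
    return tuple(dims)


print(infer_reduce_shape((5, 6, 10), dim=-2))              # (5, 10)
print(infer_reduce_shape((5, 6, 10), dim=-2, keep_dim=1))  # (5, 1, 10)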
diff --git a/paddle/operators/reduce_op.cu b/paddle/operators/reduce_op.cu
index 9effc17ed3f..2dffea3a3a0 100644
--- a/paddle/operators/reduce_op.cu
+++ b/paddle/operators/reduce_op.cu
@@ -21,8 +21,8 @@ REGISTER_OP_GPU_KERNEL(
     reduce_sum,
     ops::ReduceKernel<paddle::platform::GPUPlace, float, ops::SumFunctor>);
 REGISTER_OP_GPU_KERNEL(reduce_sum_grad,
-                       ops::ReduceGradEigenKernel<paddle::platform::GPUPlace,
-                                                  float, ops::SumGradFunctor>);
+                       ops::ReduceGradKernel<paddle::platform::GPUPlace, float,
+                                             ops::SumGradFunctor>);
 
 REGISTER_OP_GPU_KERNEL(
     reduce_mean,
diff --git a/paddle/operators/reduce_op.h b/paddle/operators/reduce_op.h
index 9fd7d335ac6..0d62fa7d156 100644
--- a/paddle/operators/reduce_op.h
+++ b/paddle/operators/reduce_op.h
@@ -127,7 +127,7 @@ class ReduceKernel : public framework::OpKernel {
     if (dim < 0) dim = x_rank + dim;
     auto reduce_dim = Eigen::array<int, 1>({{dim}});
     // construct the squeezed output tensor
-    bool keep_dim = true;  // static_cast<bool>(context.Attr<int>("keep_dim"));
+    bool keep_dim = context.Attr<int>("keep_dim") == 1;
     DDim dims = output->dims();
     auto dims_vector = vectorize(dims);
     if (keep_dim && x_rank > 1) {
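For reference, the sum gradient computed by the ReduceGradKernel registered above is simply the upstream gradient broadcast back along the reduced axis. A minimal numpy sketch of that semantics; the helper name reduce_sum_grad is ours, for illustration only:

import numpy as np


def reduce_sum_grad(x, dout, dim):
    # d(sum)/dx: every input element along the reduced axis receives the
    # same upstream gradient value.
    if dim < 0:
        dim = x.ndim + dim
    if dout.ndim < x.ndim:  # the forward pass squeezed the reduced axis out
        dout = np.expand_dims(dout, axis=dim)
    return np.broadcast_to(dout, x.shape)


x = np.random.random((5, 6, 10)).astype("float32")
dout = np.ones((5, 10), dtype="float32")  # gradient of Out, shape (5, 10)
dx = reduce_sum_grad(x, dout, dim=-2)
assert dx.shape == x.shape and np.all(dx == 1.0)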
diff --git a/python/paddle/v2/framework/tests/test_reduce_op.py b/python/paddle/v2/framework/tests/test_reduce_op.py
index 49ef8eabd29..58951f29025 100644
--- a/python/paddle/v2/framework/tests/test_reduce_op.py
+++ b/python/paddle/v2/framework/tests/test_reduce_op.py
@@ -1,91 +1,88 @@
 import unittest
 import numpy as np
-from gradient_checker import GradientChecker, create_op
-from op_test_util import OpTestMeta
-from paddle.v2.framework.op import Operator
+from op_test import OpTest
 
 
-class TestSumOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
+class TestSumOp(OpTest):
     def setUp(self):
-        self.type = "reduce_sum"
+        self.op_type = "reduce_sum"
         self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")}
-        self.attrs = {'dim': -2}
-        out = self.inputs['X'].sum(axis=self.attrs['dim'])
-        self.outputs = {'Out': out}
+        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
+
+    def test_check_output(self):
+        self.check_output()
 
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')
 
-class TestSumGradOp(GradientChecker):
-    def test_normal(self):
-        op = Operator("reduce_sum", X="X", Out="Out", dim=-2)
-        # use small size to decrease the error of numerical calculation
-        inputs = {'X': np.random.random((5, 6, 10)).astype("float32")}
-        self.check_grad(op, inputs, set(["X"]), "Out")
 
-    def test_1d_tensor(self):
-        op = Operator("reduce_sum", X="X", Out="Out", dim=0)
-        # use small size to decrease the error of numerical calculation
-        inputs = {'X': np.random.random(10).astype("float32")}
-        self.check_grad(op, inputs, set(["X"]), "Out")
+class TestMeanOp(OpTest):
+    def setUp(self):
+        self.op_type = "reduce_mean"
+        self.inputs = {'X': np.random.random((5, 6, 2, 10)).astype("float32")}
+        self.attrs = {'dim': 1}
+        self.outputs = {'Out': self.inputs['X'].mean(axis=self.attrs['dim'])}
 
+    def test_check_output(self):
+        self.check_output()
 
-class TestKeepdimSumOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
-
-    def setUp(self):
-        self.type = "reduce_sum"
-        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")}
-        self.attrs = {'dim': -2}
-        out = self.inputs['X'].sum(axis=self.attrs['dim'], keepdims=True)
-        self.outputs = {'Out': out}
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')
 
 
-class TestMeanOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
+class TestMaxOp(OpTest):
+    """Remove Max with subgradient from gradient check to confirm the success of CI."""
 
     def setUp(self):
-        self.type = "reduce_mean"
+        self.op_type = "reduce_max"
         self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")}
         self.attrs = {'dim': -1}
-        out = self.inputs['X'].mean(axis=self.attrs['dim'])
-        self.outputs = {'Out': out}
+        self.outputs = {'Out': self.inputs['X'].max(axis=self.attrs['dim'])}
+
+    def test_check_output(self):
+        self.check_output()
 
 
-class TestMeanGradOp(GradientChecker):
-    def test_normal(self):
-        op = Operator("reduce_mean", X="X", Out="Out", dim=-2)
-        # use small size to decrease the error of numerical calculation
-        inputs = {'X': np.random.random((5, 6, 10)).astype("float32")}
-        self.check_grad(op, inputs, set(["X"]), "Out")
+class TestMinOp(OpTest):
+    """Remove Min with subgradient from gradient check to confirm the success of CI."""
 
-    def test_1d_tensor(self):
-        op = Operator("reduce_mean", X="X", Out="Out", dim=0)
-        # use small size to decrease the error of numerical calculation
-        inputs = {'X': np.random.random(10).astype("float32")}
-        self.check_grad(op, inputs, set(["X"]), "Out")
+    def setUp(self):
+        self.op_type = "reduce_min"
+        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")}
+        self.attrs = {'dim': 2}
+        self.outputs = {'Out': self.inputs['X'].min(axis=self.attrs['dim'])}
 
+    def test_check_output(self):
+        self.check_output()
 
-class TestMaxOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
 
+class TestKeepDimReduce(OpTest):
     def setUp(self):
-        self.type = "reduce_max"
+        self.op_type = "reduce_sum"
         self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")}
-        self.attrs = {'dim': -1}
-        out = self.inputs['X'].max(axis=self.attrs['dim'])
-        self.outputs = {'Out': out}
+        self.attrs = {'dim': -2, 'keep_dim': 1}
+        self.outputs = {
+            'Out': self.inputs['X'].sum(axis=self.attrs['dim'], keepdims=True)
+        }
+
+    def test_check_output(self):
+        self.check_output()
 
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')
 
-class TestMinOp(unittest.TestCase):
-    __metaclass__ = OpTestMeta
 
+class Test1DReduce(OpTest):
     def setUp(self):
-        self.type = "reduce_max"
-        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float32")}
-        self.attrs = {'dim': -2}
-        out = self.inputs['X'].min(axis=self.attrs['dim'])
-        self.outputs = {'Out': out}
+        self.op_type = "reduce_sum"
+        self.inputs = {'X': np.random.random(20).astype("float32")}
+        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')
 
 
 if __name__ == '__main__':
     unittest.main()
-- 
GitLab
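For context, OpTest.check_grad used throughout the new tests compares the operator's analytic gradient against a numeric one. A simplified sketch of the numeric side, assuming central finite differences (not the framework's actual implementation):

import numpy as np


def numeric_grad(f, x, eps=1e-3):
    # Central finite differences over each element of x; writes go through
    # flat views, so x and grad are updated in place.
    grad = np.zeros_like(x)
    flat_x = x.reshape(-1)
    flat_g = grad.reshape(-1)
    for i in range(flat_x.size):
        orig = flat_x[i]
        flat_x[i] = orig + eps
        hi = f(x).sum()
        flat_x[i] = orig - eps
        lo = f(x).sum()
        flat_x[i] = orig
        flat_g[i] = (hi - lo) / (2 * eps)
    return grad


# For reduce_sum along axis -2, every element's gradient is exactly 1.
x = np.random.random((5, 6, 10)).astype("float64")
g = numeric_grad(lambda t: t.sum(axis=-2), x)
assert np.allclose(g, np.ones_like(x))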