From 4724a31839bfc61219d565a102d288ab38cb00f1 Mon Sep 17 00:00:00 2001 From: kinghuin Date: Mon, 20 Apr 2020 14:40:30 +0800 Subject: [PATCH] =?UTF-8?q?[cherry-pick]=20optimize=20compare=20and=20logi?= =?UTF-8?q?cal=20ops=20error=20info=20#23732=EF=BC=8C#23862?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit optimize compare and logical ops error info, add test case for this ops, add some test case for those ops --- .../fluid/operators/controlflow/compare_op.cc | 12 +-- .../fluid/operators/controlflow/logical_op.cc | 20 ++--- python/paddle/fluid/layers/control_flow.py | 77 ++++++++++++++----- python/paddle/fluid/layers/nn.py | 6 ++ .../fluid/tests/unittests/test_compare_op.py | 17 ++++ .../fluid/tests/unittests/test_logical_op.py | 16 ++++ 6 files changed, 113 insertions(+), 35 deletions(-) diff --git a/paddle/fluid/operators/controlflow/compare_op.cc b/paddle/fluid/operators/controlflow/compare_op.cc index 11b24304eae..c1730b10671 100644 --- a/paddle/fluid/operators/controlflow/compare_op.cc +++ b/paddle/fluid/operators/controlflow/compare_op.cc @@ -80,14 +80,16 @@ class CompareOp : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContext* context) const override { OpComment comment; - PADDLE_ENFORCE(context->HasInput("X"), "%s operator must have input X", - comment.type); - PADDLE_ENFORCE(context->HasInput("Y"), "%s operator must have input Y", - comment.type); + OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", comment.type); + OP_INOUT_CHECK(context->HasInput("Y"), "Input", "Y", comment.type); auto dim_x = context->GetInputDim("X"); auto dim_y = context->GetInputDim("Y"); + PADDLE_ENFORCE_GE(dim_x.size(), dim_y.size(), - "The size of dim_y should not be greater than dim_x's."); + platform::errors::InvalidArgument( + "The size of dim_y should not be greater than " + "dim_x's, but received dim_y: %d > dim_x: %d", + dim_y.size(), dim_x.size())); context->SetOutputDim("Out", 
context->GetInputDim("X")); context->ShareLoD("X", "Out"); diff --git a/paddle/fluid/operators/controlflow/logical_op.cc b/paddle/fluid/operators/controlflow/logical_op.cc index 4b3cdf68183..e1cecb0a049 100644 --- a/paddle/fluid/operators/controlflow/logical_op.cc +++ b/paddle/fluid/operators/controlflow/logical_op.cc @@ -79,10 +79,7 @@ class UnaryLogicalOp : public LogicalOp { protected: void InferShape(framework::InferShapeContext *context) const override { OpComment comment; - PADDLE_ENFORCE_EQ( - context->HasInput("X"), true, - platform::errors::NotFound("Input(X) of %s operator must not be null", - comment.type)); + OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", comment.type); context->SetOutputDim("Out", context->GetInputDim("X")); context->ShareLoD("X", "Out"); } @@ -96,10 +93,8 @@ class BinaryLogicalOp : public LogicalOp { protected: void InferShape(framework::InferShapeContext *context) const override { OpComment comment; - PADDLE_ENFORCE_EQ(context->HasInput("X"), true, - "Input(X) of %s operator must not be null", comment.type); - PADDLE_ENFORCE_EQ(context->HasInput("Y"), true, - "Input(Y) of %s operator must not be null", comment.type); + OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", comment.type); + OP_INOUT_CHECK(context->HasInput("Y"), "Input", "Y", comment.type); auto dim_x = context->GetInputDim("X"); auto dim_y = context->GetInputDim("Y"); @@ -107,10 +102,11 @@ class BinaryLogicalOp : public LogicalOp { int product_y = framework::product(dim_y); bool check = context->IsRuntime() || (product_x >= 0 && product_y >= 0); if (check) { - PADDLE_ENFORCE_EQ( - product_x, product_y, - "The number of elements in X and Y should be same, %d != %d", - product_x, product_y); + PADDLE_ENFORCE_EQ(product_x, product_y, + platform::errors::InvalidArgument( + "The number of elements in X and Y should be same, " + "but received %d != %d", + product_x, product_y)); } context->SetOutputDim("Out", context->GetInputDim("X")); diff --git 
a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index 5cd44a2d41f..52176a6784d 100755 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -1415,8 +1415,9 @@ def less_than(x, y, force_cpu=None, cond=None): x(${x_type}): ${x_comment}. y(${y_type}): ${y_comment}. force_cpu(${force_cpu_type}): ${force_cpu_comment}. - cond(Variable|None): Optional output variable to store the result of *less_than* - + cond(Variable, optional): Optional output which can be any created Variable + that meets the requirements to store the result of *less_than*. + if cond is None, a new Varibale will be created to store the result. Returns: ${out_comment}. @@ -1443,6 +1444,15 @@ def less_than(x, y, force_cpu=None, cond=None): result_value, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[result]) print(result_value) # [[True, False], [False, False]] """ + check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"], + "less_than") + check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"], + "less_than") + if cond is not None: + check_type(cond, "cond", Variable, "less_than") + if force_cpu != None: + check_type(force_cpu, "force_cpu", bool, "less_than") + helper = LayerHelper("less_than", **locals()) if cond is None: cond = helper.create_variable_for_type_inference(dtype='bool') @@ -1469,12 +1479,11 @@ def less_equal(x, y, cond=None): Args: x(Variable): First input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64. y(Variable): Second input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64. - cond(Variable, optional): If is :attr:`None`, the op will create a variable as output tensor, the input shape and data type of \ - this tensor is the same as input :attr:`x`. 
If is not :attr:`None`, the op will set the variable as output tensor, the input shape \ - and data type of this tensor should be the same as input :attr:`x`. Default value is :attr:`None`. + cond(Variable, optional): Optional output which can be any created Variable that meets the requirements to store the result of *less_equal*. + if cond is None, a new Varibale will be created to store the result. Returns: - Variable, the output data type is bool.: The tensor variable storing the output, the output shape is the same as input :attr:`x`. + Variable, the output data type is bool: The tensor variable storing the output, the output shape is same as input :attr:`x`. Examples: .. code-block:: python @@ -1487,6 +1496,13 @@ def less_equal(x, y, cond=None): out1 = label<= limit #out1=[True, False] """ + check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"], + "less_equal") + check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"], + "less_equal") + if cond is not None: + check_type(cond, "cond", Variable, "less_equal") + helper = LayerHelper("less_equal", **locals()) if cond is None: cond = helper.create_variable_for_type_inference(dtype='bool') @@ -1511,12 +1527,11 @@ def greater_than(x, y, cond=None): Args: x(Variable): First input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64. y(Variable): Second input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64. - cond(Variable, optional): If is :attr:`None`, the op will create a variable as output tensor, the shape and data type of this \ - tensor is the same as input :attr:`x` . If is not :attr:`None`, the op will set the variable as output tensor, the shape and data type \ - of this tensor should be the same as input :attr:`x` . Default value is :attr:`None`. 
+ cond(Variable, optional): Optional output which can be any created Variable that meets the requirements to store the result of *greater_than*. + if cond is None, a new Varibale will be created to store the result. Returns: - Variable, the output data type is bool.: The tensor variable storing the output, the output shape is the same as input :attr:`x` . + Variable, the output data type is bool: The tensor variable storing the output, the output shape is same as input :attr:`x` . Examples: .. code-block:: python @@ -1528,6 +1543,13 @@ def greater_than(x, y, cond=None): out = fluid.layers.greater_than(x=label, y=limit) #out=[False, True] out1 = label > limit #out1=[False, True] """ + check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"], + "greater_than") + check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"], + "greater_than") + if cond is not None: + check_type(cond, "cond", Variable, "greater_than") + helper = LayerHelper("greater_than", **locals()) if cond is None: cond = helper.create_variable_for_type_inference(dtype='bool') @@ -1552,12 +1574,11 @@ def greater_equal(x, y, cond=None): Args: x(Variable): First input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64. y(Variable): Second input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64. - cond(Variable, optional): If is :attr:`None` , the op will create a variable as output tensor, the shape and data type of this \ - tensor is the same as input :attr:`x`. If is not :attr:`None` , the op will set the variable as output tensor, the shape and data \ - type of this tensor is the same as input :attr:`x`. Default value is :attr:`None`. + cond(Variable, optional): Optional output which can be any created Variable that meets the requirements to store the result of *greater_equal*. + if cond is None, a new Varibale will be created to store the result. 
Returns: - Variable, the output data type is bool.: The tensor variable storing the output, the output shape is the same as input :attr:`x`. + Variable, the output data type is bool: The tensor variable storing the output, the output shape is same as input :attr:`x`. Examples: .. code-block:: python @@ -1571,6 +1592,13 @@ def greater_equal(x, y, cond=None): out_1 = label >= limit #out1=[True, False] """ + check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"], + "greater_equal") + check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"], + "greater_equal") + if cond is not None: + check_type(cond, "cond", Variable, "greater_equal") + helper = LayerHelper("greater_equal", **locals()) if cond is None: cond = helper.create_variable_for_type_inference(dtype='bool') @@ -1614,6 +1642,13 @@ def equal(x, y, cond=None): out1 = fluid.layers.equal(x=label,y=limit) #out1=[True, False] out2 = fluid.layers.equal(x=label_cond,y=limit, cond=out_cond) #out2=[False, True] out_cond=[False, True] """ + check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"], + "equal") + check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"], + "equal") + if cond is not None: + check_type(cond, "cond", Variable, "equal") + helper = LayerHelper("equal", **locals()) if cond is None: cond = helper.create_variable_for_type_inference(dtype='bool') @@ -1632,12 +1667,11 @@ def not_equal(x, y, cond=None): Args: x(Variable): First input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64. y(Variable): Second input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64. - cond(Variable, optional): If is :attr:`None`, the op will create a variable as output tensor, the shape and data type of this \ - tensor is the same as input :attr:`x`. 
If is not :attr:`None`, the op will set the variable as output tensor, the shape and data \ - type of this tensor should be the same as input :attr:`x`. Default value is :attr:`None`. + cond(Variable, optional): Optional output which can be any created Variable that meets the requirements to store the result of *not_equal*. + if cond is None, a new Varibale will be created to store the result. Returns: - Variable, the output data type is bool.: The tensor variable storing the output, the output shape is the same as input :attr:`x`. + Variable, the output data type is bool: The tensor variable storing the output, the output shape is same as input :attr:`x`. Examples: .. code-block:: python @@ -1648,6 +1682,13 @@ def not_equal(x, y, cond=None): limit = fluid.layers.fill_constant(shape=[1], value=1, dtype='int64') out = fluid.layers.not_equal(x=label, y=limit) """ + check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"], + "not_equal") + check_variable_and_dtype(y, "y", ["float32", "float64", "int32", "int64"], + "not_equal") + if cond is not None: + check_type(cond, "cond", Variable, "not_equal") + helper = LayerHelper("not_equal", **locals()) if cond is None: cond = helper.create_variable_for_type_inference(dtype='bool') diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 9b2d79b92bd..c85877180d8 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -11372,6 +11372,12 @@ Examples: def _logical_op(op_name, x, y, out=None, name=None, binary_op=True): + check_variable_and_dtype(x, "x", ["bool"], op_name) + if y is not None: + check_variable_and_dtype(y, "y", ["bool"], op_name) + if out is not None: + check_type(out, "out", Variable, op_name) + helper = LayerHelper(op_name, **locals()) if binary_op: diff --git a/python/paddle/fluid/tests/unittests/test_compare_op.py b/python/paddle/fluid/tests/unittests/test_compare_op.py index ec37a43e4ec..9d4a9082b54 100644 --- 
a/python/paddle/fluid/tests/unittests/test_compare_op.py +++ b/python/paddle/fluid/tests/unittests/test_compare_op.py @@ -36,6 +36,23 @@ def create_test_class(op_type, typename, callback): def test_output(self): self.check_output() + def test_errors(self): + with program_guard(Program(), Program()): + x = fluid.layers.data(name='x', shape=[2], dtype='int32') + y = fluid.layers.data(name='y', shape=[2], dtype='int32') + a = fluid.layers.data(name='a', shape=[2], dtype='int16') + if self.op_type == "less_than": + self.assertRaises( + TypeError, + fluid.layers.less_than, + x=x, + y=y, + force_cpu=1) + op = eval("fluid.layers.%s" % self.op_type) + self.assertRaises(TypeError, op, x=x, y=y, cond=1) + self.assertRaises(TypeError, op, x=x, y=a) + self.assertRaises(TypeError, op, x=a, y=y) + cls_name = "{0}_{1}".format(op_type, typename) Cls.__name__ = cls_name globals()[cls_name] = Cls diff --git a/python/paddle/fluid/tests/unittests/test_logical_op.py b/python/paddle/fluid/tests/unittests/test_logical_op.py index 521851a3d57..8f0049a8d30 100644 --- a/python/paddle/fluid/tests/unittests/test_logical_op.py +++ b/python/paddle/fluid/tests/unittests/test_logical_op.py @@ -17,6 +17,8 @@ from __future__ import print_function import op_test import unittest import numpy as np +import paddle.fluid as fluid +from paddle.fluid import Program, program_guard def create_test_class(op_type, callback, binary_op=True): @@ -38,6 +40,20 @@ def create_test_class(op_type, callback, binary_op=True): def test_output(self): self.check_output() + def test_error(self): + with program_guard(Program(), Program()): + x = fluid.layers.data(name='x', shape=[2], dtype='bool') + y = fluid.layers.data(name='y', shape=[2], dtype='bool') + a = fluid.layers.data(name='a', shape=[2], dtype='int32') + op = eval("fluid.layers.%s" % self.op_type) + if self.op_type != "logical_not": + self.assertRaises(TypeError, op, x=x, y=y, out=1) + self.assertRaises(TypeError, op, x=x, y=a) + self.assertRaises(TypeError, op, 
x=a, y=y) + else: + self.assertRaises(TypeError, op, x=x, out=1) + self.assertRaises(TypeError, op, x=a) + Cls.__name__ = op_type globals()[op_type] = Cls -- GitLab