From cdbe5707e9944136151cab6628fcf0f81f19f51e Mon Sep 17 00:00:00 2001
From: mamingjie-China <52770436+mamingjie-China@users.noreply.github.com>
Date: Sun, 12 Apr 2020 09:21:27 +0800
Subject: [PATCH] API(argsort, argmax, argmin, cast, diag) error message
 enhancement

* API(argsort, argmax, argmin, cast, diag) error message enhancement,
  test=develop
---
 paddle/fluid/operators/arg_min_max_op_base.h  | 15 ++++++++---
 paddle/fluid/operators/argsort_op.cc          | 26 +++++++++----------
 paddle/fluid/operators/cast_op.cc             |  8 ++----
 paddle/fluid/operators/diag_op.cc             | 15 ++++++-----
 python/paddle/fluid/layers/tensor.py          | 22 ++++++++++++++--
 .../unittests/ir/test_ir_fusion_group_pass.py |  2 +-
 .../tests/unittests/test_arg_min_max_op.py    | 18 +++++++++++++
 .../fluid/tests/unittests/test_argsort_op.py  | 23 ++++++++++++++++
 .../fluid/tests/unittests/test_cast_op.py     | 10 ++++---
 .../paddle/fluid/tests/unittests/test_diag.py | 14 ++++++++++
 10 files changed, 116 insertions(+), 37 deletions(-)

diff --git a/paddle/fluid/operators/arg_min_max_op_base.h b/paddle/fluid/operators/arg_min_max_op_base.h
index 76c8426123..0fc7b47c62 100644
--- a/paddle/fluid/operators/arg_min_max_op_base.h
+++ b/paddle/fluid/operators/arg_min_max_op_base.h
@@ -136,14 +136,21 @@ class ArgMinMaxOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "arg_min_max");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "arg_min_max");
     const auto& x_dims = ctx->GetInputDim("X");
     int64_t axis = ctx->Attrs().Get<int64_t>("axis");
     bool keepdims = ctx->Attrs().Get<bool>("keepdims");
 
-    PADDLE_ENFORCE(axis >= -x_dims.size() && axis < x_dims.size(),
-                   "'axis' must be inside [-Rank(X), Rank(X))");
+    PADDLE_ENFORCE_GE(axis, -x_dims.size(),
+                      platform::errors::InvalidArgument(
+                          "'axis'(%d) must be greater than or equal to"
+                          " -Rank(X)(%d).",
+                          axis, -x_dims.size()));
+    PADDLE_ENFORCE_LT(
+        axis, x_dims.size(),
+        platform::errors::InvalidArgument(
+            "'axis'(%d) must be less than Rank(X)(%d).", axis, x_dims.size()));
 
     auto x_rank = x_dims.size();
     if (axis < 0) axis += x_rank;
diff --git a/paddle/fluid/operators/argsort_op.cc b/paddle/fluid/operators/argsort_op.cc
index 867699f32a..6fc2c1d35f 100644
--- a/paddle/fluid/operators/argsort_op.cc
+++ b/paddle/fluid/operators/argsort_op.cc
@@ -23,25 +23,23 @@ class ArgsortOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of ArgsortOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of ArgsortOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Indices"),
-                   "Output(Indices) of ArgsortOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "argsort");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "argsort");
+    OP_INOUT_CHECK(ctx->HasOutput("Indices"), "Output", "Indices", "argsort");
 
     auto in_dims = ctx->GetInputDim("X");
     int axis = ctx->Attrs().Get<int>("axis");
 
     auto num_dims = in_dims.size();
-    PADDLE_ENFORCE(axis < num_dims,
-                   "Attr(axis) %d of ArgsortOp is out of bounds for Input(X)'s "
-                   "rank %d.",
-                   axis, num_dims);
-    PADDLE_ENFORCE(axis >= -num_dims,
-                   "Attr(axis) %d of ArgsortOp must be not less than "
not less than " - "-rank(Input(X)) (%d).", - axis, num_dims); + PADDLE_ENFORCE_GE(axis, -num_dims, + platform::errors::InvalidArgument( + "'axis'(%d) must be greater than or equal to" + " -num_dims(%d).", + axis, -num_dims)); + PADDLE_ENFORCE_LT( + axis, num_dims, + platform::errors::InvalidArgument( + "'axis'(%d) must be less than num_dims(%d).", axis, num_dims)); ctx->ShareDim("X", "Out"); ctx->ShareDim("X", "Indices"); diff --git a/paddle/fluid/operators/cast_op.cc b/paddle/fluid/operators/cast_op.cc index 457b641a23..933d959d58 100644 --- a/paddle/fluid/operators/cast_op.cc +++ b/paddle/fluid/operators/cast_op.cc @@ -59,12 +59,8 @@ class CastOp : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContext *context) const override { - PADDLE_ENFORCE_EQ( - context->HasInput("X"), true, - platform::errors::NotFound("The input(X) of cast op must be set")); - PADDLE_ENFORCE_EQ( - context->HasOutput("Out"), true, - platform::errors::NotFound("The output of cast op must be set")); + OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", "cast"); + OP_INOUT_CHECK(context->HasOutput("Out"), "Output", "Out", "cast"); context->SetOutputDim("Out", context->GetInputDim("X")); context->ShareLoD("X", "Out"); } diff --git a/paddle/fluid/operators/diag_op.cc b/paddle/fluid/operators/diag_op.cc index 1a3dd006c7..ecbddc346a 100644 --- a/paddle/fluid/operators/diag_op.cc +++ b/paddle/fluid/operators/diag_op.cc @@ -22,15 +22,16 @@ class DiagOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Diagonal"), - "Input(Diagonal) of DiagOp should not be null."); - - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of DiagOp should not be null."); + OP_INOUT_CHECK(ctx->HasInput("Diagonal"), "Input", "Diagonal", "diag"); + OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "diag"); auto s_dims = ctx->GetInputDim("Diagonal"); - PADDLE_ENFORCE(s_dims.size() == 1, - "The rank of Input(Diagonal) should only be 1."); + + PADDLE_ENFORCE_EQ( + s_dims.size(), 1UL, + platform::errors::InvalidArgument( + "The dimension of 'diagonal' must be 1, but now it is %d.", + s_dims.size())); ctx->SetOutputDim("Out", {s_dims[0], s_dims[0]}); } diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index e705534c99..68da9c49ba 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -196,11 +196,16 @@ def cast(x, dtype): # [[ 1 -2] # [ 0 4]] int32 """ - helper = LayerHelper('cast', **locals()) check_variable_and_dtype( x, 'x', ['bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'uint8'], 'cast') + check_dtype(dtype, 'dtype', [ + 'bool', 'float16', 'float32', 'float64', 'int8', 'int32', 'int64', + 'uint8' + ], 'cast') + + helper = LayerHelper('cast', **locals()) out = helper.create_variable_for_type_inference(dtype=dtype) helper.append_op( type='cast', @@ -768,6 +773,9 @@ def argmin(x, axis=0): # [[0 0 2] # [1 0 2]] """ + check_variable_and_dtype( + x, 'x', ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'], + 'argmin') helper = LayerHelper("arg_min", **locals()) out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64) helper.append_op( @@ -828,6 +836,9 @@ def argmax(x, axis=0): # [[2 3 1] # [0 3 1]] """ + check_variable_and_dtype( + x, 'x', ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'], + 'argmax') helper = LayerHelper("arg_max", 
     out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
     helper.append_op(
@@ -909,6 +920,9 @@ def argsort(input, axis=-1, descending=False, name=None):
             #  [4. 7. 4. 6.]
             #  [5. 7. 7. 9.]]]
     """
+    check_variable_and_dtype(
+        input, 'input',
+        ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'], 'argsort')
     helper = LayerHelper("argsort", **locals())
     out = helper.create_variable_for_type_inference(
         dtype=input.dtype, stop_gradient=True)
@@ -1106,6 +1120,7 @@ def has_inf(x):
             res = fluid.layers.has_inf(data)
 
     """
+    # check_type(x, 'x', (Variable), 'has_inf')
     helper = LayerHelper("isinf", **locals())
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": out})
@@ -1130,6 +1145,7 @@ def has_nan(x):
             res = fluid.layers.has_nan(data)
 
     """
+    # check_type(x, 'x', (Variable), 'has_nan')
     helper = LayerHelper("isnan", **locals())
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(type="isnan", inputs={"X": x}, outputs={"Out": out})
@@ -1333,7 +1349,9 @@ def diag(diagonal):
             # diagonal.shape=(3,) data.shape=(3, 3)
 
     """
-
+    check_type(diagonal, 'diagonal', (Variable, numpy.ndarray), 'diag')
+    check_dtype(diagonal.dtype, 'diagonal',
+                ['float32', 'float64', 'int32', 'int64'], 'diag')
     helper = LayerHelper("diag", **locals())
 
     if not isinstance(diagonal, Variable):
diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py b/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py
index aab789bf63..1957ad6e2b 100644
--- a/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/test_ir_fusion_group_pass.py
@@ -173,7 +173,7 @@ class FusionGroupPassCastTest(FusionGroupPassTest):
             self.feed_vars = self._prepare_feed_vars([2, 2], dtype, 2)
 
             tmp_0 = layers.elementwise_add(self.feed_vars[0], self.feed_vars[1])
-            tmp_1 = layers.cast(tmp_0, dtype="double")
+            tmp_1 = layers.cast(tmp_0, dtype="float64")
             tmp_2 = layers.cast(tmp_1, dtype="float32")
 
             self.append_gradients(tmp_2)
diff --git a/python/paddle/fluid/tests/unittests/test_arg_min_max_op.py b/python/paddle/fluid/tests/unittests/test_arg_min_max_op.py
index 3741adf8bb..0201f0635a 100644
--- a/python/paddle/fluid/tests/unittests/test_arg_min_max_op.py
+++ b/python/paddle/fluid/tests/unittests/test_arg_min_max_op.py
@@ -20,6 +20,7 @@ from op_test import OpTest
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
+from paddle.fluid import Program, program_guard
 
 
 class BaseTestCase(OpTest):
@@ -285,5 +286,22 @@ class APT_ArgMaxTest(unittest.TestCase):
         self.assertRaises(TypeError, test_dtype2)
 
 
+class TestArgMinMaxOpError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+
+            def test_argmax_x_type():
+                x1 = [1, 2, 3]
+                output = fluid.layers.argmax(x=x1)
+
+            self.assertRaises(TypeError, test_argmax_x_type)
+
+            def test_argmin_x_type():
+                x2 = [1, 2, 3]
+                output = fluid.layers.argmin(x=x2)
+
+            self.assertRaises(TypeError, test_argmin_x_type)
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_argsort_op.py b/python/paddle/fluid/tests/unittests/test_argsort_op.py
index 7583387436..e53a0e83db 100644
--- a/python/paddle/fluid/tests/unittests/test_argsort_op.py
+++ b/python/paddle/fluid/tests/unittests/test_argsort_op.py
@@ -351,5 +351,28 @@ class TestSortOnGPU(TestSortOnCPU):
             self.place = core.CPUPlace()
 
 
+class TestArgsortErrorOnCPU(unittest.TestCase):
+    def init_place(self):
+        self.place = core.CPUPlace()
+
+    def test_error(self):
+        self.init_place()
+        with fluid.program_guard(fluid.Program()):
+
+            def test_input_type():
+                x = [1]
+                output = fluid.layers.argsort(input=x)
+
+            self.assertRaises(TypeError, test_input_type)
+
+
+class TestArgsortErrorOnGPU(TestArgsortErrorOnCPU):
+    def init_place(self):
+        if core.is_compiled_with_cuda():
+            self.place = core.CUDAPlace(0)
+        else:
+            self.place = core.CPUPlace()
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_cast_op.py b/python/paddle/fluid/tests/unittests/test_cast_op.py
index 284d7101aa..084efc9455 100644
--- a/python/paddle/fluid/tests/unittests/test_cast_op.py
+++ b/python/paddle/fluid/tests/unittests/test_cast_op.py
@@ -78,10 +78,14 @@ class TestCastOpError(unittest.TestCase):
                 np.array([[-1]]), [[1]], fluid.CPUPlace())
             self.assertRaises(TypeError, fluid.layers.cast, x1, 'int32')
             # The input dtype of cast_op must be bool, float16, float32, float64, int32, int64, uint8.
-            x2 = fluid.layers.data(name='x2', shape=[4], dtype='int8')
+            x2 = fluid.layers.data(name='x2', shape=[4], dtype='int16')
             self.assertRaises(TypeError, fluid.layers.cast, x2, 'int32')
-            x3 = fluid.layers.data(name='x3', shape=[4], dtype='int16')
-            self.assertRaises(TypeError, fluid.layers.cast, x3, 'int32')
+
+            def test_dtype_type():
+                x4 = fluid.layers.data(name='x4', shape=[4], dtype='int32')
+                output = fluid.layers.cast(x=x4, dtype='int16')
+
+            self.assertRaises(TypeError, test_dtype_type)
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/test_diag.py b/python/paddle/fluid/tests/unittests/test_diag.py
index eed8b91f0e..b6566676d2 100644
--- a/python/paddle/fluid/tests/unittests/test_diag.py
+++ b/python/paddle/fluid/tests/unittests/test_diag.py
@@ -17,6 +17,9 @@ from __future__ import print_function
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid as fluid
+from paddle.fluid import core
+from paddle.fluid import Program, program_guard
 
 
 class TestDiagOp(OpTest):
@@ -39,5 +42,16 @@ class TestDiagOpCase1(TestDiagOp):
         self.case = np.array([3], dtype='int32')
 
 
+class TestDiagError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+
+            def test_diag_type():
+                x = [1, 2, 3]
+                output = fluid.layers.diag(diagonal=x)
+
+            self.assertRaises(TypeError, test_diag_type)
+
+
 if __name__ == "__main__":
     unittest.main()
-- 
GitLab
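
Appendix (not part of the applied patch; anything after the "-- " signature line is ignored by git am): a minimal sketch of how the tightened checks surface in user code. It assumes a Paddle build that already contains this change; the variable names are illustrative only. Each call fails fast at graph-construction time with the clearer message instead of erroring deep inside the operator:

    import paddle.fluid as fluid

    with fluid.program_guard(fluid.Program(), fluid.Program()):
        x = fluid.layers.data(name='x', shape=[4], dtype='int32')

        # cast: 'int16' is not in the allowed target-dtype list, so the new
        # check_dtype call raises TypeError before any op is appended.
        try:
            fluid.layers.cast(x=x, dtype='int16')
        except TypeError as e:
            print('cast:', e)

        # diag: a plain list is neither a Variable nor a numpy.ndarray, so
        # the new check_type call raises TypeError.
        try:
            fluid.layers.diag(diagonal=[1, 2, 3])
        except TypeError as e:
            print('diag:', e)

        # argsort: an out-of-range axis now trips PADDLE_ENFORCE_LT in
        # ArgsortOp::InferShape and reports an InvalidArgument message.
        try:
            fluid.layers.argsort(input=x, axis=5)
        except Exception as e:
            print('argsort:', e)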