From ec0f78a33daa4a5bc665937a3eca46ead27c8db1 Mon Sep 17 00:00:00 2001
From: hutuxian
Date: Thu, 14 May 2020 14:02:10 +0800
Subject: [PATCH] Upgrade Error Message for AucOP & MultiplexOP (#24458)
 (#24526)

---
 paddle/fluid/operators/metrics/auc_op.cc      | 22 +++++----
 paddle/fluid/operators/multiplex_op.cc        | 47 ++++++++++++-------
 paddle/fluid/operators/multiplex_op.cu        |  8 ++--
 paddle/fluid/operators/multiplex_op.h         |  6 ++-
 python/paddle/fluid/layers/metric_op.py       |  2 +
 python/paddle/fluid/layers/nn.py              | 12 +++--
 .../fluid/tests/unittests/test_auc_op.py      | 23 ++++++++-
 .../unittests/test_inference_model_io.py      |  2 +-
 .../tests/unittests/test_multiplex_op.py      | 34 ++++++++++++++
 9 files changed, 120 insertions(+), 36 deletions(-)

diff --git a/paddle/fluid/operators/metrics/auc_op.cc b/paddle/fluid/operators/metrics/auc_op.cc
index d9bb2982f0..1dfb22718e 100644
--- a/paddle/fluid/operators/metrics/auc_op.cc
+++ b/paddle/fluid/operators/metrics/auc_op.cc
@@ -23,29 +23,33 @@ class AucOp : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Predict"),
-                   "Input of Out should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Label"),
-                   "Input of Label should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Predict"), "Input", "Predict", "Auc");
+    OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "Auc");
     auto predict_width = ctx->GetInputDim("Predict")[1];
     if (ctx->IsRuntime()) {
       PADDLE_ENFORCE_LE(predict_width, 2,
-                        "Only support binary classification,"
-                        "prediction dims[1] should be 1 or 2");
+                        platform::errors::InvalidArgument(
+                            "Only support binary classification,"
+                            "prediction dims[1] should be 1 or 2"));
     }
     auto predict_height = ctx->GetInputDim("Predict")[0];
     auto label_height = ctx->GetInputDim("Label")[0];
 
     if (ctx->IsRuntime()) {
       PADDLE_ENFORCE_EQ(predict_height, label_height,
-                        "Out and Label should have same height.");
+                        platform::errors::InvalidArgument(
+                            "Out and Label should have same height."));
     }
 
     int num_pred_buckets = ctx->Attrs().Get<int>("num_thresholds") + 1;
     int slide_steps = ctx->Attrs().Get<int>("slide_steps");
 
-    PADDLE_ENFORCE_GE(num_pred_buckets, 1, "num_thresholds must larger than 1");
-    PADDLE_ENFORCE_GE(slide_steps, 0, "slide_steps must be natural number");
+    PADDLE_ENFORCE_GE(
+        num_pred_buckets, 1,
+        platform::errors::InvalidArgument("num_thresholds must larger than 1"));
+    PADDLE_ENFORCE_GE(slide_steps, 0,
+                      platform::errors::InvalidArgument(
+                          "slide_steps must be natural number"));
 
     ctx->SetOutputDim("AUC", {1});
 
diff --git a/paddle/fluid/operators/multiplex_op.cc b/paddle/fluid/operators/multiplex_op.cc
index ec77732afe..313a479ea3 100644
--- a/paddle/fluid/operators/multiplex_op.cc
+++ b/paddle/fluid/operators/multiplex_op.cc
@@ -26,28 +26,39 @@ class MultiplexOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Ids"), "Input(Ids) shouldn't be null.");
-    PADDLE_ENFORCE(!ctx->Inputs("X").empty(),
-                   "MultiInput(X) shouldn't be empty.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) shouldn't be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Ids"), "Input", "Ids", "Multiplex");
+    PADDLE_ENFORCE_NE(
+        ctx->Inputs("X").empty(), true,
+        platform::errors::InvalidArgument("MultiInput(X) shouldn't be empty."));
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Multiplex");
     auto ids_dim = ctx->GetInputDim("Ids");
-    PADDLE_ENFORCE(
-        ids_dim.size() == 2 && ids_dim[1] == 1,
-        "The index tensor must be a vector with size batchSize x 1.");
+    PADDLE_ENFORCE_EQ(
+        ids_dim.size(), 2,
+        platform::errors::PreconditionNotMet(
+            "The index tensor must be a vector with 2 dimensions"));
+    PADDLE_ENFORCE_EQ(
+        ids_dim[1], 1,
+        platform::errors::PreconditionNotMet(
+            "The index tensor must be a vector with batchSize x 1."));
 
     auto ins_dims = ctx->GetInputsDim("X");
     auto num_ins = ins_dims.size();
-    PADDLE_ENFORCE(num_ins > 1,
-                   "multiplex operator should have more than "
-                   "one candidate input tensors.");
+    PADDLE_ENFORCE_GT(num_ins, 1,
+                      platform::errors::InvalidArgument(
+                          "multiplex operator should have more than "
+                          "one candidate input tensors."));
 
     auto in_dim = ins_dims[0];
-    PADDLE_ENFORCE(in_dim.size() >= 2,
-                   "The rank of candidate tensors must be not less than 2.");
+    PADDLE_ENFORCE_GE(
+        in_dim.size(), 2,
+        platform::errors::InvalidArgument(
+            "The rank of candidate tensors must be not less than 2."));
     for (size_t i = 1; i < num_ins; i++) {
       auto dim = ins_dims[i];
-      PADDLE_ENFORCE(in_dim == dim,
-                     "All the candidate tensors must have the same size.");
+      PADDLE_ENFORCE_EQ(
+          in_dim, dim,
+          platform::errors::PreconditionNotMet(
+              "All the candidate tensors must have the same size."));
     }
     ctx->SetOutputDim("Out", in_dim);
   }
@@ -115,9 +126,11 @@ class MultiplexGradOp : public framework::OperatorWithKernel {
 
   void InferShape(framework::InferShapeContext* ctx) const override {
     auto dxs = ctx->Outputs(framework::GradVarName("X"));
-    PADDLE_ENFORCE(!dxs.empty(), "Output(X@Grad) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null.");
+    PADDLE_ENFORCE_NE(dxs.empty(), true,
+                      platform::errors::InvalidArgument(
+                          "Output(X@Grad) should not be null."));
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "MultiplexGrad");
     auto dout_dim = ctx->GetInputDim(framework::GradVarName("Out"));
     ctx->SetOutputsDim(framework::GradVarName("X"),
                        std::vector<framework::DDim>(dxs.size(), dout_dim));
diff --git a/paddle/fluid/operators/multiplex_op.cu b/paddle/fluid/operators/multiplex_op.cu
index 1ef54ecc73..1da5df6b8a 100644
--- a/paddle/fluid/operators/multiplex_op.cu
+++ b/paddle/fluid/operators/multiplex_op.cu
@@ -39,9 +39,11 @@ class MultiplexGPUKernel : public framework::OpKernel<T> {
     platform::CUDAPlace place = boost::get<platform::CUDAPlace>(ctx.GetPlace());
     for (auto i = 0; i < rows; i++) {
       int32_t k = index[i];
-      PADDLE_ENFORCE_GE(k, 0, "index must be nonnegative.");
-      PADDLE_ENFORCE_LT((size_t)k, ins.size(),
-                        "index exceeds the number of candidate tensors.");
+      PADDLE_ENFORCE_GE(k, 0, platform::errors::PreconditionNotMet(
+                                  "index must be nonnegative."));
+      PADDLE_ENFORCE_LT(static_cast<size_t>(k), ins.size(),
+                        platform::errors::PreconditionNotMet(
+                            "index exceeds the number of candidate tensors."));
       memory::Copy(place, out->data<T>() + i * cols, place,
                    ins[k]->data<T>() + i * cols, cols * sizeof(T), stream);
     }
diff --git a/paddle/fluid/operators/multiplex_op.h b/paddle/fluid/operators/multiplex_op.h
index 44d6cc84a6..d9653406eb 100644
--- a/paddle/fluid/operators/multiplex_op.h
+++ b/paddle/fluid/operators/multiplex_op.h
@@ -37,9 +37,11 @@ class MultiplexCPUKernel : public framework::OpKernel<T> {
     platform::CPUPlace place = boost::get<platform::CPUPlace>(ctx.GetPlace());
     for (auto i = 0; i < rows; i++) {
       int32_t k = index[i];
-      PADDLE_ENFORCE_GE(k, 0, "index must be nonnegative.");
+      PADDLE_ENFORCE_GE(k, 0, platform::errors::PreconditionNotMet(
+                                  "index must be nonnegative."));
       PADDLE_ENFORCE_LT(static_cast<size_t>(k), ins.size(),
-                        "index exceeds the number of candidate tensors.");
+                        platform::errors::PreconditionNotMet(
+                            "index exceeds the number of candidate tensors."));
       memory::Copy(place, out->data<T>() + i * cols, place,
                    ins[k]->data<T>() + i * cols, cols * sizeof(T));
     }
diff --git a/python/paddle/fluid/layers/metric_op.py b/python/paddle/fluid/layers/metric_op.py
index 3b6207c2c6..3ec88a2d5d 100644
--- a/python/paddle/fluid/layers/metric_op.py
+++ b/python/paddle/fluid/layers/metric_op.py
@@ -175,6 +175,8 @@ def auc(input,
             #[array([0.5])]
     """
     helper = LayerHelper("auc", **locals())
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'auc')
+    check_variable_and_dtype(label, 'label', ['int32', 'int64'], 'auc')
     auc_out = helper.create_variable_for_type_inference(dtype="float64")
     batch_auc_out = helper.create_variable_for_type_inference(dtype="float64")
     # make tp, tn, fp, fn persistable, so that can accumulate all batches.
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index ce555c5599..25c7d5339c 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -6995,9 +6995,15 @@ def multiplex(inputs, index):
     """
     helper = LayerHelper('multiplex', **locals())
 
-    if not isinstance(inputs, list) and len(inputs) < 2:
-        raise ValueError("inputs should be a list object and contains at least "
-                         "2 elements.")
+    check_type(inputs, 'inputs', (list), 'multiplex')
+    if len(inputs) < 2:
+        raise ValueError(
+            "inputs should be a list object with at least 2 elements.")
+    for id, x in enumerate(inputs):
+        check_variable_and_dtype(x, 'input[' + str(id) + ']',
+                                 ['float32', 'float64', 'int32', 'int64'],
+                                 'multiplex')
+    check_variable_and_dtype(index, "index", ['int32', 'int64'], 'multiplex')
 
     out = helper.create_variable_for_type_inference(inputs[0].dtype)
     helper.append_op(
diff --git a/python/paddle/fluid/tests/unittests/test_auc_op.py b/python/paddle/fluid/tests/unittests/test_auc_op.py
index a07587fdb2..6568da5d00 100644
--- a/python/paddle/fluid/tests/unittests/test_auc_op.py
+++ b/python/paddle/fluid/tests/unittests/test_auc_op.py
@@ -18,6 +18,7 @@ import unittest
 import numpy as np
 from op_test import OpTest
 from paddle.fluid import metrics
+import paddle.fluid as fluid
 
 
 class TestAucOp(OpTest):
@@ -104,5 +105,25 @@ class TestGlobalAucOp(OpTest):
         self.check_output()
 
 
-if __name__ == "__main__":
+class TestAucOpError(unittest.TestCase):
+    def test_errors(self):
+        with fluid.program_guard(fluid.Program(), fluid.Program()):
+
+            def test_type1():
+                data1 = fluid.data(name="input1", shape=[-1, 2], dtype="int")
+                label1 = fluid.data(name="label1", shape=[-1], dtype="int")
+                result1 = fluid.layers.auc(input=data1, label=label1)
+
+            self.assertRaises(TypeError, test_type1)
+
+            def test_type2():
+                data2 = fluid.data(
+                    name="input2", shape=[-1, 2], dtype="float32")
+                label2 = fluid.data(name="label2", shape=[-1], dtype="float32")
+                result2 = fluid.layers.auc(input=data2, label=label2)
+
+            self.assertRaises(TypeError, test_type2)
+
+
+if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_inference_model_io.py b/python/paddle/fluid/tests/unittests/test_inference_model_io.py
index f9408dfee5..2133255a02 100644
--- a/python/paddle/fluid/tests/unittests/test_inference_model_io.py
+++ b/python/paddle/fluid/tests/unittests/test_inference_model_io.py
@@ -142,7 +142,7 @@ class TestSaveInferenceModel(unittest.TestCase):
         # fake program without feed/fetch
         with program_guard(program, init_program):
             x = layers.data(name='x', shape=[2], dtype='float32')
-            y = layers.data(name='y', shape=[1], dtype='float32')
+            y = layers.data(name='y', shape=[1], dtype='int32')
             predict = fluid.layers.fc(input=x, size=2, act='softmax')
             acc = fluid.layers.accuracy(input=predict, label=y)
             auc_var, batch_auc_var, auc_states = fluid.layers.auc(input=predict,
diff --git a/python/paddle/fluid/tests/unittests/test_multiplex_op.py b/python/paddle/fluid/tests/unittests/test_multiplex_op.py
index 9b42b705ef..47c648d44b 100644
--- a/python/paddle/fluid/tests/unittests/test_multiplex_op.py
+++ b/python/paddle/fluid/tests/unittests/test_multiplex_op.py
@@ -17,6 +17,7 @@ from __future__ import print_function
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid as fluid
 
 
 class TestMultiplexOp(OpTest):
@@ -57,5 +58,38 @@ class TestMultiplexOp(OpTest):
         self.check_grad(['x1', 'x2', 'x4'], 'Out', no_grad_set=set('x3'))
 
 
+class TestMultiplexOpError(unittest.TestCase):
+    def test_errors(self):
+        with fluid.program_guard(fluid.Program(), fluid.Program()):
+            x1 = fluid.data(name='x1', shape=[None, 2], dtype='int64')
+            x2 = fluid.data(name='x2', shape=[None, 2], dtype='int64')
+            index = fluid.data(name='index', shape=[None, 1], dtype='int32')
+
+            def test_list():
+                # the inputs type must be list
+                fluid.layers.multiplex(inputs=x1, index=index)
+
+            self.assertRaises(TypeError, test_list)
+
+            def test_len():
+                fluid.layers.multiplex(inputs=[x1], index=index)
+
+            self.assertRaises(ValueError, test_len)
+
+            def test_type():
+                y1 = fluid.data(name='y1', shape=[None, 2], dtype='int16')
+                y2 = fluid.data(name='y2', shape=[None, 2], dtype='int16')
+                fluid.layers.multiplex(inputs=[y1, y2], index=index)
+
+            self.assertRaises(TypeError, test_type)
+
+            def test_type2():
+                index2 = fluid.data(
+                    name='index2', shape=[None, 1], dtype='int16')
+                fluid.layers.multiplex(inputs=[x1, x2], index=index2)
+
+            self.assertRaises(TypeError, test_type2)
+
+
 if __name__ == '__main__':
     unittest.main()
-- 
GitLab
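
Note (not part of the patch): a minimal sketch of how the upgraded Python-side validation surfaces to users, assuming a fluid install of this era; the variable names below are illustrative only.

    import paddle.fluid as fluid

    # Build a program so the layer-level checks run at graph-construction time.
    with fluid.program_guard(fluid.Program(), fluid.Program()):
        x1 = fluid.data(name='x1', shape=[None, 2], dtype='float32')
        index = fluid.data(name='index', shape=[None, 1], dtype='int32')
        try:
            # `inputs` must be a list; after this patch check_type rejects a
            # bare Variable with a TypeError instead of failing later in C++.
            fluid.layers.multiplex(inputs=x1, index=index)
        except TypeError as e:
            print(e)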