From 7ee5b8bce12d620def5b9e8fe1505c6ad7db1770 Mon Sep 17 00:00:00 2001
From: Aurelius84
Date: Fri, 11 Oct 2019 08:27:30 +0800
Subject: [PATCH] enhance shape error message of fc API (#20172) (#20349)

* add api check in fc test=develop

* enforce shape error info of sum op test=develop

* fix spelling test=develop

* print x_dims info test=develop

* enhance shape error info test=develop
---
 paddle/fluid/operators/sum_op.cc                  | 54 ++++++++++++-------
 python/paddle/fluid/layers/nn.py                  | 16 +++++-
 .../fluid/tests/unittests/test_fc_op.py           | 27 ++++++++++
 3 files changed, 78 insertions(+), 19 deletions(-)

diff --git a/paddle/fluid/operators/sum_op.cc b/paddle/fluid/operators/sum_op.cc
index 37204fd72ae..54281b1927d 100644
--- a/paddle/fluid/operators/sum_op.cc
+++ b/paddle/fluid/operators/sum_op.cc
@@ -33,10 +33,11 @@ class SumOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInputs("X"), "Inputs(X) should not be null");
+    PADDLE_ENFORCE_EQ(ctx->HasInputs("X"), true,
+                      "Inputs(X) should not be null");
 
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SumOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
+                      "Output(Out) of SumOp should not be null.");
     if (ctx->IsRuntime() &&
         ctx->GetOutputsVarType("Out")[0] ==
             framework::proto::VarType::LOD_TENSOR_ARRAY) {
@@ -46,10 +47,15 @@ class SumOp : public framework::OperatorWithKernel {
 
     auto x_var_types = ctx->GetInputsVarType("X");
     auto x_dims = ctx->GetInputsDim("X");
-    size_t N = x_dims.size();
-    PADDLE_ENFORCE_GT(N, 0, "Input tensors count should > 0.");
+    auto N = x_dims.size();
+    PADDLE_ENFORCE_GT(
+        N, 0,
+        "ShapeError: The input tensor X's dimensions of SumOp "
+        "should be larger than 0. But received X's dimensions %d, "
+        "X's shape = [%s].",
+        N, &x_dims);
     if (N == 1) {
-      VLOG(3) << "Warning: sum have only one input, may waste memory";
+      VLOG(3) << "Warning: SumOp has only one input, which may waste memory";
     }
 
     framework::DDim in_dim({0});
@@ -67,18 +73,29 @@ class SumOp : public framework::OperatorWithKernel {
         in_dim = x_dim;
       } else {
         if (ctx->IsRuntime()) {
-          PADDLE_ENFORCE_EQ(in_dim, x_dim,
-                            "Input tensors must have same shape");
+          PADDLE_ENFORCE_EQ(
+              in_dim, x_dim,
+              "ShapeError: The input tensor X of SumOp must have same shape. "
+              "But received X[0]'s shape = [%s], X[%d]'s shape = [%s].",
+              in_dim, i, x_dim);
         } else {
-          PADDLE_ENFORCE_EQ(in_dim.size(), x_dim.size(),
-                            "Input tensors must have same shape size");
+          PADDLE_ENFORCE_EQ(
+              in_dim.size(), x_dim.size(),
+              "ShapeError: The input tensor X of SumOp must have same "
+              "dimensions. But received X[0]'s dimensions = %d, X[0]'s shape = "
+              "[%s], X[%d]'s dimensions = %d, X[%d]'s shape = [%s].",
+              in_dim.size(), in_dim, i, x_dim.size(), i, x_dim);
           // if in_dim or x_dim has -1, not check equal
-          for (int i = 0; i < x_dim.size(); ++i) {
-            if (x_dim[i] == -1 || in_dim[i] == -1) {
+          for (int j = 0; j < x_dim.size(); ++j) {
+            if (x_dim[j] == -1 || in_dim[j] == -1) {
               continue;
             }
-            PADDLE_ENFORCE_EQ(in_dim[i], x_dim[i],
-                              "Input tensors must have same shape if not -1");
+            PADDLE_ENFORCE_EQ(
+                in_dim[j], x_dim[j],
+                "ShapeError: The input tensor X of SumOp must have same shape "
+                "if not -1. "
+ "But received X[0]'s shape = [%s], X[%d]'s shape = [%s].", + in_dim, i, x_dim); } } } @@ -107,8 +124,9 @@ class SumOp : public framework::OperatorWithKernel { if (x_vars[0]->IsType()) { int dtype = -1; for (size_t idx = 0; idx < x_vars.size(); ++idx) { - PADDLE_ENFORCE(x_vars[idx] != nullptr, - "Input var[%s] should not be nullptr", x_vars_name[idx]); + PADDLE_ENFORCE_NOT_NULL(x_vars[idx], + "Input var[%s] should not be nullptr", + x_vars_name[idx]); auto tensor = framework::GetLoDTensorOrSelectedRowsValueFromVar(*x_vars[idx]); if (tensor->numel() <= 0 || (!tensor->IsInitialized())) { @@ -202,8 +220,8 @@ class SumOpVarTypeInference : public framework::VarTypeInference { for (auto& each : inputs) { os << " " << each << " type is " << ctx->GetType(each) << "\n"; } - PADDLE_ENFORCE(all_inputs_are_tensor_array, - "Not all inputs are tensor array:\n%s", os.str()); + PADDLE_ENFORCE_EQ(all_inputs_are_tensor_array, true, + "Not all inputs are tensor array:\n%s", os.str()); } var_type = framework::proto::VarType::LOD_TENSOR_ARRAY; } else if (any_input_is_lod_tensor) { diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index b0998721125..d92ad84f5ee 100755 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -343,8 +343,22 @@ def fc(input, fc = fluid.layers.fc(input=[data_1, data_2], size=1000, act="tanh") """ helper = LayerHelper("fc", **locals()) - + if isinstance(input, (list, tuple)): + for i, input_x in enumerate(input): + if not isinstance(input_x, Variable): + raise TypeError( + "The type of input[%d] in fc must be Variable, but received %s" + % (i, type(input_x))) + else: + if not isinstance(input, Variable): + raise TypeError( + "The type of 'input' in fc must be Variable, but received %s" % + (type(input))) dtype = helper.input_dtype() + if convert_dtype(dtype) not in ['float32', 'float64']: + raise TypeError( + "The data type of 'input' in fc must be float32 or float64, but received %s." + % (convert_dtype(dtype))) mul_results = [] for input_var, param_attr in helper.iter_inputs_and_params(): diff --git a/python/paddle/fluid/tests/unittests/test_fc_op.py b/python/paddle/fluid/tests/unittests/test_fc_op.py index 6c2088af3dd..e50579f76d6 100644 --- a/python/paddle/fluid/tests/unittests/test_fc_op.py +++ b/python/paddle/fluid/tests/unittests/test_fc_op.py @@ -15,6 +15,8 @@ import unittest import numpy as np from op_test import OpTest +import paddle.fluid as fluid +from paddle.fluid import Program, program_guard def fc_refer(matrix, with_bias, with_relu=False): @@ -122,5 +124,30 @@ class TestFCOpWithBias3(TestFCOp): self.matrix = MatrixGenerate(1, 64, 32, 3, 3, 1) +class TestFCOpError(OpTest): + def test_errors(self): + with program_guard(Program(), Program()): + input_data = np.random.random((2, 4)).astype("float32") + + def test_Variable(): + # the input type must be Variable + fluid.layers.fc(input=input_data, size=1) + + self.assertRaises(TypeError, test_Variable) + + def test_input_list(): + # each of input(list) must be Variable + fluid.layers.fc(input=[input_data], size=1) + + self.assertRaises(TypeError, test_input_list) + + def test_type(): + # dtype must be float32 or float64 + x2 = fluid.layers.data(name='x2', shape=[4], dtype='int32') + fluid.layers.fc(input=x2, size=1) + + self.assertRaises(TypeError, test_type) + + if __name__ == "__main__": unittest.main() -- GitLab