diff --git a/paddle/fluid/operators/metrics/accuracy_op.cc b/paddle/fluid/operators/metrics/accuracy_op.cc index 26e6ab1568d15362c7793fe1eb1e970e4a8946d7..d6360c83f092602a9780196945e335f2884b5b46 100644 --- a/paddle/fluid/operators/metrics/accuracy_op.cc +++ b/paddle/fluid/operators/metrics/accuracy_op.cc @@ -40,12 +40,24 @@ class AccuracyOp : public framework::OperatorWithKernel { // Assume indices has same shape as inference, because // it's the output of topk. - PADDLE_ENFORCE_EQ(label_dim.size(), 2, "label's rank must be 2."); - PADDLE_INFERSHAPE_ENFORCE_EQ(ctx, label_dim[1], 1, - "label's second dimension must be 1"); - PADDLE_INFERSHAPE_ENFORCE_EQ(ctx, inference_dim[0], label_dim[0], - "the inference tensor's num_rows must be" - " the same as label."); + PADDLE_ENFORCE_EQ( + label_dim.size(), 2, + "ShapeError: label's dimensions of AccuracyOp must be 2. " + "But received label's dimensions = %d, label's shape = [%s]", + label_dim.size(), label_dim); + PADDLE_INFERSHAPE_ENFORCE_EQ( + ctx, label_dim[1], 1, + "ShapeError: label's second dimension of " + "AccuracyOp must be 1. But received label's " + "second dimension is = %d, label's shape = [%s]", + label_dim[1], label_dim); + PADDLE_INFERSHAPE_ENFORCE_EQ( + ctx, inference_dim[0], label_dim[0], + "ShapeError: the output's num_rows of AccuracyOp must be" + " the same as label's num_rows. But received output's " + "shape = [%s], label's shape = [%s], output's num_rows = %d, label's " + "num_rows = %d", + inference_dim, label_dim, inference_dim[0], label_dim[0]); ctx->SetOutputDim("Accuracy", {1}); ctx->SetOutputDim("Correct", {1}); diff --git a/paddle/fluid/operators/metrics/accuracy_op.h b/paddle/fluid/operators/metrics/accuracy_op.h old mode 100644 new mode 100755 diff --git a/python/paddle/fluid/layers/metric_op.py b/python/paddle/fluid/layers/metric_op.py index 86f757bd598c087ba30ee687a81abf42de034bc4..2a7cec0af145fdfa49b69ec7c8f5336479767e05 100755 --- a/python/paddle/fluid/layers/metric_op.py +++ b/python/paddle/fluid/layers/metric_op.py @@ -23,6 +23,7 @@ from ..initializer import Normal, Constant from ..framework import Variable from ..param_attr import ParamAttr from . import nn +from ..data_feeder import convert_dtype __all__ = ['accuracy', 'auc'] @@ -71,6 +72,18 @@ def accuracy(input, label, k=1, correct=None, total=None): #[array([0.6666667], dtype=float32)] """ helper = LayerHelper("accuracy", **locals()) + if not isinstance(input, Variable): + raise TypeError( + "The type of 'input' in accuracy must be Variable, but received %s" + % (type(input))) + if convert_dtype(input.dtype) in ['float16']: + warnings.warn( + "The data type of 'input' in accuracy only support float16 in GPU now." + ) + if convert_dtype(input.dtype) not in ['float16', 'float32', 'float64']: + raise TypeError( + "The data type of 'input' in accuracy must be float16 or float32 or float64, but received %s."
+ % (convert_dtype(input.dtype))) topk_out, topk_indices = nn.topk(input, k=k) acc_out = helper.create_variable_for_type_inference(dtype="float32") if correct is None: diff --git a/python/paddle/fluid/tests/unittests/test_accuracy_op.py b/python/paddle/fluid/tests/unittests/test_accuracy_op.py old mode 100644 new mode 100755 index b57aaeb52a053babb2102aae10e8ed96eec634ae..33ccecb77b32a8554dfcc95dba8137a58316c3d3 --- a/python/paddle/fluid/tests/unittests/test_accuracy_op.py +++ b/python/paddle/fluid/tests/unittests/test_accuracy_op.py @@ -17,6 +17,8 @@ from __future__ import print_function import unittest import numpy as np from op_test import OpTest +import paddle.fluid as fluid +from paddle.fluid import compiler, Program, program_guard class TestAccuracyOp(OpTest): @@ -56,5 +58,21 @@ class TestAccuracyOpFp16(TestAccuracyOp): self.check_output(atol=1e-3) +class TestAccuracyOpError(OpTest): + def test_errors(self): + with program_guard(Program(), Program()): + # The input type of accuracy_op must be Variable. + x1 = fluid.create_lod_tensor( + np.array([[-1]]), [[1]], fluid.CPUPlace()) + self.assertRaises(TypeError, fluid.layers.accuracy, x1) + # The input dtype of accuracy_op must be float32 or float64. + x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32") + self.assertRaises(TypeError, fluid.layers.accuracy, x2) + x3 = fluid.layers.data(name='input', shape=[-1, 2], dtype="float16") + label = fluid.layers.data( + name='label', shape=[-1, 1], dtype="int32") + fluid.layers.accuracy(input=x3, label=label) + + if __name__ == '__main__': unittest.main()