Unverified commit 843bdbaa, authored by JesseyXujin, committed by GitHub

add input type and dtype check for accuracy_op (#20399)

* add input type and dtype check for accuracy_op

* add input type and dtype check for accuracy_op

* modify python error on accuracy_op,add test=develop

* modify details on accuracy_op, test=develop

* test float16, test=develop

* add warning, test=develop
Parent: a5f530a8
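The user-visible effect of the commit is easiest to see from the Python side. Below is a minimal sketch (using the fluid 1.x static-graph API that the test diff further down relies on; the variable names are illustrative, not part of the commit) of what the new checks accept and reject:

import numpy as np
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard

with program_guard(Program(), Program()):
    # Accepted: 'input' is a Variable with a supported dtype (float32 here).
    predictions = fluid.layers.data(name='predictions', shape=[-1, 2], dtype='float32')
    label = fluid.layers.data(name='label', shape=[-1, 1], dtype='int64')
    acc = fluid.layers.accuracy(input=predictions, label=label, k=1)

    # Rejected after this commit: passing a LoDTensor instead of a Variable
    # now raises a TypeError up front instead of failing later inside the operator.
    bad_input = fluid.create_lod_tensor(np.array([[-1.0]]), [[1]], fluid.CPUPlace())
    try:
        fluid.layers.accuracy(input=bad_input, label=label)
    except TypeError as e:
        print(e)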
@@ -40,12 +40,24 @@ class AccuracyOp : public framework::OperatorWithKernel {
     // Assume indices has same shape as inference, because
     // it's the output of topk.
-    PADDLE_ENFORCE_EQ(label_dim.size(), 2, "label's rank must be 2.");
-    PADDLE_INFERSHAPE_ENFORCE_EQ(ctx, label_dim[1], 1,
-                                 "label's second dimension must be 1");
-    PADDLE_INFERSHAPE_ENFORCE_EQ(ctx, inference_dim[0], label_dim[0],
-                                 "the inference tensor's num_rows must be"
-                                 " the same as label.");
+    PADDLE_ENFORCE_EQ(
+        label_dim.size(), 2,
+        "ShapeError: label's dimensions of AccuracyOp must be 2. "
+        "But received label's dimensions = %d, label's shape = [%s]",
+        label_dim.size(), label_dim);
+    PADDLE_INFERSHAPE_ENFORCE_EQ(
+        ctx, label_dim[1], 1,
+        "ShapeError: label's second dimension of "
+        "AccuracyOp must be 1. But received label's "
+        "second dimension is = %d, label's shape = [%s]",
+        label_dim[1], label_dim);
+    PADDLE_INFERSHAPE_ENFORCE_EQ(
+        ctx, inference_dim[0], label_dim[0],
+        "ShapeError: the output's num_rows of AccuracyOp must be"
+        " the same as label's num_rows. But received output's "
+        "shape = [%s], label's shape = [%s], output's num_rows = %d, label's "
+        "num_rows = %d",
+        inference_dim, label_dim, inference_dim[0], label_dim[0]);

     ctx->SetOutputDim("Accuracy", {1});
     ctx->SetOutputDim("Correct", {1});
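The C++ changes above only tighten the error messages for an existing shape contract: label must be a rank-2 tensor of shape [N, 1] whose first dimension matches the top-k output. A rough NumPy sketch of that contract (illustrative names only, not the operator's kernel):

import numpy as np

def accuracy_reference(topk_indices, label):
    # topk_indices: [N, k] indices from top-k; label: [N, 1] ground-truth ids.
    assert label.ndim == 2 and label.shape[1] == 1    # label's rank must be 2, second dim must be 1
    assert topk_indices.shape[0] == label.shape[0]    # num_rows of output and label must match
    # A row counts as correct if its label appears among its top-k indices.
    correct = (topk_indices == label).any(axis=1).sum()
    return correct / float(label.shape[0])

print(accuracy_reference(np.array([[2, 1], [0, 3]]), np.array([[1], [3]])))  # 1.0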
File mode changed from 100644 to 100755
@@ -23,6 +23,7 @@ from ..initializer import Normal, Constant
 from ..framework import Variable
 from ..param_attr import ParamAttr
 from . import nn
+from ..data_feeder import convert_dtype

 __all__ = ['accuracy', 'auc']

@@ -71,6 +72,18 @@ def accuracy(input, label, k=1, correct=None, total=None):
        #[array([0.6666667], dtype=float32)]
     """
     helper = LayerHelper("accuracy", **locals())
+    if not isinstance(input, Variable):
+        raise TypeError(
+            "The type of 'input' in accuracy must be Variable, but received %s"
+            % (type(input)))
+    if convert_dtype(input.dtype) in ['float16']:
+        warnings.warn(
+            "The data type of 'input' in accuracy only support float16 in GPU now."
+        )
+    if convert_dtype(input.dtype) not in ['float16', 'float32', 'float64']:
+        raise TypeError(
+            "The data type of 'input' in accuracy must be float16 or float32 or float64, but received %s."
+            % (convert_dtype(input.dtype)))
     topk_out, topk_indices = nn.topk(input, k=k)
     acc_out = helper.create_variable_for_type_inference(dtype="float32")
     if correct is None:
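Putting the Python-side gate to use: float32 and float64 inputs pass, float16 passes with a warning (the op only supports float16 on GPU), and any other dtype raises a TypeError before the graph is built. A short sketch under the same fluid 1.x API assumptions as above:

import paddle.fluid as fluid
from paddle.fluid import Program, program_guard

with program_guard(Program(), Program()):
    label = fluid.layers.data(name='label', shape=[-1, 1], dtype='int64')

    # float16 input: accepted, but a warning points out it only works on GPU.
    x_fp16 = fluid.layers.data(name='x_fp16', shape=[-1, 2], dtype='float16')
    fluid.layers.accuracy(input=x_fp16, label=label)

    # int32 input: rejected with a clear TypeError instead of a later C++ enforce failure.
    x_int = fluid.layers.data(name='x_int', shape=[-1, 2], dtype='int32')
    try:
        fluid.layers.accuracy(input=x_int, label=label)
    except TypeError as e:
        print(e)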
@@ -17,6 +17,8 @@ from __future__ import print_function
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid as fluid
+from paddle.fluid import compiler, Program, program_guard


 class TestAccuracyOp(OpTest):

@@ -56,5 +58,21 @@ class TestAccuracyOpFp16(TestAccuracyOp):
         self.check_output(atol=1e-3)

+class TestAccuracyOpError(OpTest):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            # The input type of accuracy_op must be Variable.
+            x1 = fluid.create_lod_tensor(
+                np.array([[-1]]), [[1]], fluid.CPUPlace())
+            self.assertRaises(TypeError, fluid.layers.accuracy, x1)
+            # The input dtype of accuracy_op must be float32 or float64.
+            x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
+            self.assertRaises(TypeError, fluid.layers.accuracy, x2)
+            x3 = fluid.layers.data(name='input', shape=[-1, 2], dtype="float16")
+            label = fluid.layers.data(
+                name='label', shape=[-1, 1], dtype="int32")
+            fluid.layers.accuracy(input=x3, label=label)
+

 if __name__ == '__main__':
     unittest.main()