Unverified · Commit ec0f78a3 · authored by H hutuxian · committed by GitHub

Upgrade Error Message for AucOP & MultiplexOP (#24458) (#24526)

Parent 101fcf81
@@ -23,29 +23,33 @@ class AucOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Predict"),
-                   "Input of Out should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Label"),
-                   "Input of Label should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Predict"), "Input", "Predict", "Auc");
+    OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "Auc");
     auto predict_width = ctx->GetInputDim("Predict")[1];
     if (ctx->IsRuntime()) {
-      PADDLE_ENFORCE_LE(predict_width, 2,
-                        "Only support binary classification,"
-                        "prediction dims[1] should be 1 or 2");
+      PADDLE_ENFORCE_LE(predict_width, 2,
+                        platform::errors::InvalidArgument(
+                            "Only support binary classification,"
+                            "prediction dims[1] should be 1 or 2"));
     }
     auto predict_height = ctx->GetInputDim("Predict")[0];
     auto label_height = ctx->GetInputDim("Label")[0];
     if (ctx->IsRuntime()) {
-      PADDLE_ENFORCE_EQ(predict_height, label_height,
-                        "Out and Label should have same height.");
+      PADDLE_ENFORCE_EQ(predict_height, label_height,
+                        platform::errors::InvalidArgument(
+                            "Out and Label should have same height."));
     }
     int num_pred_buckets = ctx->Attrs().Get<int>("num_thresholds") + 1;
     int slide_steps = ctx->Attrs().Get<int>("slide_steps");
-    PADDLE_ENFORCE_GE(num_pred_buckets, 1, "num_thresholds must larger than 1");
-    PADDLE_ENFORCE_GE(slide_steps, 0, "slide_steps must be natural number");
+    PADDLE_ENFORCE_GE(
+        num_pred_buckets, 1,
+        platform::errors::InvalidArgument("num_thresholds must larger than 1"));
+    PADDLE_ENFORCE_GE(slide_steps, 0,
+                      platform::errors::InvalidArgument(
+                          "slide_steps must be natural number"));
     ctx->SetOutputDim("AUC", {1});
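Note: the shape checks above still run only when the op executes (they are guarded by ctx->IsRuntime()). As a minimal sketch of how the upgraded InvalidArgument message reaches a user, assuming the fluid 1.x Python API on this branch (names, shapes, and the feed data below are illustrative):

    import numpy as np
    import paddle.fluid as fluid

    # Feed Predict/Label with mismatched batch heights so that the
    # PADDLE_ENFORCE_EQ(predict_height, label_height, ...) check above fires.
    predict = fluid.data(name="predict", shape=[-1, 2], dtype="float32")
    label = fluid.data(name="label", shape=[-1, 1], dtype="int64")
    auc_var, batch_auc_var, auc_states = fluid.layers.auc(input=predict, label=label)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    try:
        exe.run(feed={"predict": np.random.rand(4, 2).astype("float32"),
                      "label": np.zeros((3, 1), dtype="int64")},
                fetch_list=[auc_var])
    except Exception as err:
        print(err)  # should now carry InvalidArgument: "Out and Label should have same height."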
@@ -26,28 +26,39 @@ class MultiplexOp : public framework::OperatorWithKernel {
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Ids"), "Input(Ids) shouldn't be null.");
-    PADDLE_ENFORCE(!ctx->Inputs("X").empty(),
-                   "MultiInput(X) shouldn't be empty.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) shouldn't be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Ids"), "Input", "Ids", "Multiplex");
+    PADDLE_ENFORCE_NE(
+        ctx->Inputs("X").empty(), true,
+        platform::errors::InvalidArgument("MultiInput(X) shouldn't be empty."));
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Multiplex");
     auto ids_dim = ctx->GetInputDim("Ids");
-    PADDLE_ENFORCE(
-        ids_dim.size() == 2 && ids_dim[1] == 1,
-        "The index tensor must be a vector with size batchSize x 1.");
+    PADDLE_ENFORCE_EQ(
+        ids_dim.size(), 2,
+        platform::errors::PreconditionNotMet(
+            "The index tensor must be a vector with 2 dimensions"));
+    PADDLE_ENFORCE_EQ(
+        ids_dim[1], 1,
+        platform::errors::PreconditionNotMet(
+            "The index tensor must be a vector with batchSize x 1."));

     auto ins_dims = ctx->GetInputsDim("X");
     auto num_ins = ins_dims.size();
-    PADDLE_ENFORCE(num_ins > 1,
-                   "multiplex operator should have more than "
-                   "one candidate input tensors.");
+    PADDLE_ENFORCE_GT(num_ins, 1,
+                      platform::errors::InvalidArgument(
+                          "multiplex operator should have more than "
+                          "one candidate input tensors."));

     auto in_dim = ins_dims[0];
-    PADDLE_ENFORCE(in_dim.size() >= 2,
-                   "The rank of candidate tensors must be not less than 2.");
+    PADDLE_ENFORCE_GE(
+        in_dim.size(), 2,
+        platform::errors::InvalidArgument(
+            "The rank of candidate tensors must be not less than 2."));
     for (size_t i = 1; i < num_ins; i++) {
       auto dim = ins_dims[i];
-      PADDLE_ENFORCE(in_dim == dim,
-                     "All the candidate tensors must have the same size.");
+      PADDLE_ENFORCE_EQ(
+          in_dim, dim,
+          platform::errors::PreconditionNotMet(
+              "All the candidate tensors must have the same size."));
     }
     ctx->SetOutputDim("Out", in_dim);
  }
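For reference, a small usage sketch that satisfies the constraints checked above (two same-shaped candidate tensors plus a batchSize x 1 int32 index; fluid 1.x API assumed, variable names are illustrative):

    import numpy as np
    import paddle.fluid as fluid

    x1 = fluid.data(name="x1", shape=[None, 2], dtype="float32")
    x2 = fluid.data(name="x2", shape=[None, 2], dtype="float32")
    index = fluid.data(name="index", shape=[None, 1], dtype="int32")
    # Row i of the output is taken from inputs[index[i][0]].
    out = fluid.layers.multiplex(inputs=[x1, x2], index=index)

    exe = fluid.Executor(fluid.CPUPlace())
    res, = exe.run(feed={"x1": np.array([[1, 2], [3, 4]], dtype="float32"),
                         "x2": np.array([[5, 6], [7, 8]], dtype="float32"),
                         "index": np.array([[1], [0]], dtype="int32")},
                   fetch_list=[out])
    print(res)  # expected [[5. 6.] [3. 4.]]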
@@ -115,9 +126,11 @@ class MultiplexGradOp : public framework::OperatorWithKernel {
  void InferShape(framework::InferShapeContext* ctx) const override {
    auto dxs = ctx->Outputs(framework::GradVarName("X"));
-    PADDLE_ENFORCE(!dxs.empty(), "Output(X@Grad) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null.");
+    PADDLE_ENFORCE_NE(dxs.empty(), true,
+                      platform::errors::InvalidArgument(
+                          "Output(X@Grad) should not be null."));
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "MultiplexGrad");
    auto dout_dim = ctx->GetInputDim(framework::GradVarName("Out"));
    ctx->SetOutputsDim(framework::GradVarName("X"),
                       std::vector<framework::DDim>(dxs.size(), dout_dim));
@@ -39,9 +39,11 @@ class MultiplexGPUKernel : public framework::OpKernel<T> {
    platform::CUDAPlace place = boost::get<platform::CUDAPlace>(ctx.GetPlace());
    for (auto i = 0; i < rows; i++) {
      int32_t k = index[i];
-      PADDLE_ENFORCE_GE(k, 0, "index must be nonnegative.");
-      PADDLE_ENFORCE_LT((size_t)k, ins.size(),
-                        "index exceeds the number of candidate tensors.");
+      PADDLE_ENFORCE_GE(k, 0, platform::errors::PreconditionNotMet(
+                                  "index must be nonnegative."));
+      PADDLE_ENFORCE_LT(static_cast<size_t>(k), ins.size(),
+                        platform::errors::PreconditionNotMet(
+                            "index exceeds the number of candidate tensors."));
      memory::Copy(place, out->data<T>() + i * cols, place,
                   ins[k]->data<T>() + i * cols, cols * sizeof(T), stream);
    }
@@ -37,9 +37,11 @@ class MultiplexCPUKernel : public framework::OpKernel<T> {
    platform::CPUPlace place = boost::get<platform::CPUPlace>(ctx.GetPlace());
    for (auto i = 0; i < rows; i++) {
      int32_t k = index[i];
-      PADDLE_ENFORCE_GE(k, 0, "index must be nonnegative.");
+      PADDLE_ENFORCE_GE(k, 0, platform::errors::PreconditionNotMet(
+                                  "index must be nonnegative."));
      PADDLE_ENFORCE_LT(static_cast<size_t>(k), ins.size(),
-                        "index exceeds the number of candidate tensors.");
+                        platform::errors::PreconditionNotMet(
+                            "index exceeds the number of candidate tensors."));
      memory::Copy(place, out->data<T>() + i * cols, place,
                   ins[k]->data<T>() + i * cols, cols * sizeof(T));
    }
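Both the GPU and CPU kernels now raise the same PreconditionNotMet messages for a bad row index. A hedged repro sketch (fluid 1.x API; the out-of-range value 5 is illustrative):

    import numpy as np
    import paddle.fluid as fluid

    a = fluid.data(name="a", shape=[None, 2], dtype="float32")
    b = fluid.data(name="b", shape=[None, 2], dtype="float32")
    idx = fluid.data(name="idx", shape=[None, 1], dtype="int32")
    out = fluid.layers.multiplex(inputs=[a, b], index=idx)

    exe = fluid.Executor(fluid.CPUPlace())
    try:
        exe.run(feed={"a": np.ones((2, 2), dtype="float32"),
                      "b": np.ones((2, 2), dtype="float32"),
                      # 5 >= ins.size() == 2, so PADDLE_ENFORCE_LT above fires
                      "idx": np.array([[5], [0]], dtype="int32")},
                fetch_list=[out])
    except Exception as err:
        print(err)  # "index exceeds the number of candidate tensors."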
@@ -175,6 +175,8 @@ def auc(input,
             #[array([0.5])]
     """
     helper = LayerHelper("auc", **locals())
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'auc')
+    check_variable_and_dtype(label, 'label', ['int32', 'int64'], 'auc')
     auc_out = helper.create_variable_for_type_inference(dtype="float64")
     batch_auc_out = helper.create_variable_for_type_inference(dtype="float64")
     # make tp, tn, fp, fn persistable, so that can accumulate all batches.
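With these checks in place, a wrong dtype is rejected eagerly at graph-construction time with a TypeError instead of failing later inside the C++ op, which is what the new TestAucOpError cases below verify. A minimal sketch (fluid 1.x API assumed):

    import paddle.fluid as fluid

    with fluid.program_guard(fluid.Program(), fluid.Program()):
        data = fluid.data(name="in", shape=[-1, 2], dtype="int32")  # not float32/float64
        label = fluid.data(name="lb", shape=[-1, 1], dtype="int64")
        try:
            fluid.layers.auc(input=data, label=label)
        except TypeError as err:
            print(err)  # raised by check_variable_and_dtype on 'input'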
@@ -6995,9 +6995,15 @@ def multiplex(inputs, index):
     """
     helper = LayerHelper('multiplex', **locals())
-    if not isinstance(inputs, list) and len(inputs) < 2:
-        raise ValueError("inputs should be a list object and contains at least "
-                         "2 elements.")
+    check_type(inputs, 'inputs', (list), 'multiplex')
+    if len(inputs) < 2:
+        raise ValueError(
+            "inputs should be a list object with at least 2 elements.")
+    for id, x in enumerate(inputs):
+        check_variable_and_dtype(x, 'input[' + str(id) + ']',
+                                 ['float32', 'float64', 'int32', 'int64'],
+                                 'multiplex')
+    check_variable_and_dtype(index, "index", ['int32', 'int64'], 'multiplex')
     out = helper.create_variable_for_type_inference(inputs[0].dtype)
     helper.append_op(
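Likewise, multiplex now fails fast in Python; passing a single Variable instead of a list trips check_type before any op is appended. A short sketch (fluid 1.x API assumed):

    import paddle.fluid as fluid

    with fluid.program_guard(fluid.Program(), fluid.Program()):
        x = fluid.data(name="x", shape=[None, 2], dtype="float32")
        idx = fluid.data(name="idx", shape=[None, 1], dtype="int32")
        try:
            fluid.layers.multiplex(inputs=x, index=idx)  # not a list, so check_type raises
        except TypeError as err:
            print(err)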
@@ -18,6 +18,7 @@ import unittest
 import numpy as np
 from op_test import OpTest
 from paddle.fluid import metrics
+import paddle.fluid as fluid

 class TestAucOp(OpTest):
@@ -104,5 +105,25 @@ class TestGlobalAucOp(OpTest):
         self.check_output()

-if __name__ == "__main__":
+class TestAucOpError(unittest.TestCase):
+    def test_errors(self):
+        with fluid.program_guard(fluid.Program(), fluid.Program()):
+
+            def test_type1():
+                data1 = fluid.data(name="input1", shape=[-1, 2], dtype="int")
+                label1 = fluid.data(name="label1", shape=[-1], dtype="int")
+                result1 = fluid.layers.auc(input=data1, label=label1)
+
+            self.assertRaises(TypeError, test_type1)
+
+            def test_type2():
+                data2 = fluid.data(
+                    name="input2", shape=[-1, 2], dtype="float32")
+                label2 = fluid.data(name="label2", shape=[-1], dtype="float32")
+                result2 = fluid.layers.auc(input=data2, label=label2)
+
+            self.assertRaises(TypeError, test_type2)
+
+
+if __name__ == '__main__':
     unittest.main()
@@ -142,7 +142,7 @@ class TestSaveInferenceModel(unittest.TestCase):
         # fake program without feed/fetch
         with program_guard(program, init_program):
             x = layers.data(name='x', shape=[2], dtype='float32')
-            y = layers.data(name='y', shape=[1], dtype='float32')
+            y = layers.data(name='y', shape=[1], dtype='int32')
             predict = fluid.layers.fc(input=x, size=2, act='softmax')
             acc = fluid.layers.accuracy(input=predict, label=y)
             auc_var, batch_auc_var, auc_states = fluid.layers.auc(input=predict,
@@ -17,6 +17,7 @@ from __future__ import print_function
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid as fluid

 class TestMultiplexOp(OpTest):
@@ -57,5 +58,38 @@ class TestMultiplexOp(OpTest):
         self.check_grad(['x1', 'x2', 'x4'], 'Out', no_grad_set=set('x3'))

+class TestMultiplexOpError(unittest.TestCase):
+    def test_errors(self):
+        with fluid.program_guard(fluid.Program(), fluid.Program()):
+            x1 = fluid.data(name='x1', shape=[None, 2], dtype='int64')
+            x2 = fluid.data(name='x2', shape=[None, 2], dtype='int64')
+            index = fluid.data(name='index', shape=[None, 1], dtype='int32')
+
+            def test_list():
+                # the inputs type must be list
+                fluid.layers.multiplex(inputs=x1, index=index)
+
+            self.assertRaises(TypeError, test_list)
+
+            def test_len():
+                fluid.layers.multiplex(inputs=[x1], index=index)
+
+            self.assertRaises(ValueError, test_len)
+
+            def test_type():
+                y1 = fluid.data(name='y1', shape=[None, 2], dtype='int16')
+                y2 = fluid.data(name='y2', shape=[None, 2], dtype='int16')
+                fluid.layers.multiplex(inputs=[y1, y2], index=index)
+
+            self.assertRaises(TypeError, test_type)
+
+            def test_type2():
+                index2 = fluid.data(
+                    name='index2', shape=[None, 1], dtype='int16')
+                fluid.layers.multiplex(inputs=[x1, x2], index=index2)
+
+            self.assertRaises(TypeError, test_type2)
+
+
 if __name__ == '__main__':
     unittest.main()