Unverified commit f65c4b49, authored by huangjun12, committed by GitHub

[Cherry-pick Release/2.0] Error message enhancement of 6 op test=release/2.0 (#23953)

* Error message enhancement of 6 op, test=release/2.0
* refine huber loss unittests, test=release/2.0
Parent 6ed1109f
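All six operators follow the same pattern: bare PADDLE_ENFORCE(cond, "message") calls are replaced by OP_INOUT_CHECK for input/output existence and by typed comparison macros (PADDLE_ENFORCE_EQ/GE/GT/LT) carrying a platform::errors category, so a failure reports the operator name, the offending argument, and the value actually received. Below is a minimal standalone sketch of that convention; EnforceEq and the message text are illustrative stand-ins, not the real Paddle macros:

// Sketch only: a stand-in for PADDLE_ENFORCE_EQ(a, b,
// platform::errors::InvalidArgument(fmt, args...)); not a Paddle API.
#include <cstdio>
#include <cstdlib>

template <typename T, typename... Args>
void EnforceEq(const T& a, const T& b, const char* fmt, Args... args) {
  if (a == b) return;                  // check passed, nothing to report
  std::fprintf(stderr, "InvalidArgumentError: ");
  std::fprintf(stderr, fmt, args...);  // message carries the received values
  std::fprintf(stderr, "\n");
  std::exit(1);
}

int main() {
  int input_rank = 3;
  // Old style said only "The rank of Input(X) must be 2."; the enhanced
  // style also reports what actually arrived:
  EnforceEq(input_rank, 2,
            "Input(input) rank should be 2, but received input rank(%d) != 2",
            input_rank);
  return 0;
}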
......@@ -25,21 +25,31 @@ class HingeLossOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Logits"),
-                   "Input(Logits) must be initialized.");
-    PADDLE_ENFORCE(ctx->HasInput("Labels"),
-                   "Input(Labels) must be initialized.");
+    OP_INOUT_CHECK(ctx->HasInput("Logits"), "Input", "Logits", "HingeLoss");
+    OP_INOUT_CHECK(ctx->HasInput("Labels"), "Input", "Labels", "HingeLoss");
     auto pred_dims = ctx->GetInputDim("Logits");
     auto label_dims = ctx->GetInputDim("Labels");
-    PADDLE_ENFORCE_EQ(pred_dims, label_dims);
-    PADDLE_ENFORCE_EQ(pred_dims.size(), 2,
-                      "The rank of Input(Logits) must be 2 and the shape is "
-                      "[batch_size, 1].");
+    PADDLE_ENFORCE_EQ(
+        pred_dims, label_dims,
+        platform::errors::InvalidArgument(
+            "The Input(input) and Input(label) should have the same "
+            "shape, but received input shape [%s] != label shape [%s]",
+            pred_dims, label_dims));
+    PADDLE_ENFORCE_EQ(
+        pred_dims.size(), 2,
+        platform::errors::InvalidArgument("Input(input) rank should be 2, "
+                                          "but received input rank(%d) != 2",
+                                          pred_dims.size()));
     PADDLE_ENFORCE_EQ(pred_dims[1], 1,
-                      "Each row of Input(Logits) contains a real value, "
-                      "so the 2nd dimension of Input(Logits) must be 1.");
+                      platform::errors::InvalidArgument(
+                          "The second dimension of Input(input) should be 1, "
+                          "as each row of input contains a real value, "
+                          "but received second dimension of input (%d) != 1",
+                          pred_dims[1]));
     ctx->SetOutputDim("Loss", {pred_dims[0], 1});
     ctx->ShareLoD("Logits", "Loss");
......@@ -81,19 +91,22 @@ class HingeLossGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Logits"),
-                   "Input(Logits) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Labels"),
-                   "Input(Labels) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Loss")),
-                   "Input(Loss@GRAD) should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("Logits")),
-                   "Input(Logits@GRAD) should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Logits"), "Input", "Logits", "HingeLossGrad");
+    OP_INOUT_CHECK(ctx->HasInput("Labels"), "Input", "Labels", "HingeLossGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Loss")), "Input",
+                   "Loss@GRAD", "HingeLossGrad");
+    OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("Logits")), "Output",
+                   "Logits@GRAD", "HingeLossGrad");
     auto pred_dims = ctx->GetInputDim("Logits");
     auto loss_grad_dims = ctx->GetInputDim(framework::GradVarName("Loss"));
-    PADDLE_ENFORCE_EQ(loss_grad_dims, pred_dims);
+    PADDLE_ENFORCE_EQ(loss_grad_dims, pred_dims,
+                      platform::errors::InvalidArgument(
+                          "The shape of loss gradient should be the same as "
+                          "the shape of Input(input), but received the loss "
+                          "gradient shape [%s] != input shape [%s]",
+                          loss_grad_dims, pred_dims));
     auto pred_grad_name = framework::GradVarName("Logits");
     ctx->SetOutputDim(pred_grad_name, pred_dims);
......
......@@ -25,23 +25,27 @@ class HuberLossOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) must be initialized.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Y"), true,
-                      "Input(Y) must be initialized.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "HuberLoss");
+    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "HuberLoss");
     auto x_dims = ctx->GetInputDim("X");
     auto y_dims = ctx->GetInputDim("Y");
     PADDLE_ENFORCE_EQ(x_dims.size(), y_dims.size(),
-                      "The rank of Input(X) should be equal to "
-                      "the rank of Input(Y).");
+                      platform::errors::InvalidArgument(
+                          "Input(input) rank and Input(label) rank should be "
+                          "same, but received input rank(%d) != label rank(%d)",
+                          x_dims.size(), y_dims.size()));
     bool contain_unknown_dim = framework::contain_unknown_dim(x_dims) ||
                                framework::contain_unknown_dim(y_dims);
     if (ctx->IsRuntime() || !contain_unknown_dim) {
       PADDLE_ENFORCE_EQ(
           x_dims, y_dims,
-          "The Input(X) and Input(Label) should have the same shape.");
+          platform::errors::InvalidArgument(
+              "The Input(input) and Input(label) should have the same "
+              "shape, but received input shape [%s] != label shape [%s]",
+              x_dims, y_dims));
     }
     auto out_dims = y_dims;
......@@ -99,8 +103,8 @@ class HuberLossGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
-                      "Input(Out@GRAD) should not be null.");
+    OP_INOUT_CHECK(ctx->HasInputs(framework::GradVarName("Out")), "Input",
+                   "Out@GRAD", "HuberLossGrad");
     auto residual_dims = ctx->GetInputDim("Residual");
......
......@@ -169,17 +169,25 @@ class LRNOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of LRNOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of LRNOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("MidOut"),
-                   "MidOut(Out) of LRNOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "LRN");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "LRN");
+    OP_INOUT_CHECK(ctx->HasOutput("MidOut"), "Output", "MidOut", "LRN");
     auto x_dim = ctx->GetInputDim("X");
-    PADDLE_ENFORCE_EQ(x_dim.size(), 4, "Input(X)'rank of LRNOp should be 4.");
+    PADDLE_ENFORCE_EQ(x_dim.size(), 4, platform::errors::InvalidArgument(
+                                           "Input(input) rank should be 4, "
+                                           "but received input rank (%d) != 4",
+                                           x_dim.size()));
     int n = ctx->Attrs().Get<int>("n");
-    PADDLE_ENFORCE(n > 0 && n % 2 == 1, "n should be positive odd value");
+    PADDLE_ENFORCE_GT(n, 0, platform::errors::InvalidArgument(
+                                "Argument(n) should be positive, "
+                                "but received n(%d) not greater than 0",
+                                n));
+    PADDLE_ENFORCE_EQ(n % 2, 1, platform::errors::InvalidArgument(
+                                    "Argument(n) should be an odd value, "
+                                    "but received n(%d) is not an odd value",
+                                    n));
     ctx->SetOutputDim("Out", x_dim);
     ctx->ShareLoD("X", /*->*/ "Out");
......@@ -317,10 +325,10 @@ class LRNOpGrad : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput("MidOut"), "Input(MidOut) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "LRNGrad");
+    OP_INOUT_CHECK(ctx->HasInput("MidOut"), "Input", "MidOut", "LRNGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   "Out@GRAD", "LRNGrad");
     auto x_dims = ctx->GetInputDim("X");
     ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
......
......@@ -68,9 +68,18 @@ class LRNKernel : public framework::OpKernel<T> {
     T beta = ctx.Attr<float>("beta");
     T k = ctx.Attr<float>("k");
-    PADDLE_ENFORCE(alpha >= 0.0, "alpha should >= 0.0");
-    PADDLE_ENFORCE(beta >= 0.0, "beta should >= 0.0");
-    PADDLE_ENFORCE(k >= 0.0, "k should >= 0.0");
+    PADDLE_ENFORCE_GE(alpha, static_cast<T>(0),
+                      platform::errors::InvalidArgument(
+                          "Argument(alpha) should be >= 0.0, "
+                          "but received alpha(%f) less than 0",
+                          alpha));
+    PADDLE_ENFORCE_GE(beta, static_cast<T>(0),
+                      platform::errors::InvalidArgument(
+                          "Argument(beta) should be >= 0.0, "
+                          "but received beta(%f) less than 0",
+                          beta));
+    PADDLE_ENFORCE_GE(k, static_cast<T>(0),
+                      platform::errors::InvalidArgument(
+                          "Argument(k) should be >= 0.0, "
+                          "but received k(%f) less than 0",
+                          k));
     LRNFunctor<DeviceContext, T> f;
     f(ctx, x, out, mid, N, C, H, W, n, k, alpha, beta, data_layout);
......@@ -132,9 +141,11 @@ class LRNGradKernel : public framework::OpKernel<T> {
     T alpha = ctx.Attr<T>("alpha");
     T beta = ctx.Attr<T>("beta");
-    PADDLE_ENFORCE(
-        !ctx.Attr<bool>("is_test"),
-        "is_test attribute should be set to False in training phase.");
+    PADDLE_ENFORCE_EQ(
+        !ctx.Attr<bool>("is_test"), true,
+        platform::errors::InvalidArgument(
+            "is_test attribute should be set to False in the training phase, "
+            "but received is_test == True in the training phase."));
     LRNGradFunctor<DeviceContext, T> f;
     f(ctx, x, out, mid, x_g, out_g, N, C, H, W, n, alpha, beta, data_layout);
......
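For reference, the attribute checks above (n positive and odd, and k, alpha, beta non-negative) correspond to the parameters of the standard cross-channel LRN formulation (cf. Krizhevsky et al., 2012), where n is the window size (odd, so the window centers on channel i) and N the number of channels:

b^{i}_{x,y} = a^{i}_{x,y} \Big/ \Bigl(k + \alpha \sum_{j=\max(0,\, i - n/2)}^{\min(N-1,\, i + n/2)} \bigl(a^{j}_{x,y}\bigr)^{2}\Bigr)^{\beta}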
......@@ -23,21 +23,33 @@ class ModifiedHuberLossOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "X must be initialized.");
-    PADDLE_ENFORCE(ctx->HasInput("Y"), "Y must be initialized.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ModifiedHuberLoss");
+    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "ModifiedHuberLoss");
     auto x_dims = ctx->GetInputDim("X");
     auto y_dims = ctx->GetInputDim("Y");
-    PADDLE_ENFORCE_EQ(x_dims.size(), 2, "The tensor rank of X must be 2.");
+    PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::InvalidArgument(
+                                            "Input(input) rank should be 2, "
+                                            "but received input rank(%d) != 2",
+                                            x_dims.size()));
     if (ctx->IsRuntime() ||
         (framework::product(x_dims) > 0 && framework::product(y_dims) > 0)) {
-      PADDLE_ENFORCE_EQ(x_dims, y_dims,
-                        "The shape of X and Y must be the same.");
+      PADDLE_ENFORCE_EQ(
+          x_dims, y_dims,
+          platform::errors::InvalidArgument(
+              "The Input(input) and Input(label) should have the same "
+              "shape, but received input shape [%s] != label shape [%s]",
+              x_dims, y_dims));
     }
     if (ctx->IsRuntime()) {
-      PADDLE_ENFORCE_EQ(x_dims[1], 1, "The 2nd dimension of X must be 1.");
+      PADDLE_ENFORCE_EQ(x_dims[1], 1,
+                        platform::errors::InvalidArgument(
+                            "The second dimension of Input(input) should be 1, "
+                            "but received second dimension of input (%d) != 1",
+                            x_dims[1]));
     }
     ctx->SetOutputDim("IntermediateVal", x_dims);
......@@ -87,11 +99,11 @@ class ModifiedHuberLossGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Y"), "Y must be initialized.");
-    PADDLE_ENFORCE(ctx->HasInput("IntermediateVal"),
-                   "Intermediate value must not be null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@Grad) must not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "ModifiedHuberLossGrad");
+    OP_INOUT_CHECK(ctx->HasInput("IntermediateVal"), "Input", "IntermediateVal",
+                   "ModifiedHuberLossGrad");
+    OP_INOUT_CHECK(ctx->HasInputs(framework::GradVarName("Out")), "Input",
+                   "Out@GRAD", "ModifiedHuberLossGrad");
     auto y_dims = ctx->GetInputDim("Y");
     auto intermediate_dims = ctx->GetInputDim("IntermediateVal");
......@@ -100,9 +112,20 @@ class ModifiedHuberLossGradOp : public framework::OperatorWithKernel {
     if (ctx->IsRuntime()) {
       PADDLE_ENFORCE_EQ(
           intermediate_dims, y_dims,
-          "The shape of X and intermediate value must be the same.");
-      PADDLE_ENFORCE_EQ(out_grad_dims, y_dims,
-                        "The shape of Input(Out@Grad) and X must be the same.");
+          platform::errors::InvalidArgument(
+              "The shape of the Intermediate variable, which will be reused "
+              "in backward processing, should be the same as the shape of "
+              "Input(label), but received Intermediate variable "
+              "shape [%s] != label shape [%s]",
+              intermediate_dims, y_dims));
+      PADDLE_ENFORCE_EQ(
+          out_grad_dims, y_dims,
+          platform::errors::InvalidArgument(
+              "The shape of output gradient should be the same as "
+              "the shape of Input(label), but received the output gradient "
+              "shape [%s] != label shape [%s]",
+              out_grad_dims, y_dims));
     }
     if (ctx->HasOutput(framework::GradVarName("X"))) {
......
......@@ -24,17 +24,22 @@ class OneHotOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of OneHotOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of OneHotOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "OneHot");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "OneHot");
     auto x_dims = ctx->GetInputDim("X");
     PADDLE_ENFORCE_GE(x_dims.size(), 2,
-                      "Rank of Input(X) should be at least 2.");
+                      platform::errors::InvalidArgument(
+                          "Input(input) rank should be at least 2, "
+                          "but received input rank (%d) less than 2",
+                          x_dims.size()));
     if (ctx->IsRuntime() || x_dims[x_dims.size() - 1] > 0) {
       PADDLE_ENFORCE_GE(x_dims[x_dims.size() - 1], 1U,
-                        "Last dimension of Input(X) should be 1.");
+                        platform::errors::InvalidArgument(
+                            "Last dimension of Input(input) should be at "
+                            "least 1, but received the last dimension of "
+                            "input (%d) less than 1",
+                            x_dims[x_dims.size() - 1]));
     }
     framework::DDim out_dims(x_dims);
......
......@@ -51,11 +51,20 @@ struct OneHotOpFunctor {
       }
     } else {
       for (int i = 0; i < numel; ++i) {
-        PADDLE_ENFORCE_GE(p_in_data[i], 0,
-                          "Illegal index value, should be at least 0.");
+        PADDLE_ENFORCE_GE(
+            p_in_data[i], 0,
+            platform::errors::InvalidArgument(
+                "Illegal index value, Input(input) value should be at "
+                "least 0, but received input (%d) less than 0",
+                p_in_data[i]));
         PADDLE_ENFORCE_LT(
             p_in_data[i], depth_,
-            "Illegal index value, should be less than depth (%d).", depth_);
+            platform::errors::InvalidArgument(
+                "Illegal index value, Input(input) value should be less "
+                "than Input(depth), but received input (%d) not less than "
+                "depth (%d)",
+                p_in_data[i], depth_));
         *(p_out_data + i * depth_ + p_in_data[i]) = 1.0;
       }
     }
......
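To make the functor's semantics concrete: with depth_ = 4, an input column holding the indices 1 and 3 produces the one-hot rows [0, 1, 0, 0] and [0, 0, 0, 1]; an index outside [0, depth_) now fails with an InvalidArgument message that reports the offending value instead of a bare assertion.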
......@@ -1519,8 +1519,8 @@ def huber_loss(input, label, delta):
     Args:
-        input (Variable): Predicted data, 2D-Tensor with the shape of [batch_size, 1]. The data type should be float32 or float64.
-        label (Variable): Ground truth label, 2D-Tensor with the shape of [batch_size, 1]. The data type should be float32 or float64.
+        input (Variable): Predicted data, 2D-Tensor with the shape of [batch_size, 1]. The data type should be float32.
+        label (Variable): Ground truth label, 2D-Tensor with the shape of [batch_size, 1]. The data type should be float32.
         delta (float): The threshold for Huber loss, which is used to control the balance between the linear error and square error. The data type should be float32.
     Returns:
......@@ -1549,6 +1549,10 @@ def huber_loss(input, label, delta):
         print(HuberLoss)  # [[1.5], [0.5], [0.5], [0. ]], dtype=float32
     """
     helper = LayerHelper('huber_loss', **locals())
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
+                             'huber_loss')
+    check_variable_and_dtype(label, 'label', ['float32', 'float64'],
+                             'huber_loss')
     residual = helper.create_variable_for_type_inference(
         dtype=helper.input_dtype())
     out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
......
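For reference, the elementwise quantity huber_loss computes, with residual r = input - label and threshold δ = delta (the standard Huber definition, consistent with the example output in the docstring above):

\mathrm{loss}(r) =
\begin{cases}
  \tfrac{1}{2}\, r^{2}, & |r| \le \delta, \\
  \delta \bigl(|r| - \tfrac{1}{2}\,\delta\bigr), & |r| > \delta.
\end{cases}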
......@@ -5739,6 +5739,8 @@ def one_hot(input, depth, allow_out_of_range=False):
         return out
     helper = LayerHelper("one_hot", **locals())
+    check_variable_and_dtype(input, 'input', ['int32', 'int64'], 'one_hot')
+    check_type(depth, 'depth', (six.integer_types, Variable), 'one_hot')
     one_hot_out = helper.create_variable_for_type_inference(dtype='float32')
     if not isinstance(depth, Variable):
......@@ -6357,6 +6359,7 @@ def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None,
         print(lrn.dtype)  # float32
     """
     helper = LayerHelper('lrn', **locals())
+    check_variable_and_dtype(input, 'input', ['float32'], 'lrn')
     dtype = helper.input_dtype()
     input_shape = input.shape
     dims = len(input_shape)
......
......@@ -17,6 +17,8 @@ from __future__ import print_function
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid as fluid
+from paddle.fluid import compiler, Program, program_guard
 def huber_loss_forward(val, delta):
......@@ -79,5 +81,26 @@ def TestHuberLossOp2(TestHuberLossOp):
         return (6, 6, 1)
+class TestHuberLossOpError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            # the input and label must be Variable
+            xw = np.random.random((6, 6)).astype("float32")
+            xr = fluid.data(name='xr', shape=[None, 6], dtype="float32")
+            lw = np.random.random((6, 6)).astype("float32")
+            lr = fluid.data(name='lr', shape=[None, 6], dtype="float32")
+            delta = 1.0
+            self.assertRaises(TypeError, fluid.layers.huber_loss, xr, lw, delta)
+            self.assertRaises(TypeError, fluid.layers.huber_loss, xw, lr, delta)
+            # the dtype of input and label must be float32 or float64
+            xw2 = fluid.data(name='xw2', shape=[None, 6], dtype="int32")
+            lw2 = fluid.data(name='lw2', shape=[None, 6], dtype="int32")
+            self.assertRaises(TypeError, fluid.layers.huber_loss, xw2, lr,
+                              delta)
+            self.assertRaises(TypeError, fluid.layers.huber_loss, xr, lw2,
+                              delta)
 if __name__ == '__main__':
     unittest.main()
......@@ -19,6 +19,7 @@ import numpy as np
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from op_test import OpTest
+from paddle.fluid import compiler, Program, program_guard
 class TestLRNOp(OpTest):
......@@ -145,5 +146,13 @@ class TestLRNAPI(unittest.TestCase):
         self.assertRaises(ValueError, _input_dim_size)
+class TestLRNOpError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            # the input must be float32
+            in_w = fluid.data(name="in_w", shape=[None, 3, 3, 3], dtype="int64")
+            self.assertRaises(TypeError, fluid.layers.lrn, in_w)
 if __name__ == "__main__":
     unittest.main()
......@@ -175,5 +175,29 @@ class TestOneHotOp_exception(unittest.TestCase):
         self.assertRaises(core.EnforceNotMet, run)
+class TestOneHotOpError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            # the input must be Variable
+            in_w = np.random.random((4, 1)).astype("int32")
+            self.assertRaises(TypeError, fluid.layers.one_hot, in_w)
+            # the dtype of input must be int32 or int64
+            in_w2 = fluid.layers.data(
+                name="in_w2",
+                shape=[4, 1],
+                append_batch_size=False,
+                dtype="float32")
+            self.assertRaises(TypeError, fluid.layers.one_hot, in_w2)
+            # the depth must be int, long or Variable
+            in_r = fluid.layers.data(
+                name="in_r",
+                shape=[4, 1],
+                append_batch_size=False,
+                dtype="int32")
+            depth_w = np.array([4])
+            self.assertRaises(TypeError, fluid.layers.one_hot, in_r, 4.1)
+            self.assertRaises(TypeError, fluid.layers.one_hot, in_r, depth_w)
 if __name__ == '__main__':
     unittest.main()