Unverified commit 8af85922, authored by huangjun12, committed by GitHub

Error message enhancement of 6 op (#23759)

* enhance error message test=develop

* fix CI test of 3 op test=develop

* fix bug caused by the diff of long type in py2 and py3, test=develop
Parent 318dfa0d
@@ -25,21 +25,31 @@ class HingeLossOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Logits"),
-                   "Input(Logits) must be initialized.");
-    PADDLE_ENFORCE(ctx->HasInput("Labels"),
-                   "Input(Labels) must be initialized.");
+    OP_INOUT_CHECK(ctx->HasInput("Logits"), "Input", "Logits", "HingeLoss");
+    OP_INOUT_CHECK(ctx->HasInput("Labels"), "Input", "Labels", "HingeLoss");
 
     auto pred_dims = ctx->GetInputDim("Logits");
     auto label_dims = ctx->GetInputDim("Labels");
 
-    PADDLE_ENFORCE_EQ(pred_dims, label_dims);
-    PADDLE_ENFORCE_EQ(pred_dims.size(), 2,
-                      "The rank of Input(Logits) must be 2 and the shape is "
-                      "[batch_size, 1].");
+    PADDLE_ENFORCE_EQ(
+        pred_dims, label_dims,
+        platform::errors::InvalidArgument(
+            "The Input(input) and Input(label) should have the same "
+            "shape, but received input shape [%s] != label shape [%s]",
+            pred_dims, label_dims));
+    PADDLE_ENFORCE_EQ(
+        pred_dims.size(), 2,
+        platform::errors::InvalidArgument("Input(input) rank should be 2, "
+                                          "but received input rank(%d) != 2",
+                                          pred_dims.size()));
     PADDLE_ENFORCE_EQ(pred_dims[1], 1,
-                      "Each row of Input(Logits) contains a real value, "
-                      "so the 2nd dimension of Input(Logits) must be 1.");
+                      platform::errors::InvalidArgument(
+                          "The second dimension of Input(input) should be 1, "
+                          "as each row of input contains a real value, "
+                          "but received second dimension of input (%d) != 1",
+                          pred_dims[1]));
 
     ctx->SetOutputDim("Loss", {pred_dims[0], 1});
     ctx->ShareLoD("Logits", "Loss");
@@ -81,19 +91,22 @@ class HingeLossGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Logits"),
-                   "Input(Logits) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Labels"),
-                   "Input(Labels) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Loss")),
-                   "Input(Loss@GRAD) should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("Logits")),
-                   "Input(Logits@GRAD) should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Logits"), "Input", "Logits", "HingeLossGrad");
+    OP_INOUT_CHECK(ctx->HasInput("Labels"), "Input", "Labels", "HingeLossGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Loss")), "Input",
+                   "Loss@GRAD", "HingeLossGrad");
+    OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("Logits")), "Output",
+                   "Logits@GRAD", "HingeLossGrad");
 
     auto pred_dims = ctx->GetInputDim("Logits");
     auto loss_grad_dims = ctx->GetInputDim(framework::GradVarName("Loss"));
 
-    PADDLE_ENFORCE_EQ(loss_grad_dims, pred_dims);
+    PADDLE_ENFORCE_EQ(loss_grad_dims, pred_dims,
+                      platform::errors::InvalidArgument(
+                          "The shape of loss gradient should be the same as "
+                          "the shape of Input(input), but received the loss "
+                          "gradient shape [%s] != input shape [%s]",
+                          loss_grad_dims, pred_dims));
 
     auto pred_grad_name = framework::GradVarName("Logits");
     ctx->SetOutputDim(pred_grad_name, pred_dims);
@@ -25,23 +25,27 @@ class HuberLossOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) must be initialized.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Y"), true,
-                      "Input(Y) must be initialized.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "HuberLoss");
+    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "HuberLoss");
 
     auto x_dims = ctx->GetInputDim("X");
     auto y_dims = ctx->GetInputDim("Y");
 
     PADDLE_ENFORCE_EQ(x_dims.size(), y_dims.size(),
-                      "The rank of Input(X) should be equal to "
-                      "the rank of Input(Y).");
+                      platform::errors::InvalidArgument(
+                          "Input(input) rank and Input(label) rank should be "
+                          "same, but received input rank(%d) != label rank(%d)",
+                          x_dims.size(), y_dims.size()));
 
     bool contain_unknown_dim = framework::contain_unknown_dim(x_dims) ||
                                framework::contain_unknown_dim(y_dims);
     if (ctx->IsRuntime() || !contain_unknown_dim) {
       PADDLE_ENFORCE_EQ(
           x_dims, y_dims,
-          "The Input(X) and Input(Label) should have the same shape.");
+          platform::errors::InvalidArgument(
+              "The Input(input) and Input(label) should have the same "
+              "shape, but received input shape [%s] != label shape [%s]",
+              x_dims, y_dims));
     }
 
     auto out_dims = y_dims;
@@ -99,8 +103,8 @@ class HuberLossGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
-                      "Input(Out@GRAD) should not be null.");
+    OP_INOUT_CHECK(ctx->HasInputs(framework::GradVarName("Out")), "Input",
+                   "Out@GRAD", "HuberLossGrad");
 
     auto residual_dims = ctx->GetInputDim("Residual");
@@ -169,17 +169,25 @@ class LRNOp : public framework::OperatorWithKernel {
 protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of LRNOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of LRNOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("MidOut"),
-                   "MidOut(Out) of LRNOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "LRN");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "LRN");
+    OP_INOUT_CHECK(ctx->HasOutput("MidOut"), "Output", "MidOut", "LRN");
 
     auto x_dim = ctx->GetInputDim("X");
-    PADDLE_ENFORCE_EQ(x_dim.size(), 4, "Input(X)'rank of LRNOp should be 4.");
+    PADDLE_ENFORCE_EQ(x_dim.size(), 4, platform::errors::InvalidArgument(
+                                           "Input(input) rank should be 4, "
+                                           "but received input rank (%d) != 4",
+                                           x_dim.size()));
 
     int n = ctx->Attrs().Get<int>("n");
-    PADDLE_ENFORCE(n > 0 && n % 2 == 1, "n should be positive odd value");
+    PADDLE_ENFORCE_GT(n, 0UL, platform::errors::InvalidArgument(
+                                  "Argument(n) should be positive, "
+                                  "but received n(%d) not greater than 0",
+                                  n));
+    PADDLE_ENFORCE_EQ(n % 2, 1UL, platform::errors::InvalidArgument(
+                                      "Argument(n) should be odd value, "
+                                      "but received n(%d) is not an odd value",
+                                      n));
 
     ctx->SetOutputDim("Out", x_dim);
     ctx->ShareLoD("X", /*->*/ "Out");
@@ -317,10 +325,10 @@ class LRNOpGrad : public framework::OperatorWithKernel {
 protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput("MidOut"), "Input(MidOut) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "LRNGrad");
+    OP_INOUT_CHECK(ctx->HasInput("MidOut"), "Input", "MidOut", "LRNGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   "Out@GRAD", "LRNGrad");
 
     auto x_dims = ctx->GetInputDim("X");
     ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
@@ -68,9 +68,18 @@ class LRNKernel : public framework::OpKernel<T> {
     T beta = ctx.Attr<float>("beta");
     T k = ctx.Attr<float>("k");
 
-    PADDLE_ENFORCE(alpha >= 0.0, "alpha should >= 0.0");
-    PADDLE_ENFORCE(beta >= 0.0, "beta should >= 0.0");
-    PADDLE_ENFORCE(k >= 0.0, "k should >= 0.0");
+    PADDLE_ENFORCE_GE(alpha, 0UL, platform::errors::InvalidArgument(
+                                      "Argument(alpha) should >= 0.0, "
+                                      "but received alpha(%d) less than 0",
+                                      alpha));
+    PADDLE_ENFORCE_GE(beta, 0UL, platform::errors::InvalidArgument(
+                                     "Argument(beta) should >= 0.0, "
+                                     "but received beta(%d) less than 0",
+                                     beta));
+    PADDLE_ENFORCE_GE(k, 0UL, platform::errors::InvalidArgument(
+                                  "Argument(k) should >= 0.0, "
+                                  "but received k(%d) less than 0",
+                                  k));
 
     LRNFunctor<DeviceContext, T> f;
     f(ctx, x, out, mid, N, C, H, W, n, k, alpha, beta, data_layout);
@@ -132,9 +141,11 @@ class LRNGradKernel : public framework::OpKernel<T> {
     T alpha = ctx.Attr<T>("alpha");
     T beta = ctx.Attr<T>("beta");
 
-    PADDLE_ENFORCE(
-        !ctx.Attr<bool>("is_test"),
-        "is_test attribute should be set to False in training phase.");
+    PADDLE_ENFORCE_EQ(
+        !ctx.Attr<bool>("is_test"), true,
+        platform::errors::InvalidArgument(
+            "is_test attribute should be set to False in training phase. "
+            "but received is_test == True in training phase."));
 
     LRNGradFunctor<DeviceContext, T> f;
     f(ctx, x, out, mid, x_g, out_g, N, C, H, W, n, alpha, beta, data_layout);
@@ -23,21 +23,33 @@ class ModifiedHuberLossOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "X must be initialized.");
-    PADDLE_ENFORCE(ctx->HasInput("Y"), "Y must be initialized.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ModifiedHuberLoss");
+    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "ModifiedHuberLoss");
 
     auto x_dims = ctx->GetInputDim("X");
     auto y_dims = ctx->GetInputDim("Y");
 
-    PADDLE_ENFORCE_EQ(x_dims.size(), 2, "The tensor rank of X must be 2.");
+    PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::InvalidArgument(
+                                            "Input(input) rank should be 2, "
+                                            "but received input rank(%d) != 2",
+                                            x_dims.size()));
 
     if (ctx->IsRuntime() ||
         (framework::product(x_dims) > 0 && framework::product(y_dims) > 0)) {
-      PADDLE_ENFORCE_EQ(x_dims, y_dims,
-                        "The shape of X and Y must be the same.");
+      PADDLE_ENFORCE_EQ(
+          x_dims, y_dims,
+          platform::errors::InvalidArgument(
+              "The Input(input) and Input(label) should have the same "
+              "shape, but received input shape [%s] != label shape [%s]",
+              x_dims, y_dims));
     }
 
     if (ctx->IsRuntime()) {
-      PADDLE_ENFORCE_EQ(x_dims[1], 1, "The 2nd dimension of X must be 1.");
+      PADDLE_ENFORCE_EQ(x_dims[1], 1,
+                        platform::errors::InvalidArgument(
+                            "The second dimension of Input(input) should be 1, "
+                            "but received second dimension of input (%d) != 1",
+                            x_dims[1]));
     }
 
     ctx->SetOutputDim("IntermediateVal", x_dims);
@@ -87,11 +99,11 @@ class ModifiedHuberLossGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Y"), "Y must be initialized.");
-    PADDLE_ENFORCE(ctx->HasInput("IntermediateVal"),
-                   "Intermediate value must not be null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@Grad) must not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "ModifiedHuberLossGrad");
+    OP_INOUT_CHECK(ctx->HasInput("IntermediateVal"), "Input", "IntermediateVal",
+                   "ModifiedHuberLossGrad");
+    OP_INOUT_CHECK(ctx->HasInputs(framework::GradVarName("Out")), "Input",
+                   "Out@GRAD", "ModifiedHuberLossGrad");
 
     auto y_dims = ctx->GetInputDim("Y");
     auto intermediate_dims = ctx->GetInputDim("IntermediateVal");
@@ -100,9 +112,20 @@ class ModifiedHuberLossGradOp : public framework::OperatorWithKernel {
     if (ctx->IsRuntime()) {
       PADDLE_ENFORCE_EQ(
           intermediate_dims, y_dims,
-          "The shape of X and intermediate value must be the same.");
-      PADDLE_ENFORCE_EQ(out_grad_dims, y_dims,
-                        "The shape of Input(Out@Grad) and X must be the same.");
+          platform::errors::InvalidArgument(
+              "The shape of Intermediate variable which will be reused in "
+              "backward processing should be the same as "
+              "the shape of Input(label), but received Intermediate variable "
+              "shape [%s] != label shape [%s]",
+              intermediate_dims, y_dims));
+      PADDLE_ENFORCE_EQ(
+          out_grad_dims, y_dims,
+          platform::errors::InvalidArgument(
+              "The shape of output gradient should be the same as "
+              "the shape of Input(label), but received the output gradient "
+              "shape [%s] != label shape [%s]",
+              out_grad_dims, y_dims));
     }
 
     if (ctx->HasOutput(framework::GradVarName("X"))) {
@@ -24,17 +24,22 @@ class OneHotOp : public framework::OperatorWithKernel {
 public:
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of OneHotOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of OneHotOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "OneHot");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "OneHot");
 
     auto x_dims = ctx->GetInputDim("X");
     PADDLE_ENFORCE_GE(x_dims.size(), 2,
-                      "Rank of Input(X) should be at least 2.");
+                      platform::errors::InvalidArgument(
+                          "Input(input) rank should be at least 2, "
+                          "but received input rank (%d) less than 2",
+                          x_dims.size()));
     if (ctx->IsRuntime() || x_dims[x_dims.size() - 1] > 0) {
       PADDLE_ENFORCE_GE(x_dims[x_dims.size() - 1], 1U,
-                        "Last dimension of Input(X) should be 1.");
+                        platform::errors::InvalidArgument(
+                            "Last dimension of Input(input) should be 1, "
+                            "but received input Last dimension(%d) != 1",
+                            x_dims[x_dims.size() - 1]));
     }
 
     framework::DDim out_dims(x_dims);
@@ -51,11 +51,20 @@ struct OneHotOpFunctor {
       }
     } else {
       for (int i = 0; i < numel; ++i) {
-        PADDLE_ENFORCE_GE(p_in_data[i], 0,
-                          "Illegal index value, should be at least 0.");
+        PADDLE_ENFORCE_GE(
+            p_in_data[i], 0,
+            platform::errors::InvalidArgument(
+                "Illegal index value, Input(input) value should be at least 0, "
+                "but received input (%d) less than 0",
+                p_in_data[i]));
         PADDLE_ENFORCE_LT(
             p_in_data[i], depth_,
-            "Illegal index value, should be less than depth (%d).", depth_);
+            platform::errors::InvalidArgument(
+                "Illegal index value, Input(input) value should be less than "
+                "Input(depth), "
+                "but received input (%d) not less than depth (%d)",
+                p_in_data[i], depth_));
         *(p_out_data + i * depth_ + p_in_data[i]) = 1.0;
       }
     }
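The functor-level checks above guard values that can only be validated at run time, such as an index that is not smaller than depth. The following is a rough sketch, not part of the patch, of a program that would trip the enhanced PADDLE_ENFORCE_LT; following the existing TestOneHotOp_exception pattern in the test suite, the failure is expected to surface in Python as core.EnforceNotMet (variable names here are illustrative):

import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core

def run():
    x = fluid.layers.data(
        name="x", shape=[4, 1], append_batch_size=False, dtype="int32")
    out = fluid.layers.one_hot(input=x, depth=4)  # valid indices are 0..3
    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    bad = np.array([[0], [1], [2], [5]], dtype="int32")  # 5 >= depth
    exe.run(fluid.default_main_program(), feed={"x": bad}, fetch_list=[out])

# run() is expected to raise core.EnforceNotMet carrying the new
# "Illegal index value ..." message from the functor check above.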
@@ -1525,8 +1525,8 @@ def huber_loss(input, label, delta):
 
     Args:
-        input (Variable): Predicted data, 2D-Tensor with the shape of [batch_size, 1]. The data type should be float32 or float64.
-        label (Variable): Ground truth label, 2D-Tensor with the shape of [batch_size, 1]. The data type should be float32 or float64.
+        input (Variable): Predicted data, 2D-Tensor with the shape of [batch_size, 1]. The data type should be float32.
+        label (Variable): Ground truth label, 2D-Tensor with the shape of [batch_size, 1]. The data type should be float32.
         delta (float): The threshold for Huber loss, which is used to control the balance between the linear error and square error. The data type should be float32.
 
     Returns:
@@ -1555,6 +1555,10 @@ def huber_loss(input, label, delta):
         print(HuberLoss)  #[[1.5], [0.5], [0.5], [0. ]], dtype=float32
     """
     helper = LayerHelper('huber_loss', **locals())
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
+                             'huber_loss')
+    check_variable_and_dtype(label, 'label', ['float32', 'float64'],
+                             'huber_loss')
     residual = helper.create_variable_for_type_inference(
         dtype=helper.input_dtype())
     out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
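The two check_variable_and_dtype calls added above move type and dtype validation of huber_loss to graph-construction time, so bad inputs now fail with a Python TypeError instead of an opaque C++ enforce failure. A minimal sketch of how this surfaces to callers, with illustrative names; the behaviour mirrors the new TestHuberLossOpError test further below:

import numpy as np
import paddle.fluid as fluid

x = fluid.data(name='x', shape=[None, 1], dtype='float32')
label_bad = fluid.data(name='label_bad', shape=[None, 1], dtype='int32')

# wrong dtype: rejected at graph-construction time with a TypeError
try:
    fluid.layers.huber_loss(input=x, label=label_bad, delta=1.0)
except TypeError as e:
    print(e)

# a raw numpy array instead of a Variable is rejected the same way
try:
    fluid.layers.huber_loss(
        input=np.zeros((4, 1), dtype='float32'), label=x, delta=1.0)
except TypeError as e:
    print(e)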
@@ -5781,6 +5781,8 @@ def one_hot(input, depth, allow_out_of_range=False):
         return out
 
     helper = LayerHelper("one_hot", **locals())
+    check_variable_and_dtype(input, 'input', ['int32', 'int64'], 'one_hot')
+    check_type(depth, 'depth', (six.integer_types, Variable), 'one_hot')
     one_hot_out = helper.create_variable_for_type_inference(dtype='float32')
 
     if not isinstance(depth, Variable):
@@ -6392,6 +6394,7 @@ def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None,
             print(lrn.dtype)  # float32
     """
     helper = LayerHelper('lrn', **locals())
+    check_variable_and_dtype(input, 'input', ['float32'], 'lrn')
    dtype = helper.input_dtype()
     input_shape = input.shape
     dims = len(input_shape)
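The new check_type call validates the depth argument of one_hot against (six.integer_types, Variable); six.integer_types is (int, long) on Python 2 but only (int,) on Python 3, which is the long-type difference the commit message refers to. A small illustrative sketch of what the added checks accept and reject (names and values are assumptions, not part of the patch):

import six
import paddle.fluid as fluid

label = fluid.data(name='label', shape=[4, 1], dtype='int64')

# a plain int depth and a Variable depth are both accepted
one_hot_a = fluid.layers.one_hot(input=label, depth=10)
depth_var = fluid.layers.fill_constant(shape=[1], dtype='int32', value=10)
one_hot_b = fluid.layers.one_hot(input=label, depth=depth_var)

# a float depth is rejected with a TypeError by the new check_type call
try:
    fluid.layers.one_hot(input=label, depth=4.1)
except TypeError as e:
    print(e)

# the py2/py3 difference: (int, long) on Python 2, (int,) on Python 3
print(six.integer_types)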
@@ -17,6 +17,8 @@ from __future__ import print_function
 
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid as fluid
+from paddle.fluid import compiler, Program, program_guard
 
 
 def huber_loss_forward(val, delta):
@@ -79,5 +81,23 @@ def TestHuberLossOp2(TestHuberLossOp):
         return (6, 6, 1)
 
 
+class TestHuberLossOpError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            # the input and label must be Variable
+            xw = np.random.random((6, 6)).astype("float32")
+            xr = fluid.data(name='xr', shape=[None, 6], dtype="float32")
+            lw = np.random.random((6, 6)).astype("float32")
+            lr = fluid.data(name='lr', shape=[None, 6], dtype="float32")
+            self.assertRaises(TypeError, fluid.layers.huber_loss, xw, lr)
+            self.assertRaises(TypeError, fluid.layers.huber_loss, xr, lw)
+
+            # the dtype of input and label must be float32 or float64
+            xw2 = fluid.data(name='xw2', shape=[None, 6], dtype="int32")
+            lw2 = fluid.data(name='lw2', shape=[None, 6], dtype="int32")
+            self.assertRaises(TypeError, fluid.layers.huber_loss, xw2, lr)
+            self.assertRaises(TypeError, fluid.layers.huber_loss, xr, lw2)
+
+
 if __name__ == '__main__':
     unittest.main()
@@ -19,6 +19,7 @@ import numpy as np
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from op_test import OpTest
+from paddle.fluid import compiler, Program, program_guard
 
 
 class TestLRNOp(OpTest):
@@ -145,5 +146,13 @@ class TestLRNAPI(unittest.TestCase):
         self.assertRaises(ValueError, _input_dim_size)
 
 
+class TestLRNOpError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            # the input must be float32
+            in_w = fluid.data(name="in_w", shape=[None, 3, 3, 3], dtype="int64")
+            self.assertRaises(TypeError, fluid.layers.lrn, in_w)
+
+
 if __name__ == "__main__":
     unittest.main()
@@ -175,5 +175,29 @@ class TestOneHotOp_exception(unittest.TestCase):
         self.assertRaises(core.EnforceNotMet, run)
 
 
+class TestOneHotOpError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            # the input must be Variable
+            in_w = np.random.random((4, 1)).astype("int32")
+            self.assertRaises(TypeError, fluid.layers.one_hot, in_w)
+            # the input must be int32 or int64
+            in_w2 = fluid.layers.data(
+                name="in_w2",
+                shape=[4, 1],
+                append_batch_size=False,
+                dtype="float32")
+            self.assertRaises(TypeError, fluid.layers.one_hot, in_w2)
+            # the depth must be int, long or Variable
+            in_r = fluid.layers.data(
+                name="in_r",
+                shape=[4, 1],
+                append_batch_size=False,
+                dtype="int32")
+            depth_w = np.array([4])
+            self.assertRaises(TypeError, fluid.layers.one_hot, in_r, 4.1)
+            self.assertRaises(TypeError, fluid.layers.one_hot, in_r, depth_w)
+
+
 if __name__ == '__main__':
     unittest.main()