Unverified commit cd1de0e2, authored by ceci3, committed by GitHub

API/OP error message enhancement (#23691)

* error enhance,test=develop

* update,test=develop

* update type, test=develop

* replace inout_check, test=develop
Parent 2787944c
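The changes below all follow one pattern: bare PADDLE_ENFORCE / PADDLE_ENFORCE_EQ presence checks on inputs and outputs are replaced with the OP_INOUT_CHECK macro, and the remaining value checks are given a typed platform::errors::* message that reports the values actually received. The following is a minimal sketch of that pattern for a hypothetical operator "MyOp" (not part of this PR); the include paths are assumptions, and only call shapes that appear in the diff below are used.

```cpp
// Illustrative only: a hypothetical "MyOp" InferShape written in the style
// this PR applies to BatchNorm, CosSim, Dropout and InstanceNorm.
// The include paths are assumptions; real operator .cc files pull these in
// through their own headers. Operator registration is omitted.
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace operators {

class MyOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    // Presence checks: OP_INOUT_CHECK(condition, "Input"|"Output", name, op).
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "MyOp");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "MyOp");

    // Value checks: a typed error that states the expectation and the value
    // actually received.
    const auto x_dims = ctx->GetInputDim("X");
    PADDLE_ENFORCE_GE(
        x_dims.size(), 2,
        platform::errors::InvalidArgument(
            "ShapeError: the rank of Input(X) must be at least 2. "
            "But received: the shape of X is [%s], its rank is [%d].",
            x_dims, x_dims.size()));

    ctx->SetOutputDim("Out", x_dims);
  }
};

}  // namespace operators
}  // namespace paddle
```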
......@@ -25,49 +25,32 @@ namespace paddle {
namespace operators {
void BatchNormOp::InferShape(framework::InferShapeContext *ctx) const {
PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
platform::errors::InvalidArgument(
"Input(X) of BatchNormOp should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasInput("Scale"), true,
platform::errors::InvalidArgument(
"Input(Scale) of BatchNormOp should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasInput("Bias"), true,
platform::errors::InvalidArgument(
"Input(Bias) of BatchNormOp should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasInput("Mean"), true,
platform::errors::InvalidArgument(
"Input(Mean) of BatchNormOp should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasInput("Variance"), true,
platform::errors::InvalidArgument(
"Input(Variance) of BatchNormOp should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasOutput("Y"), true,
platform::errors::InvalidArgument(
"Output(Y) of BatchNormOp should not be null."));
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "BatchNorm");
OP_INOUT_CHECK(ctx->HasInput("Scale"), "Input", "Scale", "BatchNorm");
OP_INOUT_CHECK(ctx->HasInput("Bias"), "Input", "Bias", "BatchNorm");
OP_INOUT_CHECK(ctx->HasInput("Mean"), "Input", "Mean", "BatchNorm");
OP_INOUT_CHECK(ctx->HasInput("Variance"), "Input", "Variance", "BatchNorm");
OP_INOUT_CHECK(ctx->HasOutput("Y"), "Output", "Y", "BatchNorm");
bool is_test = ctx->Attrs().Get<bool>("is_test");
if (!is_test) {
PADDLE_ENFORCE_EQ(
ctx->HasOutput("MeanOut"), true,
platform::errors::InvalidArgument(
"Output(MeanOut) of BatchNormOp should not be null."));
PADDLE_ENFORCE_EQ(
ctx->HasOutput("VarianceOut"), true,
platform::errors::InvalidArgument(
"Output(VarianceOut) of BatchNormOp should not be null."));
PADDLE_ENFORCE_EQ(
ctx->HasOutput("SavedMean"), true,
platform::errors::InvalidArgument(
"Output(SavedMean) of BatchNormOp should not be null."));
PADDLE_ENFORCE_EQ(
ctx->HasOutput("SavedVariance"), true,
platform::errors::InvalidArgument(
"Output(SavedVariance) of BatchNormOp should not be null."));
OP_INOUT_CHECK(ctx->HasOutput("MeanOut"), "Output", "MeanOut", "BatchNorm");
OP_INOUT_CHECK(ctx->HasOutput("VarianceOut"), "Output", "VarianceOut",
"BatchNorm");
OP_INOUT_CHECK(ctx->HasOutput("SavedMean"), "Output", "SavedMean",
"BatchNorm");
OP_INOUT_CHECK(ctx->HasOutput("SavedVariance"), "Output", "SavedVariance",
"BatchNorm");
}
// make sure Mean/MeanOut and Variance/VarianceOut share memory in Python
PADDLE_ENFORCE_EQ(ctx->Inputs("Mean")[0], ctx->Outputs("MeanOut")[0],
"Mean and MeanOut should share the same memory");
PADDLE_ENFORCE_EQ(ctx->Inputs("Variance")[0], ctx->Outputs("VarianceOut")[0],
"Variance and VarianceOut should share the same memory");
platform::errors::InvalidArgument(
"Mean and MeanOut should share the same memory"));
PADDLE_ENFORCE_EQ(
ctx->Inputs("Variance")[0], ctx->Outputs("VarianceOut")[0],
platform::errors::InvalidArgument(
"Variance and VarianceOut should share the same memory"));
const auto x_dims = ctx->GetInputDim("X");
const DataLayout data_layout = framework::StringToDataLayout(
......@@ -103,16 +86,19 @@ void BatchNormOp::InferShape(framework::InferShapeContext *ctx) const {
auto scale_dim = ctx->GetInputDim("Scale");
auto bias_dim = ctx->GetInputDim("Bias");
PADDLE_ENFORCE_EQ(scale_dim.size(), 1UL,
PADDLE_ENFORCE_EQ(
scale_dim.size(), 1UL,
platform::errors::InvalidArgument(
"ShapeError: the dimension of scale must equal to 1."
"But received: the shape of scale is [%s], the dimension "
"of scale is [%d]",
scale_dim, scale_dim.size());
PADDLE_ENFORCE_EQ(
bias_dim.size(), 1UL,
scale_dim, scale_dim.size()));
PADDLE_ENFORCE_EQ(bias_dim.size(), 1UL,
platform::errors::InvalidArgument(
"ShapeError: the dimension of bias must equal to 1."
"But received: the shape of bias is [%s],the dimension of bias is [%d]",
bias_dim, bias_dim.size());
"But received: the shape of bias is [%s],the dimension "
"of bias is [%d]",
bias_dim, bias_dim.size()));
bool check = true;
if ((!ctx->IsRuntime()) && (framework::product(scale_dim) <= 0 ||
......@@ -122,13 +108,15 @@ void BatchNormOp::InferShape(framework::InferShapeContext *ctx) const {
if (check) {
PADDLE_ENFORCE_EQ(scale_dim[0], C,
platform::errors::InvalidArgument(
"ShapeError: the shape of scale must equal to [%d]"
"But received: the shape of scale is [%d]",
C, scale_dim[0]);
C, scale_dim[0]));
PADDLE_ENFORCE_EQ(bias_dim[0], C,
platform::errors::InvalidArgument(
"ShapeError: the shape of bias must equal to [%d]"
"But received: the shape of bias is [%d]",
C, bias_dim[0]);
C, bias_dim[0]));
}
ctx->SetOutputDim("Y", x_dims);
ctx->SetOutputDim("MeanOut", {C});
......@@ -449,27 +437,23 @@ class BatchNormKernel<platform::CPUDeviceContext, T>
void BatchNormGradOp::InferShape(framework::InferShapeContext *ctx) const {
// check input
PADDLE_ENFORCE_EQ(
ctx->HasInput("Scale"), true,
platform::errors::InvalidArgument("Input(scale) should not be null."));
PADDLE_ENFORCE_EQ(
ctx->HasInput(framework::GradVarName("Y")), true,
platform::errors::InvalidArgument("Input(Y@GRAD) should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasInput("SavedMean"), true,
platform::errors::InvalidArgument(
"Input(SavedMean) should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasInput("SavedVariance"), true,
platform::errors::InvalidArgument(
"Input(SavedVariance) should not be null"));
OP_INOUT_CHECK(ctx->HasInput("Scale"), "Input", "Scale", "BatchNormGrad");
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Y")), "Input",
framework::GradVarName("Y"), "BatchNormGrad");
OP_INOUT_CHECK(ctx->HasInput("SavedMean"), "Input", "SavedMean",
"BatchNormGrad");
OP_INOUT_CHECK(ctx->HasInput("SavedVariance"), "Input", "SavedVariance",
"BatchNormGrad");
// check output
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")), "");
OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output",
framework::GradVarName("X"), "BatchNormGrad");
const bool has_scale_grad = ctx->HasOutput(framework::GradVarName("Scale"));
const bool has_bias_grad = ctx->HasOutput(framework::GradVarName("Bias"));
PADDLE_ENFORCE_EQ((has_scale_grad == has_bias_grad), true,
platform::errors::InvalidArgument(
platform::errors::NotFound(
"Output(Scale@GRAD) and Output(Bias@GRAD) must be null "
"or not be null at same time. But now, "
"has Scale@Grad=[%d], has Bias@GRAD=[%d]",
......@@ -489,7 +473,7 @@ void BatchNormGradOp::InferShape(framework::InferShapeContext *ctx) const {
// so only infer shape in run time here.
if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(ctx->HasInput("X") || ctx->HasInput("Y"), true,
platform::errors::InvalidArgument(
platform::errors::NotFound(
"Input(X) and Input(Y) should not be all null."));
auto input_name = "Y";
if (ctx->HasInput("X")) input_name = "X";
......
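The gradient ops get the same treatment, with framework::GradVarName used to build the variable names passed to OP_INOUT_CHECK and platform::errors::NotFound used where an expected paired output is absent. A sketch for a hypothetical "MyOpGrad", using the same assumed includes and namespace as the sketch above:

```cpp
// Illustrative only: a hypothetical "MyOpGrad", mirroring the BatchNormGrad
// presence checks above.
class MyOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    // framework::GradVarName builds names like "Y@GRAD", so the check (and
    // any error it raises) names the exact missing gradient variable.
    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Y")), "Input",
                   framework::GradVarName("Y"), "MyOpGrad");
    OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output",
                   framework::GradVarName("X"), "MyOpGrad");

    // Paired optional parameter gradients: either both are requested or
    // neither; the mismatch is reported as NotFound because one of the
    // paired outputs is missing.
    const bool has_scale_grad =
        ctx->HasOutput(framework::GradVarName("Scale"));
    const bool has_bias_grad = ctx->HasOutput(framework::GradVarName("Bias"));
    PADDLE_ENFORCE_EQ(has_scale_grad, has_bias_grad,
                      platform::errors::NotFound(
                          "Output(Scale@GRAD) and Output(Bias@GRAD) must be "
                          "null or not null at the same time. But received: "
                          "has Scale@GRAD = [%d], has Bias@GRAD = [%d].",
                          has_scale_grad, has_bias_grad));

    ctx->SetOutputDim(framework::GradVarName("X"),
                      ctx->GetInputDim(framework::GradVarName("Y")));
  }
};
```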
......@@ -46,21 +46,24 @@ class CosSimOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE_EQ(
x_dims.size(), y_dims.size(),
platform::errors::InvalidArgument(
"Ranks of Input(X) [%s] and Input(Y) [%s] must be equal.", x_dims,
y_dims));
"ShapeError: Ranks of Input(X) and Input(Y) must be equal."
"But received: Ranks of Input(X) is [%d], Ranks of Input(Y) is "
"[%d]",
x_dims.size(), y_dims.size()));
PADDLE_ENFORCE_GE(
x_dims.size(), 2,
platform::errors::InvalidArgument(
"Rank of Input(X) %d must not be less than 2.", x_dims.size()));
"ShapeError: Rank of Input(X) must not be less than 2."
"But received: Ranks of Input(X) is [%d]",
x_dims.size()));
PADDLE_ENFORCE_EQ(
framework::slice_ddim(x_dims, 1, x_dims.size()),
framework::slice_ddim(y_dims, 1, y_dims.size()),
platform::errors::InvalidArgument(
"All dimensions except the 1st of Input(X) [%s] and Input(Y) [%s]"
"must be equal.",
x_dims, y_dims));
PADDLE_ENFORCE(
x_dims[0] == y_dims[0] || y_dims[0] == 1,
"All dimensions except the 1st of Input(X) and Input(Y) "
"must be equal."));
PADDLE_ENFORCE_EQ(
x_dims[0] == y_dims[0] || y_dims[0] == 1, true,
platform::errors::InvalidArgument(
"The 1st dimension of Input(Y) %d must be equal to Input(X) %d or"
" just 1 (which will be broadcasted to match Input(X)).",
......@@ -136,14 +139,19 @@ class CosSimOpGrad : public framework::OperatorWithKernel {
auto out_dims = ctx->GetInputDim("Out");
auto out_grad_dims = ctx->GetInputDim(framework::GradVarName("Out"));
PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(),
PADDLE_ENFORCE_GE(
x_dims.size(), y_dims.size(),
platform::errors::InvalidArgument(
"Ranks of Input(X) %d and Input(Y) %d must be equal.",
"ShapeError: Ranks of Input(X) and Input(Y) must be equal."
"But received: Ranks of Input(X) is [%d], Ranks of Input(Y) is "
"[%d]",
x_dims.size(), y_dims.size()));
PADDLE_ENFORCE_GE(
x_dims.size(), 2,
platform::errors::InvalidArgument(
"Rank of Input(X) %d must not be less than 2.", x_dims.size()));
"ShapeError: Rank of Input(X) must not be less than 2."
"But received: Ranks of Input(X) is [%d]",
x_dims.size()));
PADDLE_ENFORCE_EQ(
framework::slice_ddim(x_dims, 1, x_dims.size()),
framework::slice_ddim(y_dims, 1, y_dims.size()),
......
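Because removed and added lines are interleaved in this view, the cos_sim broadcast check is restated in isolation below: the old bare PADDLE_ENFORCE on a compound condition becomes a PADDLE_ENFORCE_EQ of that condition against true, carrying a typed InvalidArgument message. The fragment assumes x_dims and y_dims as in the surrounding code; the trailing format arguments are inferred from the message text, since the hunk is truncated at that point.

```cpp
// Before (removed): untyped assertion on a compound condition.
//   PADDLE_ENFORCE(x_dims[0] == y_dims[0] || y_dims[0] == 1, "...");
// After (added): the condition is compared against true, so a violation
// raises a typed InvalidArgument error carrying the received dimensions.
PADDLE_ENFORCE_EQ(
    x_dims[0] == y_dims[0] || y_dims[0] == 1, true,
    platform::errors::InvalidArgument(
        "The 1st dimension of Input(Y) %d must be equal to Input(X) %d or"
        " just 1 (which will be broadcasted to match Input(X)).",
        y_dims[0], x_dims[0]));
```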
......@@ -26,7 +26,7 @@ class DropoutOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null.");
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Dropout");
auto x_dims = ctx->GetInputDim("X");
ctx->SetOutputDim("Out", x_dims);
......@@ -58,8 +58,9 @@ class DropoutOpMaker : public framework::OpProtoAndCheckerMaker {
AddAttr<float>("dropout_prob", "Probability of setting units to zero.")
.SetDefault(.5f)
.AddCustomChecker([](const float& drop_p) {
PADDLE_ENFORCE(drop_p >= 0.0f && drop_p <= 1.0f,
"'dropout_prob' must be between 0.0 and 1.0.");
PADDLE_ENFORCE_EQ(drop_p >= 0.0f && drop_p <= 1.0f, true,
platform::errors::InvalidArgument(
"'dropout_prob' must be between 0.0 and 1.0."));
});
AddAttr<bool>("is_test",
"(bool, default false) Set to true for inference only, false "
......@@ -91,10 +92,11 @@ class DropoutOpMaker : public framework::OpProtoAndCheckerMaker {
"efficient")
.SetDefault("downgrade_in_infer")
.AddCustomChecker([](const std::string& type) {
PADDLE_ENFORCE(
type == "downgrade_in_infer" || type == "upscale_in_train",
PADDLE_ENFORCE_EQ(
type == "downgrade_in_infer" || type == "upscale_in_train", true,
platform::errors::InvalidArgument(
"dropout_implementation can only be downgrade_in_infer or "
"upscale_in_train");
"upscale_in_train"));
});
AddComment(R"DOC(
......@@ -116,11 +118,12 @@ class DropoutOpGrad : public framework::OperatorWithKernel {
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE_EQ(ctx->Attrs().Get<bool>("is_test"), false,
"GradOp is only callable when is_test is false");
platform::errors::InvalidArgument(
"GradOp is only callable when is_test is false"));
PADDLE_ENFORCE(ctx->HasInput("Mask"), "Mask must not be null.");
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
"Input(Out@GRAD) must not be null.");
OP_INOUT_CHECK(ctx->HasInput("Mask"), "Input", "Mask", "DropoutGrad");
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
framework::GradVarName("Out"), "DropoutGrad");
auto out_dims = ctx->GetInputDim(framework::GradVarName("Out"));
......
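The dropout attribute checkers show the same rewrite applied inside AddCustomChecker lambdas. Below is a sketch for a hypothetical float attribute "alpha" inside an OpMaker's Make(); the attribute name and range are placeholders, and the call shapes are the ones used in the diff above.

```cpp
// Illustrative only: validating a hypothetical float attribute "alpha" in an
// OpMaker, in the style of the dropout_prob / dropout_implementation checkers.
AddAttr<float>("alpha", "A hypothetical scaling factor in [0, 1].")
    .SetDefault(0.5f)
    .AddCustomChecker([](const float &alpha) {
      // The compound range condition is compared against true so that a
      // violation raises a typed InvalidArgument instead of a bare enforce.
      PADDLE_ENFORCE_EQ(alpha >= 0.0f && alpha <= 1.0f, true,
                        platform::errors::InvalidArgument(
                            "'alpha' must be between 0.0 and 1.0."));
    });
```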
......@@ -23,27 +23,30 @@ namespace paddle {
namespace operators {
void InstanceNormOp::InferShape(framework::InferShapeContext *ctx) const {
PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
"Input(X) of Instance Norm Op should not be null.");
PADDLE_ENFORCE_EQ(ctx->HasInput("Scale"), true,
"Input(Scale) of Instance Norm Op should not be null.");
PADDLE_ENFORCE_EQ(ctx->HasInput("Bias"), true,
"Input(Bias) of Instance Norm Op should not be null.");
PADDLE_ENFORCE_EQ(ctx->HasOutput("Y"), true,
"Output(Y) of Instance Norm Op should not be null.");
PADDLE_ENFORCE_EQ(
ctx->HasOutput("SavedMean"), true,
"Output(SavedMean) of Instance Norm Op should not be null.");
PADDLE_ENFORCE_EQ(
ctx->HasOutput("SavedVariance"), true,
"Output(SavedVariance) of Instance Norm Op should not be null.");
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "InstanceNorm");
OP_INOUT_CHECK(ctx->HasInput("Scale"), "Input", "Scale", "InstanceNorm");
OP_INOUT_CHECK(ctx->HasInput("Bias"), "Input", "Bias", "InstanceNorm");
OP_INOUT_CHECK(ctx->HasOutput("Y"), "Output", "Y", "InstanceNorm");
OP_INOUT_CHECK(ctx->HasOutput("SavedMean"), "Output", "SavedMean",
"InstanceNorm");
OP_INOUT_CHECK(ctx->HasOutput("SavedVariance"), "Output", "SavedVariance",
"InstanceNorm");
const auto x_dims = ctx->GetInputDim("X");
PADDLE_ENFORCE_GE(x_dims.size(), 2,
"the dimension of input X must greater than or equal to 2");
PADDLE_ENFORCE_LE(x_dims.size(), 5,
"the dimension of input X must smaller than or equal to 5");
PADDLE_ENFORCE_GE(
x_dims.size(), 2,
platform::errors::InvalidArgument(
"ShapeError: the dimension of input X must "
"greater than or equal to 2. But received: the shape of input "
"X = [%s], the dimension of input X =[%d]",
x_dims, x_dims.size()));
PADDLE_ENFORCE_LE(
x_dims.size(), 5,
platform::errors::InvalidArgument(
"ShapeError: the dimension of input X must "
"smaller than or equal to 5, But received: the shape of input "
"X = [%s], the dimension of input X = [%d]",
x_dims, x_dims.size()));
auto N = x_dims[0];
auto C = x_dims[1];
auto NxC = N * C;
......@@ -51,15 +54,34 @@ void InstanceNormOp::InferShape(framework::InferShapeContext *ctx) const {
auto scale_dim = ctx->GetInputDim("Scale");
auto bias_dim = ctx->GetInputDim("Bias");
PADDLE_ENFORCE_EQ(scale_dim.size(), 1UL);
PADDLE_ENFORCE_EQ(bias_dim.size(), 1UL);
PADDLE_ENFORCE_EQ(
scale_dim.size(), 1UL,
platform::errors::InvalidArgument(
"ShapeError: the dimension of scale must equal to 1."
"But received: the shape of scale is [%s], the dimension "
"of scale is [%d]",
scale_dim, scale_dim.size()));
PADDLE_ENFORCE_EQ(bias_dim.size(), 1UL,
platform::errors::InvalidArgument(
"ShapeError: the dimension of bias must equal to 1."
"But received: the shape of bias is [%s],the dimension "
"of bias is [%d]",
bias_dim, bias_dim.size()));
bool check = !((!ctx->IsRuntime()) && (framework::product(scale_dim) <= 0 ||
framework::product(bias_dim) <= 0));
if (check) {
PADDLE_ENFORCE_EQ(scale_dim[0], C);
PADDLE_ENFORCE_EQ(bias_dim[0], C);
PADDLE_ENFORCE_EQ(scale_dim[0], C,
platform::errors::InvalidArgument(
"ShapeError: the shape of scale must equal to [%d]"
"But received: the shape of scale is [%d]",
C, scale_dim[0]));
PADDLE_ENFORCE_EQ(bias_dim[0], C,
platform::errors::InvalidArgument(
"ShapeError: the shape of bias must equal to [%d]"
"But received: the shape of bias is [%d]",
C, bias_dim[0]));
}
ctx->SetOutputDim("Y", x_dims);
......@@ -78,10 +100,12 @@ framework::OpKernelType InstanceNormOp::GetExpectedKernelType(
if (input_data_type == framework::proto::VarType::FP64) {
in_param_type = framework::proto::VarType::FP64;
}
PADDLE_ENFORCE_EQ(in_param_type, ctx.Input<Tensor>("Scale")->type(),
"Scale input should be of float type");
PADDLE_ENFORCE_EQ(in_param_type, ctx.Input<Tensor>("Bias")->type(),
"Bias input should be of float type");
PADDLE_ENFORCE_EQ(
in_param_type, ctx.Input<Tensor>("Scale")->type(),
platform::errors::InvalidArgument("Scale input should be of float type"));
PADDLE_ENFORCE_EQ(
in_param_type, ctx.Input<Tensor>("Bias")->type(),
platform::errors::InvalidArgument("Bias input should be of float type"));
return framework::OpKernelType(input_data_type, ctx.GetPlace());
}
......@@ -91,7 +115,8 @@ void InstanceNormOpMaker::Make() {
.SetDefault(1e-5)
.AddCustomChecker([](const float &epsilon) {
PADDLE_ENFORCE_EQ(epsilon >= 0.0f && epsilon <= 0.001f, true,
"'epsilon' should be between 0.0 and 0.001.");
platform::errors::InvalidArgument(
"'epsilon' should be between 0.0 and 0.001."));
});
AddInput("X", "The input tensor");
AddInput("Scale",
......@@ -193,24 +218,21 @@ class InstanceNormKernel<platform::CPUDeviceContext, T>
};
void InstanceNormGradOp::InferShape(framework::InferShapeContext *ctx) const {
PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true, "Input(X) should not be null");
PADDLE_ENFORCE_EQ(ctx->HasInput("Scale"), true,
"Input(scale) should not be null");
PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Y")), true,
"Input(Y@GRAD) should not be null");
PADDLE_ENFORCE_EQ(ctx->HasInput("SavedMean"), true,
"Input(SavedMean) should not be null");
PADDLE_ENFORCE_EQ(ctx->HasInput("SavedVariance"), true,
"Input(SavedVariance) should not be null");
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "InstanceNormGrad");
OP_INOUT_CHECK(ctx->HasInput("Scale"), "Input", "Scale", "InstanceNormGrad");
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Y")), "Input",
framework::GradVarName("Y"), "InstanceNormGrad");
OP_INOUT_CHECK(ctx->HasInput("SavedMean"), "Input", "SavedMean",
"InstanceNormGrad");
OP_INOUT_CHECK(ctx->HasInput("SavedVariance"), "Input", "SavedVariance",
"InstanceNormGrad");
// check output
PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("X")), true,
"Output(x@GRAD) should not be null");
OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output",
framework::GradVarName("X"), "InstanceNormGrad");
if (ctx->HasOutput(framework::GradVarName("Scale"))) {
PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("Bias")), true,
"Output(Scale@GRAD) and Output(Bias@GRAD) should not be "
"null at the same time");
OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("Bias")), "Output",
framework::GradVarName("Bias"), "InstanceNormGrad");
}
const auto x_dims = ctx->GetInputDim("X");
const int C = x_dims[1];
......@@ -333,21 +355,20 @@ class InstanceNormGradKernel<platform::CPUDeviceContext, T>
void InstanceNormDoubleGradOp::InferShape(
framework::InferShapeContext *ctx) const {
PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true, "Input(X) should not be null");
PADDLE_ENFORCE_EQ(ctx->HasInput("Scale"), true,
"Input(Scale) should not be null.");
PADDLE_ENFORCE_EQ(ctx->HasInput("SavedMean"), true,
"Input(SavedMean) should not be null");
PADDLE_ENFORCE_EQ(ctx->HasInput("SavedVariance"), true,
"Input(SavedVariance) should not be null");
PADDLE_ENFORCE_EQ(ctx->HasInput("DDX"), true,
"Input(DDX) should not be null.");
PADDLE_ENFORCE_EQ(ctx->HasInput("DY"), true,
"Input(Y@GRAD) should not be null");
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "InstanceNormDoubleGrad");
OP_INOUT_CHECK(ctx->HasInput("Scale"), "Input", "Scale",
"InstanceNormDoubleGrad");
OP_INOUT_CHECK(ctx->HasInput("SavedMean"), "Input", "SavedMean",
"InstanceNormDoubleGrad");
OP_INOUT_CHECK(ctx->HasInput("SavedVariance"), "Input", "SavedVariance",
"InstanceNormDoubleGrad");
OP_INOUT_CHECK(ctx->HasInput("DDX"), "Input", "DDX",
"InstanceNormDoubleGrad");
OP_INOUT_CHECK(ctx->HasInput("DY"), "Input", "DY", "InstanceNormDoubleGrad");
// check output
PADDLE_ENFORCE_EQ(ctx->HasOutput("DX"), true,
"Output(DX) should not be null");
OP_INOUT_CHECK(ctx->HasOutput("DX"), "Output", "DX",
"InstanceNormDoubleGrad");
const auto x_dims = ctx->GetInputDim("X");
const int C = x_dims[1];
......
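In InstanceNorm's kernel-type selection, the untyped checks that Scale and Bias match the chosen parameter dtype become InvalidArgument errors. Since the old and new lines are interleaved above, the resulting fragment is restated in isolation here; how input_data_type is computed lies outside the shown hunk and is omitted.

```cpp
// Illustrative fragment, following the InstanceNorm change above: the float
// parameters (Scale, Bias) must match the parameter dtype picked for the op,
// and a mismatch is now reported as InvalidArgument.
auto in_param_type = framework::proto::VarType::FP32;
if (input_data_type == framework::proto::VarType::FP64) {
  in_param_type = framework::proto::VarType::FP64;
}
PADDLE_ENFORCE_EQ(
    in_param_type, ctx.Input<Tensor>("Scale")->type(),
    platform::errors::InvalidArgument("Scale input should be of float type"));
PADDLE_ENFORCE_EQ(
    in_param_type, ctx.Input<Tensor>("Bias")->type(),
    platform::errors::InvalidArgument("Bias input should be of float type"));
return framework::OpKernelType(input_data_type, ctx.GetPlace());
```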
......@@ -1633,6 +1633,12 @@ def npair_loss(anchor, positive, labels, l2_reg=0.002):
npair_loss = fluid.layers.npair_loss(anchor, positive, labels, l2_reg = 0.002)
'''
check_variable_and_dtype(anchor, 'anchor', ['float32', 'float64'],
'npair_loss')
check_variable_and_dtype(positive, 'positive', ['float32', 'float64'],
'positive')
check_variable_and_dtype(labels, 'labels', ['float32', 'float64', 'int64'],
'labels')
Beta = 0.25
batch_size = labels.shape[0]
......
......@@ -894,6 +894,8 @@ def cos_sim(X, Y):
y = fluid.data(name='y', shape=[1, 7], dtype='float32')
out = fluid.layers.cos_sim(x, y)
"""
check_variable_and_dtype(X, 'X', ['float32'], 'cos_sim')
check_variable_and_dtype(Y, 'Y', ['float32'], 'cos_sim')
helper = LayerHelper('cos_sim', **locals())
out = helper.create_variable_for_type_inference(dtype=X.dtype)
xnorm = helper.create_variable_for_type_inference(dtype=X.dtype)
......@@ -3090,6 +3092,8 @@ def instance_norm(input,
hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
hidden2 = fluid.layers.instance_norm(input=hidden1)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'instance_norm')
assert bias_attr is not False, "bias_attr should not be False in instance_norm."
helper = LayerHelper('instance_norm', **locals())
dtype = helper.input_dtype()
......
......@@ -17,6 +17,8 @@ from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
class TestCosSimOp(OpTest):
......@@ -105,5 +107,21 @@ class TestCosSimOp4(TestCosSimOp):
}
class TestCosSimOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# the input of batch_norm must be Variable.
x1 = fluid.create_lod_tensor(
np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace())
x2 = fluid.create_lod_tensor(
np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace())
self.assertRaises(TypeError, fluid.layers.cos_sim, x1, x2)
# the input dtype of batch_norm must be float32
x3 = fluid.layers.data(name='x3', shape=[3, 4, 5, 6], dtype="int32")
x4 = fluid.layers.data(name='x4', shape=[3, 4, 5, 6], dtype="int64")
self.assertRaises(TypeError, fluid.layers.cos_sim, x3, x4)
if __name__ == '__main__':
unittest.main()
......@@ -19,6 +19,7 @@ import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid.op import Operator
from op_test import OpTest
from paddle.fluid import Program, program_guard
def _reference_instance_norm_naive(x, scale, bias, epsilon, mean, var):
......@@ -200,5 +201,18 @@ class TestInstanceNormOpTrainingCase2(TestInstanceNormOpTraining):
self.fetch_list = ['y', 'saved_mean', 'saved_variance', 'x@GRAD']
class TestInstanceNormOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# the input of instance_norm must be Variable.
x1 = fluid.create_lod_tensor(
np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace())
self.assertRaises(TypeError, fluid.layers.instance_norm, x1)
# the input dtype of instance_norm must be float32 or float64
x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="int32")
self.assertRaises(TypeError, fluid.layers.instance_norm, x2)
if __name__ == '__main__':
unittest.main()
......@@ -18,6 +18,7 @@ import unittest
import paddle.fluid as fluid
import paddle.fluid.core as core
import numpy as np
from paddle.fluid import Program, program_guard
def npairloss(anchor, positive, labels, l2_reg=0.002):
......@@ -106,5 +107,74 @@ class TestNpairLossOp(unittest.TestCase):
atol=1e-3)
class TestNpairLossOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
anchor_np = np.random.random((2, 4)).astype("float32")
positive_np = np.random.random((2, 4)).astype("float32")
labels_np = np.random.random((2)).astype("float32")
anchor_data = fluid.data(
name='anchor', shape=[2, 4], dtype='float32')
positive_data = fluid.data(
name='positive', shape=[2, 4], dtype='float32')
labels_data = fluid.data(name='labels', shape=[2], dtype='float32')
def test_anchor_Variable():
# the anchor type must be Variable
fluid.layers.npair_loss(
anchor=anchor_np,
positive=positive_data,
labels=labels_data)
def test_positive_Variable():
# the positive type must be Variable
fluid.layers.npair_loss(
anchor=anchor_data,
positive=positive_np,
labels=labels_data)
def test_labels_Variable():
# the labels type must be Variable
fluid.layers.npair_loss(
anchor=anchor_data,
positive=positive_data,
labels=labels_np)
self.assertRaises(TypeError, test_anchor_Variable)
self.assertRaises(TypeError, test_positive_Variable)
self.assertRaises(TypeError, test_labels_Variable)
def test_anchor_type():
# dtype must be float32 or float64
anchor_data1 = fluid.data(
name='anchor1', shape=[2, 4], dtype='int32')
fluid.layers.npair_loss(
anchor=anchor_data,
positive=positive_data,
labels=labels_np)
def test_positive_type():
# dtype must be float32 or float64
positive_data1 = fluid.data(
name='positive1', shape=[2, 4], dtype='int32')
fluid.layers.npair_loss(
anchor=anchor_data,
positive=positive_data1,
labels=labels_np)
def test_labels_type():
# dtype must be float32 or float64
labels_data1 = fluid.data(
name='labels1', shape=[2], dtype='int32')
fluid.layers.npair_loss(
anchor=anchor_data,
positive=positive_data,
labels=labels_data1)
self.assertRaises(TypeError, test_anchor_type)
self.assertRaises(TypeError, test_positive_type)
self.assertRaises(TypeError, test_labels_type)
if __name__ == '__main__':
unittest.main()