Unverified commit cd1de0e2, authored by ceci3, committed by GitHub

API/OP error message enhancement (#23691)

* error enhance,test=develop

* update,test=develop

* update type, test=develop

* replace inout_check, test=develop
Parent 2787944c
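The C++ hunks below repeatedly make two changes: verbose PADDLE_ENFORCE_EQ null checks are replaced by the OP_INOUT_CHECK macro, and remaining messages are wrapped in typed platform::errors categories. As orientation only, here is a minimal, self-contained sketch of what an OP_INOUT_CHECK-style macro does; the name OP_INOUT_CHECK_DEMO, the use of std::runtime_error, and the exact message format are illustrative assumptions, not Paddle's actual implementation.

// Hypothetical sketch only: Paddle's real OP_INOUT_CHECK lives in the
// framework headers and reports through platform::errors; this stand-in just
// shows the "Kind(name) of operator" message format it centralizes.
#include <cstdio>
#include <stdexcept>
#include <string>

#define OP_INOUT_CHECK_DEMO(cond, kind, name, op)                      \
  do {                                                                 \
    if (!(cond)) {                                                     \
      throw std::runtime_error(std::string(kind) + "(" + (name) +      \
                               ") of operator (" + (op) +              \
                               ") should not be null.");               \
    }                                                                  \
  } while (0)

int main() {
  bool has_x = false;  // pretend ctx->HasInput("X") returned false
  try {
    OP_INOUT_CHECK_DEMO(has_x, "Input", "X", "BatchNorm");
  } catch (const std::runtime_error &e) {
    // Prints: Input(X) of operator (BatchNorm) should not be null.
    std::printf("%s\n", e.what());
  }
  return 0;
}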
@@ -25,49 +25,32 @@ namespace paddle {
 namespace operators {
 void BatchNormOp::InferShape(framework::InferShapeContext *ctx) const {
-  PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                    platform::errors::InvalidArgument(
-                        "Input(X) of BatchNormOp should not be null."));
-  PADDLE_ENFORCE_EQ(ctx->HasInput("Scale"), true,
-                    platform::errors::InvalidArgument(
-                        "Input(Scale) of BatchNormOp should not be null."));
-  PADDLE_ENFORCE_EQ(ctx->HasInput("Bias"), true,
-                    platform::errors::InvalidArgument(
-                        "Input(Bias) of BatchNormOp should not be null."));
-  PADDLE_ENFORCE_EQ(ctx->HasInput("Mean"), true,
-                    platform::errors::InvalidArgument(
-                        "Input(Mean) of BatchNormOp should not be null."));
-  PADDLE_ENFORCE_EQ(ctx->HasInput("Variance"), true,
-                    platform::errors::InvalidArgument(
-                        "Input(Variance) of BatchNormOp should not be null."));
-  PADDLE_ENFORCE_EQ(ctx->HasOutput("Y"), true,
-                    platform::errors::InvalidArgument(
-                        "Output(Y) of BatchNormOp should not be null."));
+  OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "BatchNorm");
+  OP_INOUT_CHECK(ctx->HasInput("Scale"), "Input", "Scale", "BatchNorm");
+  OP_INOUT_CHECK(ctx->HasInput("Bias"), "Input", "Bias", "BatchNorm");
+  OP_INOUT_CHECK(ctx->HasInput("Mean"), "Input", "Mean", "BatchNorm");
+  OP_INOUT_CHECK(ctx->HasInput("Variance"), "Input", "Variance", "BatchNorm");
+  OP_INOUT_CHECK(ctx->HasOutput("Y"), "Output", "Y", "BatchNorm");
   bool is_test = ctx->Attrs().Get<bool>("is_test");
   if (!is_test) {
-    PADDLE_ENFORCE_EQ(
-        ctx->HasOutput("MeanOut"), true,
-        platform::errors::InvalidArgument(
-            "Output(MeanOut) of BatchNormOp should not be null."));
-    PADDLE_ENFORCE_EQ(
-        ctx->HasOutput("VarianceOut"), true,
-        platform::errors::InvalidArgument(
-            "Output(VarianceOut) of BatchNormOp should not be null."));
-    PADDLE_ENFORCE_EQ(
-        ctx->HasOutput("SavedMean"), true,
-        platform::errors::InvalidArgument(
-            "Output(SavedMean) of BatchNormOp should not be null."));
-    PADDLE_ENFORCE_EQ(
-        ctx->HasOutput("SavedVariance"), true,
-        platform::errors::InvalidArgument(
-            "Output(SavedVariance) of BatchNormOp should not be null."));
+    OP_INOUT_CHECK(ctx->HasOutput("MeanOut"), "Output", "MeanOut", "BatchNorm");
+    OP_INOUT_CHECK(ctx->HasOutput("VarianceOut"), "Output", "VarianceOut",
+                   "BatchNorm");
+    OP_INOUT_CHECK(ctx->HasOutput("SavedMean"), "Output", "SavedMean",
+                   "BatchNorm");
+    OP_INOUT_CHECK(ctx->HasOutput("SavedVariance"), "Output", "SavedVariance",
+                   "BatchNorm");
   }
   // make sure Mean/MeanOut and Variance/VarianceOut share memory in Python
   PADDLE_ENFORCE_EQ(ctx->Inputs("Mean")[0], ctx->Outputs("MeanOut")[0],
-                    "Mean and MeanOut should share the same memory");
-  PADDLE_ENFORCE_EQ(ctx->Inputs("Variance")[0], ctx->Outputs("VarianceOut")[0],
-                    "Variance and VarianceOut should share the same memory");
+                    platform::errors::InvalidArgument(
+                        "Mean and MeanOut should share the same memory"));
+  PADDLE_ENFORCE_EQ(
+      ctx->Inputs("Variance")[0], ctx->Outputs("VarianceOut")[0],
+      platform::errors::InvalidArgument(
+          "Variance and VarianceOut should share the same memory"));
   const auto x_dims = ctx->GetInputDim("X");
   const DataLayout data_layout = framework::StringToDataLayout(
@@ -103,16 +86,19 @@ void BatchNormOp::InferShape(framework::InferShapeContext *ctx) const {
   auto scale_dim = ctx->GetInputDim("Scale");
   auto bias_dim = ctx->GetInputDim("Bias");
-  PADDLE_ENFORCE_EQ(scale_dim.size(), 1UL,
-                    "ShapeError: the dimension of scale must equal to 1."
-                    "But received: the shape of scale is [%s], the dimension "
-                    "of scale is [%d]",
-                    scale_dim, scale_dim.size());
-  PADDLE_ENFORCE_EQ(
-      bias_dim.size(), 1UL,
-      "ShapeError: the dimension of bias must equal to 1."
-      "But received: the shape of bias is [%s],the dimension of bias is [%d]",
-      bias_dim, bias_dim.size());
+  PADDLE_ENFORCE_EQ(
+      scale_dim.size(), 1UL,
+      platform::errors::InvalidArgument(
+          "ShapeError: the dimension of scale must equal to 1."
+          "But received: the shape of scale is [%s], the dimension "
+          "of scale is [%d]",
+          scale_dim, scale_dim.size()));
+  PADDLE_ENFORCE_EQ(bias_dim.size(), 1UL,
+                    platform::errors::InvalidArgument(
+                        "ShapeError: the dimension of bias must equal to 1."
+                        "But received: the shape of bias is [%s],the dimension "
+                        "of bias is [%d]",
+                        bias_dim, bias_dim.size()));
   bool check = true;
   if ((!ctx->IsRuntime()) && (framework::product(scale_dim) <= 0 ||
@@ -122,13 +108,15 @@ void BatchNormOp::InferShape(framework::InferShapeContext *ctx) const {
   if (check) {
     PADDLE_ENFORCE_EQ(scale_dim[0], C,
-                      "ShapeError: the shape of scale must equal to [%d]"
-                      "But received: the shape of scale is [%d]",
-                      C, scale_dim[0]);
+                      platform::errors::InvalidArgument(
+                          "ShapeError: the shape of scale must equal to [%d]"
+                          "But received: the shape of scale is [%d]",
+                          C, scale_dim[0]));
     PADDLE_ENFORCE_EQ(bias_dim[0], C,
-                      "ShapeError: the shape of bias must equal to [%d]"
-                      "But received: the shape of bias is [%d]",
-                      C, bias_dim[0]);
+                      platform::errors::InvalidArgument(
+                          "ShapeError: the shape of bias must equal to [%d]"
+                          "But received: the shape of bias is [%d]",
+                          C, bias_dim[0]));
   }
   ctx->SetOutputDim("Y", x_dims);
   ctx->SetOutputDim("MeanOut", {C});
@@ -449,27 +437,23 @@ class BatchNormKernel<platform::CPUDeviceContext, T>
 void BatchNormGradOp::InferShape(framework::InferShapeContext *ctx) const {
   // check input
-  PADDLE_ENFORCE_EQ(
-      ctx->HasInput("Scale"), true,
-      platform::errors::InvalidArgument("Input(scale) should not be null."));
-  PADDLE_ENFORCE_EQ(
-      ctx->HasInput(framework::GradVarName("Y")), true,
-      platform::errors::InvalidArgument("Input(Y@GRAD) should not be null."));
-  PADDLE_ENFORCE_EQ(ctx->HasInput("SavedMean"), true,
-                    platform::errors::InvalidArgument(
-                        "Input(SavedMean) should not be null."));
-  PADDLE_ENFORCE_EQ(ctx->HasInput("SavedVariance"), true,
-                    platform::errors::InvalidArgument(
-                        "Input(SavedVariance) should not be null"));
+  OP_INOUT_CHECK(ctx->HasInput("Scale"), "Input", "Scale", "BatchNormGrad");
+  OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Y")), "Input",
+                 framework::GradVarName("Y"), "BatchNormGrad");
+  OP_INOUT_CHECK(ctx->HasInput("SavedMean"), "Input", "SavedMean",
+                 "BatchNormGrad");
+  OP_INOUT_CHECK(ctx->HasInput("SavedVariance"), "Input", "SavedVariance",
+                 "BatchNormGrad");
   // check output
-  PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")), "");
+  OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output",
+                 framework::GradVarName("X"), "BatchNormGrad");
   const bool has_scale_grad = ctx->HasOutput(framework::GradVarName("Scale"));
   const bool has_bias_grad = ctx->HasOutput(framework::GradVarName("Bias"));
   PADDLE_ENFORCE_EQ((has_scale_grad == has_bias_grad), true,
-                    platform::errors::InvalidArgument(
+                    platform::errors::NotFound(
                         "Output(Scale@GRAD) and Output(Bias@GRAD) must be null "
                         "or not be null at same time. But now, "
                         "has Scale@Grad=[%d], has Bias@GRAD=[%d]",
@@ -489,7 +473,7 @@ void BatchNormGradOp::InferShape(framework::InferShapeContext *ctx) const {
   // so only infer shape in run time here.
   if (ctx->IsRuntime()) {
     PADDLE_ENFORCE_EQ(ctx->HasInput("X") || ctx->HasInput("Y"), true,
-                      platform::errors::InvalidArgument(
+                      platform::errors::NotFound(
                           "Input(X) and Input(Y) should not be all null."));
     auto input_name = "Y";
     if (ctx->HasInput("X")) input_name = "X";
......
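The PADDLE_ENFORCE_* call sites that remain above keep their comparison but gain a typed error category (platform::errors::InvalidArgument or NotFound) around a more detailed "expected vs. received" message. Below is a minimal, self-contained sketch of that pattern using invented names (EnforceEq, EnforceError, ErrorType); it is not Paddle's actual enforce machinery.

// Illustrative stand-in for PADDLE_ENFORCE_EQ(a, b, platform::errors::...(msg)).
#include <cstdio>
#include <stdexcept>
#include <string>

enum class ErrorType { kInvalidArgument, kNotFound };

struct EnforceError : public std::runtime_error {
  ErrorType type;
  EnforceError(ErrorType t, const std::string &msg)
      : std::runtime_error(msg), type(t) {}
};

// Analogue of PADDLE_ENFORCE_EQ with an attached error category.
template <typename T>
void EnforceEq(const T &a, const T &b, ErrorType type, const std::string &msg) {
  if (!(a == b)) throw EnforceError(type, msg);
}

int main() {
  int scale_rank = 2;  // pretend scale_dim.size() returned 2
  char msg[128];
  std::snprintf(msg, sizeof(msg),
                "ShapeError: the dimension of scale must equal to 1. "
                "But received: the dimension of scale is [%d]",
                scale_rank);
  try {
    EnforceEq(scale_rank, 1, ErrorType::kInvalidArgument, msg);
  } catch (const EnforceError &e) {
    std::printf("%s\n", e.what());  // prints the ShapeError message
  }
  return 0;
}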
@@ -46,21 +46,24 @@ class CosSimOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(
         x_dims.size(), y_dims.size(),
         platform::errors::InvalidArgument(
-            "Ranks of Input(X) [%s] and Input(Y) [%s] must be equal.", x_dims,
-            y_dims));
+            "ShapeError: Ranks of Input(X) and Input(Y) must be equal."
+            "But received: Ranks of Input(X) is [%d], Ranks of Input(Y) is "
+            "[%d]",
+            x_dims.size(), y_dims.size()));
     PADDLE_ENFORCE_GE(
         x_dims.size(), 2,
         platform::errors::InvalidArgument(
-            "Rank of Input(X) %d must not be less than 2.", x_dims.size()));
+            "ShapeError: Rank of Input(X) must not be less than 2."
+            "But received: Ranks of Input(X) is [%d]",
+            x_dims.size()));
     PADDLE_ENFORCE_EQ(
         framework::slice_ddim(x_dims, 1, x_dims.size()),
         framework::slice_ddim(y_dims, 1, y_dims.size()),
         platform::errors::InvalidArgument(
-            "All dimensions except the 1st of Input(X) [%s] and Input(Y) [%s]"
-            "must be equal.",
-            x_dims, y_dims));
-    PADDLE_ENFORCE(
-        x_dims[0] == y_dims[0] || y_dims[0] == 1,
+            "All dimensions except the 1st of Input(X) and Input(Y) "
+            "must be equal."));
+    PADDLE_ENFORCE_EQ(
+        x_dims[0] == y_dims[0] || y_dims[0] == 1, true,
         platform::errors::InvalidArgument(
             "The 1st dimension of Input(Y) %d must be equal to Input(X) %d or"
             " just 1 (which will be broadcasted to match Input(X)).",
@@ -136,14 +139,19 @@ class CosSimOpGrad : public framework::OperatorWithKernel {
     auto out_dims = ctx->GetInputDim("Out");
     auto out_grad_dims = ctx->GetInputDim(framework::GradVarName("Out"));
-    PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(),
-                      platform::errors::InvalidArgument(
-                          "Ranks of Input(X) %d and Input(Y) %d must be equal.",
-                          x_dims.size(), y_dims.size()));
+    PADDLE_ENFORCE_GE(
+        x_dims.size(), y_dims.size(),
+        platform::errors::InvalidArgument(
+            "ShapeError: Ranks of Input(X) and Input(Y) must be equal."
+            "But received: Ranks of Input(X) is [%d], Ranks of Input(Y) is "
+            "[%d]",
+            x_dims.size(), y_dims.size()));
     PADDLE_ENFORCE_GE(
         x_dims.size(), 2,
         platform::errors::InvalidArgument(
-            "Rank of Input(X) %d must not be less than 2.", x_dims.size()));
+            "ShapeError: Rank of Input(X) must not be less than 2."
+            "But received: Ranks of Input(X) is [%d]",
+            x_dims.size()));
     PADDLE_ENFORCE_EQ(
         framework::slice_ddim(x_dims, 1, x_dims.size()),
         framework::slice_ddim(y_dims, 1, y_dims.size()),
......
@@ -26,7 +26,7 @@ class DropoutOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Dropout");
     auto x_dims = ctx->GetInputDim("X");
     ctx->SetOutputDim("Out", x_dims);
@@ -58,8 +58,9 @@ class DropoutOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<float>("dropout_prob", "Probability of setting units to zero.")
         .SetDefault(.5f)
         .AddCustomChecker([](const float& drop_p) {
-          PADDLE_ENFORCE(drop_p >= 0.0f && drop_p <= 1.0f,
-                         "'dropout_prob' must be between 0.0 and 1.0.");
+          PADDLE_ENFORCE_EQ(drop_p >= 0.0f && drop_p <= 1.0f, true,
+                            platform::errors::InvalidArgument(
+                                "'dropout_prob' must be between 0.0 and 1.0."));
         });
     AddAttr<bool>("is_test",
                   "(bool, default false) Set to true for inference only, false "
@@ -91,10 +92,11 @@ class DropoutOpMaker : public framework::OpProtoAndCheckerMaker {
                    "efficient")
         .SetDefault("downgrade_in_infer")
         .AddCustomChecker([](const std::string& type) {
-          PADDLE_ENFORCE(
-              type == "downgrade_in_infer" || type == "upscale_in_train",
-              "dropout_implementation can only be downgrade_in_infer or "
-              "upscale_in_train");
+          PADDLE_ENFORCE_EQ(
+              type == "downgrade_in_infer" || type == "upscale_in_train", true,
+              platform::errors::InvalidArgument(
+                  "dropout_implementation can only be downgrade_in_infer or "
+                  "upscale_in_train"));
         });
     AddComment(R"DOC(
@@ -116,11 +118,12 @@ class DropoutOpGrad : public framework::OperatorWithKernel {
   void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE_EQ(ctx->Attrs().Get<bool>("is_test"), false,
-                      "GradOp is only callable when is_test is false");
+                      platform::errors::InvalidArgument(
+                          "GradOp is only callable when is_test is false"));
-    PADDLE_ENFORCE(ctx->HasInput("Mask"), "Mask must not be null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) must not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Mask"), "Input", "Mask", "DropoutGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "DropoutGrad");
     auto out_dims = ctx->GetInputDim(framework::GradVarName("Out"));
......
@@ -23,27 +23,30 @@ namespace paddle {
 namespace operators {
 void InstanceNormOp::InferShape(framework::InferShapeContext *ctx) const {
-  PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                    "Input(X) of Instance Norm Op should not be null.");
-  PADDLE_ENFORCE_EQ(ctx->HasInput("Scale"), true,
-                    "Input(Scale) of Instance Norm Op should not be null.");
-  PADDLE_ENFORCE_EQ(ctx->HasInput("Bias"), true,
-                    "Input(Bias) of Instance Norm Op should not be null.");
-  PADDLE_ENFORCE_EQ(ctx->HasOutput("Y"), true,
-                    "Output(Y) of Instance Norm Op should not be null.");
-  PADDLE_ENFORCE_EQ(
-      ctx->HasOutput("SavedMean"), true,
-      "Output(SavedMean) of Instance Norm Op should not be null.");
-  PADDLE_ENFORCE_EQ(
-      ctx->HasOutput("SavedVariance"), true,
-      "Output(SavedVariance) of Instance Norm Op should not be null.");
+  OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "InstanceNorm");
+  OP_INOUT_CHECK(ctx->HasInput("Scale"), "Input", "Scale", "InstanceNorm");
+  OP_INOUT_CHECK(ctx->HasInput("Bias"), "Input", "Bias", "InstanceNorm");
+  OP_INOUT_CHECK(ctx->HasOutput("Y"), "Output", "Y", "InstanceNorm");
+  OP_INOUT_CHECK(ctx->HasOutput("SavedMean"), "Output", "SavedMean",
+                 "InstanceNorm");
+  OP_INOUT_CHECK(ctx->HasOutput("SavedVariance"), "Output", "SavedVariance",
+                 "InstanceNorm");
   const auto x_dims = ctx->GetInputDim("X");
-  PADDLE_ENFORCE_GE(x_dims.size(), 2,
-                    "the dimension of input X must greater than or equal to 2");
-  PADDLE_ENFORCE_LE(x_dims.size(), 5,
-                    "the dimension of input X must smaller than or equal to 5");
+  PADDLE_ENFORCE_GE(
+      x_dims.size(), 2,
+      platform::errors::InvalidArgument(
+          "ShapeError: the dimension of input X must "
+          "greater than or equal to 2. But received: the shape of input "
+          "X = [%s], the dimension of input X =[%d]",
+          x_dims, x_dims.size()));
+  PADDLE_ENFORCE_LE(
+      x_dims.size(), 5,
+      platform::errors::InvalidArgument(
+          "ShapeError: the dimension of input X must "
+          "smaller than or equal to 5, But received: the shape of input "
+          "X = [%s], the dimension of input X = [%d]",
+          x_dims, x_dims.size()));
   auto N = x_dims[0];
   auto C = x_dims[1];
   auto NxC = N * C;
@@ -51,15 +54,34 @@ void InstanceNormOp::InferShape(framework::InferShapeContext *ctx) const {
   auto scale_dim = ctx->GetInputDim("Scale");
   auto bias_dim = ctx->GetInputDim("Bias");
-  PADDLE_ENFORCE_EQ(scale_dim.size(), 1UL);
-  PADDLE_ENFORCE_EQ(bias_dim.size(), 1UL);
+  PADDLE_ENFORCE_EQ(
+      scale_dim.size(), 1UL,
+      platform::errors::InvalidArgument(
+          "ShapeError: the dimension of scale must equal to 1."
+          "But received: the shape of scale is [%s], the dimension "
+          "of scale is [%d]",
+          scale_dim, scale_dim.size()));
+  PADDLE_ENFORCE_EQ(bias_dim.size(), 1UL,
+                    platform::errors::InvalidArgument(
+                        "ShapeError: the dimension of bias must equal to 1."
+                        "But received: the shape of bias is [%s],the dimension "
+                        "of bias is [%d]",
+                        bias_dim, bias_dim.size()));
   bool check = !((!ctx->IsRuntime()) && (framework::product(scale_dim) <= 0 ||
                                          framework::product(bias_dim) <= 0));
   if (check) {
-    PADDLE_ENFORCE_EQ(scale_dim[0], C);
-    PADDLE_ENFORCE_EQ(bias_dim[0], C);
+    PADDLE_ENFORCE_EQ(scale_dim[0], C,
+                      platform::errors::InvalidArgument(
+                          "ShapeError: the shape of scale must equal to [%d]"
+                          "But received: the shape of scale is [%d]",
+                          C, scale_dim[0]));
+    PADDLE_ENFORCE_EQ(bias_dim[0], C,
+                      platform::errors::InvalidArgument(
+                          "ShapeError: the shape of bias must equal to [%d]"
+                          "But received: the shape of bias is [%d]",
+                          C, bias_dim[0]));
   }
   ctx->SetOutputDim("Y", x_dims);
@@ -78,10 +100,12 @@ framework::OpKernelType InstanceNormOp::GetExpectedKernelType(
   if (input_data_type == framework::proto::VarType::FP64) {
     in_param_type = framework::proto::VarType::FP64;
   }
-  PADDLE_ENFORCE_EQ(in_param_type, ctx.Input<Tensor>("Scale")->type(),
-                    "Scale input should be of float type");
-  PADDLE_ENFORCE_EQ(in_param_type, ctx.Input<Tensor>("Bias")->type(),
-                    "Bias input should be of float type");
+  PADDLE_ENFORCE_EQ(
+      in_param_type, ctx.Input<Tensor>("Scale")->type(),
+      platform::errors::InvalidArgument("Scale input should be of float type"));
+  PADDLE_ENFORCE_EQ(
+      in_param_type, ctx.Input<Tensor>("Bias")->type(),
+      platform::errors::InvalidArgument("Bias input should be of float type"));
   return framework::OpKernelType(input_data_type, ctx.GetPlace());
 }
@@ -91,7 +115,8 @@ void InstanceNormOpMaker::Make() {
       .SetDefault(1e-5)
       .AddCustomChecker([](const float &epsilon) {
         PADDLE_ENFORCE_EQ(epsilon >= 0.0f && epsilon <= 0.001f, true,
-                          "'epsilon' should be between 0.0 and 0.001.");
+                          platform::errors::InvalidArgument(
+                              "'epsilon' should be between 0.0 and 0.001."));
       });
   AddInput("X", "The input tensor");
   AddInput("Scale",
@@ -193,24 +218,21 @@ class InstanceNormKernel<platform::CPUDeviceContext, T>
 };
 void InstanceNormGradOp::InferShape(framework::InferShapeContext *ctx) const {
-  PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true, "Input(X) should not be null");
-  PADDLE_ENFORCE_EQ(ctx->HasInput("Scale"), true,
-                    "Input(scale) should not be null");
-  PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Y")), true,
-                    "Input(Y@GRAD) should not be null");
-  PADDLE_ENFORCE_EQ(ctx->HasInput("SavedMean"), true,
-                    "Input(SavedMean) should not be null");
-  PADDLE_ENFORCE_EQ(ctx->HasInput("SavedVariance"), true,
-                    "Input(SavedVariance) should not be null");
+  OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "InstanceNormGrad");
+  OP_INOUT_CHECK(ctx->HasInput("Scale"), "Input", "Scale", "InstanceNormGrad");
+  OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Y")), "Input",
+                 framework::GradVarName("Y"), "InstanceNormGrad");
+  OP_INOUT_CHECK(ctx->HasInput("SavedMean"), "Input", "SavedMean",
+                 "InstanceNormGrad");
+  OP_INOUT_CHECK(ctx->HasInput("SavedVariance"), "Input", "SavedVariance",
+                 "InstanceNormGrad");
   // check output
-  PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("X")), true,
-                    "Output(x@GRAD) should not be null");
+  OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output",
+                 framework::GradVarName("X"), "InstanceNormGrad");
   if (ctx->HasOutput(framework::GradVarName("Scale"))) {
-    PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("Bias")), true,
-                      "Output(Scale@GRAD) and Output(Bias@GRAD) should not be "
-                      "null at the same time");
+    OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("Bias")), "Output",
+                   framework::GradVarName("Bias"), "InstanceNormGrad");
   }
   const auto x_dims = ctx->GetInputDim("X");
   const int C = x_dims[1];
@@ -333,21 +355,20 @@ class InstanceNormGradKernel<platform::CPUDeviceContext, T>
 void InstanceNormDoubleGradOp::InferShape(
     framework::InferShapeContext *ctx) const {
-  PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true, "Input(X) should not be null");
-  PADDLE_ENFORCE_EQ(ctx->HasInput("Scale"), true,
-                    "Input(Scale) should not be null.");
-  PADDLE_ENFORCE_EQ(ctx->HasInput("SavedMean"), true,
-                    "Input(SavedMean) should not be null");
-  PADDLE_ENFORCE_EQ(ctx->HasInput("SavedVariance"), true,
-                    "Input(SavedVariance) should not be null");
-  PADDLE_ENFORCE_EQ(ctx->HasInput("DDX"), true,
-                    "Input(DDX) should not be null.");
-  PADDLE_ENFORCE_EQ(ctx->HasInput("DY"), true,
-                    "Input(Y@GRAD) should not be null");
+  OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "InstanceNormDoubleGrad");
+  OP_INOUT_CHECK(ctx->HasInput("Scale"), "Input", "Scale",
+                 "InstanceNormDoubleGrad");
+  OP_INOUT_CHECK(ctx->HasInput("SavedMean"), "Input", "SavedMean",
+                 "InstanceNormDoubleGrad");
+  OP_INOUT_CHECK(ctx->HasInput("SavedVariance"), "Input", "SavedVariance",
+                 "InstanceNormDoubleGrad");
+  OP_INOUT_CHECK(ctx->HasInput("DDX"), "Input", "DDX",
+                 "InstanceNormDoubleGrad");
+  OP_INOUT_CHECK(ctx->HasInput("DY"), "Input", "DY", "InstanceNormDoubleGrad");
   // check output
-  PADDLE_ENFORCE_EQ(ctx->HasOutput("DX"), true,
-                    "Output(DX) should not be null");
+  OP_INOUT_CHECK(ctx->HasOutput("DX"), "Output", "DX",
+                 "InstanceNormDoubleGrad");
   const auto x_dims = ctx->GetInputDim("X");
   const int C = x_dims[1];
......
@@ -1633,6 +1633,12 @@ def npair_loss(anchor, positive, labels, l2_reg=0.002):
           npair_loss = fluid.layers.npair_loss(anchor, positive, labels, l2_reg = 0.002)
     '''
+    check_variable_and_dtype(anchor, 'anchor', ['float32', 'float64'],
+                             'npair_loss')
+    check_variable_and_dtype(positive, 'positive', ['float32', 'float64'],
+                             'positive')
+    check_variable_and_dtype(labels, 'labels', ['float32', 'float64', 'int64'],
+                             'labels')
     Beta = 0.25
     batch_size = labels.shape[0]
......
@@ -894,6 +894,8 @@ def cos_sim(X, Y):
             y = fluid.data(name='y', shape=[1, 7], dtype='float32')
             out = fluid.layers.cos_sim(x, y)
     """
+    check_variable_and_dtype(X, 'X', ['float32'], 'cos_sim')
+    check_variable_and_dtype(Y, 'Y', ['float32'], 'cos_sim')
     helper = LayerHelper('cos_sim', **locals())
     out = helper.create_variable_for_type_inference(dtype=X.dtype)
     xnorm = helper.create_variable_for_type_inference(dtype=X.dtype)
@@ -3090,6 +3092,8 @@ def instance_norm(input,
             hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
             hidden2 = fluid.layers.instance_norm(input=hidden1)
     """
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
+                             'instance_norm')
     assert bias_attr is not False, "bias_attr should not be False in instance_norm."
     helper = LayerHelper('instance_norm', **locals())
     dtype = helper.input_dtype()
......
@@ -17,6 +17,8 @@ from __future__ import print_function
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle.fluid as fluid
+from paddle.fluid import Program, program_guard

 class TestCosSimOp(OpTest):
@@ -105,5 +107,21 @@ class TestCosSimOp4(TestCosSimOp):
         }

+class TestCosSimOpError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            # the input of cos_sim must be Variable.
+            x1 = fluid.create_lod_tensor(
+                np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace())
+            x2 = fluid.create_lod_tensor(
+                np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace())
+            self.assertRaises(TypeError, fluid.layers.cos_sim, x1, x2)
+            # the input dtype of cos_sim must be float32
+            x3 = fluid.layers.data(name='x3', shape=[3, 4, 5, 6], dtype="int32")
+            x4 = fluid.layers.data(name='x4', shape=[3, 4, 5, 6], dtype="int64")
+            self.assertRaises(TypeError, fluid.layers.cos_sim, x3, x4)
+
+
 if __name__ == '__main__':
     unittest.main()
@@ -19,6 +19,7 @@ import paddle.fluid.core as core
 import paddle.fluid as fluid
 from paddle.fluid.op import Operator
 from op_test import OpTest
+from paddle.fluid import Program, program_guard

 def _reference_instance_norm_naive(x, scale, bias, epsilon, mean, var):
@@ -200,5 +201,18 @@ class TestInstanceNormOpTrainingCase2(TestInstanceNormOpTraining):
         self.fetch_list = ['y', 'saved_mean', 'saved_variance', 'x@GRAD']

+class TestInstanceNormOpError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            # the input of instance_norm must be Variable.
+            x1 = fluid.create_lod_tensor(
+                np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace())
+            self.assertRaises(TypeError, fluid.layers.instance_norm, x1)
+            # the input dtype of instance_norm must be float32 or float64
+            x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="int32")
+            self.assertRaises(TypeError, fluid.layers.instance_norm, x2)
+
+
 if __name__ == '__main__':
     unittest.main()
@@ -18,6 +18,7 @@ import unittest
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 import numpy as np
+from paddle.fluid import Program, program_guard

 def npairloss(anchor, positive, labels, l2_reg=0.002):
@@ -106,5 +107,74 @@ class TestNpairLossOp(unittest.TestCase):
             atol=1e-3)

+class TestNpairLossOpError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            anchor_np = np.random.random((2, 4)).astype("float32")
+            positive_np = np.random.random((2, 4)).astype("float32")
+            labels_np = np.random.random((2)).astype("float32")
+            anchor_data = fluid.data(
+                name='anchor', shape=[2, 4], dtype='float32')
+            positive_data = fluid.data(
+                name='positive', shape=[2, 4], dtype='float32')
+            labels_data = fluid.data(name='labels', shape=[2], dtype='float32')
+
+            def test_anchor_Variable():
+                # the anchor type must be Variable
+                fluid.layers.npair_loss(
+                    anchor=anchor_np,
+                    positive=positive_data,
+                    labels=labels_data)
+
+            def test_positive_Variable():
+                # the positive type must be Variable
+                fluid.layers.npair_loss(
+                    anchor=anchor_data,
+                    positive=positive_np,
+                    labels=labels_data)
+
+            def test_labels_Variable():
+                # the labels type must be Variable
+                fluid.layers.npair_loss(
+                    anchor=anchor_data,
+                    positive=positive_data,
+                    labels=labels_np)
+
+            self.assertRaises(TypeError, test_anchor_Variable)
+            self.assertRaises(TypeError, test_positive_Variable)
+            self.assertRaises(TypeError, test_labels_Variable)
+
+            def test_anchor_type():
+                # dtype of anchor must be float32 or float64
+                anchor_data1 = fluid.data(
+                    name='anchor1', shape=[2, 4], dtype='int32')
+                fluid.layers.npair_loss(
+                    anchor=anchor_data1,
+                    positive=positive_data,
+                    labels=labels_data)
+
+            def test_positive_type():
+                # dtype of positive must be float32 or float64
+                positive_data1 = fluid.data(
+                    name='positive1', shape=[2, 4], dtype='int32')
+                fluid.layers.npair_loss(
+                    anchor=anchor_data,
+                    positive=positive_data1,
+                    labels=labels_data)
+
+            def test_labels_type():
+                # dtype of labels must be float32, float64 or int64
+                labels_data1 = fluid.data(
+                    name='labels1', shape=[2], dtype='int32')
+                fluid.layers.npair_loss(
+                    anchor=anchor_data,
+                    positive=positive_data,
+                    labels=labels_data1)
+
+            self.assertRaises(TypeError, test_anchor_type)
+            self.assertRaises(TypeError, test_positive_type)
+            self.assertRaises(TypeError, test_labels_type)
+
+
 if __name__ == '__main__':
     unittest.main()