From 4231d840777c9b1720d8a06a05e661189526ad08 Mon Sep 17 00:00:00 2001
From: Bai Yifan
Date: Tue, 14 Apr 2020 10:17:25 +0800
Subject: [PATCH] enhance some op/api error message (#23768)

* enhance error message, test=develop
---
 paddle/fluid/operators/deformable_conv_op.cc  | 124 +++++++++++-------
 .../fluid/operators/deformable_conv_v1_op.cc  |  76 ++++++-----
 .../detection/polygon_box_transform_op.cc     |  26 ++--
 .../detection/polygon_box_transform_op.cu     |   5 +-
 .../teacher_student_sigmoid_loss_op.cc        | 100 ++++++++++----
 python/paddle/fluid/layers/detection.py       |   2 +
 .../fluid/layers/learning_rate_scheduler.py   |   3 +
 python/paddle/fluid/layers/loss.py            |  11 ++
 python/paddle/fluid/layers/nn.py              |   6 +
 .../unittests/test_deformable_conv_op.py      |  27 ++++
 .../unittests/test_deformable_conv_v1_op.py   |  33 +++++
 .../fluid/tests/unittests/test_mse_loss.py    |  17 +++
 .../unittests/test_polygon_box_transform.py   |  11 ++
 .../tests/unittests/test_square_error_cost.py |  17 +++
 .../test_teacher_student_sigmoid_loss_op.py   |  18 +++
 15 files changed, 356 insertions(+), 120 deletions(-)

diff --git a/paddle/fluid/operators/deformable_conv_op.cc b/paddle/fluid/operators/deformable_conv_op.cc
index 92f0ec328f..ed52ba7303 100644
--- a/paddle/fluid/operators/deformable_conv_op.cc
+++ b/paddle/fluid/operators/deformable_conv_op.cc
@@ -109,21 +109,14 @@ class DeformableConvOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Input"),
-                   "Input(Input) of DeformableConvOp "
-                   "should not be null");
-    PADDLE_ENFORCE(ctx->HasInput("Offset"),
-                   "Input(Offset) of DeformableConvOp "
-                   "should not be null");
-    PADDLE_ENFORCE(ctx->HasInput("Mask"),
-                   "Input(Mask) of DeformableConvOp "
-                   "should not be null");
-    PADDLE_ENFORCE(ctx->HasInput("Filter"),
-                   "Input(Filter) of DeformableConvOp "
-                   "should not be null");
-    PADDLE_ENFORCE(ctx->HasOutput("Output"),
-                   "Output(Output) of DeformableConvOp "
-                   "should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "deformable_conv");
+    OP_INOUT_CHECK(ctx->HasInput("Offset"), "Input", "Offset",
+                   "deformable_conv");
+    OP_INOUT_CHECK(ctx->HasInput("Mask"), "Input", "Mask", "deformable_conv");
+    OP_INOUT_CHECK(ctx->HasInput("Filter"), "Input", "Filter",
+                   "deformable_conv");
+    OP_INOUT_CHECK(ctx->HasOutput("Output"), "Output", "Output",
+                   "deformable_conv");
 
     auto in_dims = ctx->GetInputDim("Input");
     auto filter_dims = ctx->GetInputDim("Filter");
@@ -138,39 +131,56 @@ class DeformableConvOp : public framework::OperatorWithKernel {
     int deformable_groups = ctx->Attrs().Get<int>("deformable_groups");
     int im2col_step = ctx->Attrs().Get<int>("im2col_step");
 
-    PADDLE_ENFORCE(in_dims.size() == 4,
-                   "Conv input should be 4-D tensor, get %u", in_dims.size());
     PADDLE_ENFORCE_EQ(
-        in_dims.size(), filter_dims.size(),
-        "Conv input dimension and filter dimension should be the same.");
+        in_dims.size(), 4,
+        platform::errors::InvalidArgument(
+            "Conv input should be 4-D tensor, get %u", in_dims.size()));
+    PADDLE_ENFORCE_EQ(in_dims.size(), filter_dims.size(),
+                      platform::errors::InvalidArgument(
+                          "Conv input dimension and filter dimension should be "
+                          "the same. The diff is [%d] vs [%d]",
+                          in_dims.size(), filter_dims.size()));
     PADDLE_ENFORCE_EQ(
         in_dims.size() - strides.size(), 2U,
-        "Conv input dimension and strides dimension should be consistent.");
+        platform::errors::InvalidArgument("Conv input dimension and strides "
+                                          "dimension should be consistent."));
     PADDLE_ENFORCE_EQ(paddings.size(), strides.size(),
-                      "Conv paddings dimension and Conv strides dimension "
-                      "should be the same.");
+                      platform::errors::InvalidArgument(
+                          "Conv paddings dimension and Conv strides dimension "
+                          "should be the same. The diff is [%d] vs [%d]",
+                          paddings.size(), strides.size()));
 
-    PADDLE_ENFORCE_EQ(in_dims[1], filter_dims[1] * groups,
-                      "The number of input channels should be equal to filter "
-                      "channels * groups.");
+    PADDLE_ENFORCE_EQ(
+        in_dims[1], filter_dims[1] * groups,
+        platform::errors::InvalidArgument(
+            "The number of input channels should be equal to filter "
+            "channels * groups. The diff is [%d] vs [%d]",
+            in_dims[1], filter_dims[1] * groups));
     PADDLE_ENFORCE_EQ(
         filter_dims[0] % groups, 0,
-        "The number of output channels should be divided by groups.");
-    PADDLE_ENFORCE_EQ(filter_dims[0] % deformable_groups, 0,
-                      "The number of output channels should be "
-                      "divided by deformable groups.");
+        platform::errors::InvalidArgument(
+            "The number of output channels should be divided by groups."));
+    PADDLE_ENFORCE_EQ(
+        filter_dims[0] % deformable_groups, 0,
+        platform::errors::InvalidArgument(
+            "The number of output channels should be "
+            "divided by deformable groups. The diff is [%d] vs [%d]",
+            filter_dims[0], deformable_groups));
 
     if (in_dims[0] > im2col_step) {
       PADDLE_ENFORCE_EQ(
           in_dims[0] % im2col_step, 0U,
-          "Input batchsize must be smaller than or divide im2col_step");
+          platform::errors::InvalidArgument(
+              "Input batchsize must be smaller than or divide im2col_step"));
     }
 
     for (size_t i = 0; i < strides.size(); ++i) {
-      PADDLE_ENFORCE_GT(strides[i], 0U, "stride %d size incorrect", i);
+      PADDLE_ENFORCE_GT(strides[i], 0U, platform::errors::InvalidArgument(
+                                            "stride %d size incorrect", i));
     }
     for (size_t i = 0; i < dilations.size(); ++i) {
-      PADDLE_ENFORCE_GT(dilations[i], 0U, "dilation %d size incorrect", i);
+      PADDLE_ENFORCE_GT(dilations[i], 0U, platform::errors::InvalidArgument(
+                                              "dilation %d size incorrect", i));
     }
 
     std::vector<int64_t> output_shape({in_dims[0], filter_dims[0]});
@@ -185,29 +195,49 @@ class DeformableConvOp : public framework::OperatorWithKernel {
       }
     }
 
-    PADDLE_ENFORCE_EQ(output_shape[1] % deformable_groups, 0U,
-                      "output num_filter must divide deformable group size.");
+    PADDLE_ENFORCE_EQ(
+        output_shape[1] % deformable_groups, 0U,
+        platform::errors::InvalidArgument(
+            "output num_filter must divide deformable group size."));
 
     if (ctx->IsRuntime()) {
       PADDLE_ENFORCE_EQ(output_shape[2], offset_dims[2],
-                        "output height must equal to offset map height.");
+                        platform::errors::InvalidArgument(
+                            "output height must equal to offset map height. "
+                            "The diff is [%d] vs [%d]",
+                            output_shape[2], offset_dims[2]));
       PADDLE_ENFORCE_EQ(output_shape[3], offset_dims[3],
-                        "output width must equal to offset map width.");
-      PADDLE_ENFORCE_EQ(offset_dims[1] % (filter_dims[2] * filter_dims[3]), 0U,
-                        "offset filter must divide deformable group size.");
-      PADDLE_ENFORCE_EQ(offset_dims[1] / (2 * filter_dims[2] * filter_dims[3]),
-                        deformable_groups,
-                        "offset filter must divide deformable group size.");
+                        platform::errors::InvalidArgument(
The " + "diff is [%d] vs [%d]", + output_shape[3], offset_dims[3])); + PADDLE_ENFORCE_EQ( + offset_dims[1] % (filter_dims[2] * filter_dims[3]), 0U, + platform::errors::InvalidArgument( + "offset filter must divide deformable group size.")); + PADDLE_ENFORCE_EQ( + offset_dims[1] / (2 * filter_dims[2] * filter_dims[3]), + deformable_groups, + platform::errors::InvalidArgument( + "offset filter must divide deformable group size.")); PADDLE_ENFORCE_EQ(output_shape[2], mask_dims[2], - "output height must equal to mask map height."); + platform::errors::InvalidArgument( + "output height must equal to mask map height. The " + "diff is [%d] vs [%d]", + output_shape[2], mask_dims[2])); PADDLE_ENFORCE_EQ(output_shape[3], mask_dims[3], - "output width must equal to mask map width."); + platform::errors::InvalidArgument( + "output width must equal to mask map width. The " + "diff is [%d] vs [%d]", + output_shape[3], mask_dims[3])); PADDLE_ENFORCE_EQ(mask_dims[1] % (filter_dims[2] * filter_dims[3]), 0U, - "mask filter must divide deformable group size."); + platform::errors::InvalidArgument( + "mask filter must divide deformable group size.")); PADDLE_ENFORCE_EQ(mask_dims[1] / (filter_dims[2] * filter_dims[3]), deformable_groups, - "mask filter must divide deformable group size."); + platform::errors::InvalidArgument( + "mask filter must divide deformable group size.")); } ctx->SetOutputDim("Output", framework::make_ddim(output_shape)); @@ -255,8 +285,8 @@ class DeformableConvGradOp : public framework::OperatorWithKernel { auto offset_dims = ctx->GetInputDim("Offset"); auto mask_dims = ctx->GetInputDim("Mask"); - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Output")), - "the gradient of output(Out) must not be null"); + OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Output")), "Input", + "Output@Grad", "deformable_conv_grad"); if (ctx->HasOutput(framework::GradVarName("Input"))) { ctx->SetOutputDim(framework::GradVarName("Input"), in_dims); } diff --git a/paddle/fluid/operators/deformable_conv_v1_op.cc b/paddle/fluid/operators/deformable_conv_v1_op.cc index 76ba31db9d..38b7cbb211 100644 --- a/paddle/fluid/operators/deformable_conv_v1_op.cc +++ b/paddle/fluid/operators/deformable_conv_v1_op.cc @@ -114,18 +114,14 @@ class DeformableConvV1Op : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE_EQ(ctx->HasInput("Input"), true, - "Input(Input) of DeformableConvOp " - "should not be null"); - PADDLE_ENFORCE_EQ(ctx->HasInput("Offset"), true, - "Input(Offset) of DeformableConvOp " - "should not be null"); - PADDLE_ENFORCE_EQ(ctx->HasInput("Filter"), true, - "Input(Filter) of DeformableConvOp " - "should not be null"); - PADDLE_ENFORCE_EQ(ctx->HasOutput("Output"), true, - "Output(Output) of DeformableConvOp " - "should not be null."); + OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", + "deformable_conv_v1"); + OP_INOUT_CHECK(ctx->HasInput("Offset"), "Input", "Offset", + "deformable_conv_v1"); + OP_INOUT_CHECK(ctx->HasInput("Filter"), "Input", "Filter", + "deformable_conv_v1"); + OP_INOUT_CHECK(ctx->HasOutput("Output"), "Output", "Output", + "deformable_conv_v1"); auto in_dims = ctx->GetInputDim("Input"); auto filter_dims = ctx->GetInputDim("Filter"); @@ -139,40 +135,52 @@ class DeformableConvV1Op : public framework::OperatorWithKernel { int deformable_groups = ctx->Attrs().Get("deformable_groups"); int im2col_step = ctx->Attrs().Get("im2col_step"); - 
-    PADDLE_ENFORCE_EQ(in_dims.size(), 4,
-                      "Conv input should be 4-D tensor, get %u",
-                      in_dims.size());
     PADDLE_ENFORCE_EQ(
-        in_dims.size(), filter_dims.size(),
-        "Conv input dimension and filter dimension should be the same.");
+        in_dims.size(), 4,
+        platform::errors::InvalidArgument(
+            "Conv input should be 4-D tensor, get %u", in_dims.size()));
+    PADDLE_ENFORCE_EQ(in_dims.size(), filter_dims.size(),
+                      platform::errors::InvalidArgument(
+                          "Conv input dimension and filter dimension should be "
+                          "the same. The diff is [%d] vs [%d]",
+                          in_dims.size(), filter_dims.size()));
     PADDLE_ENFORCE_EQ(
         in_dims.size() - strides.size(), 2U,
-        "Conv input dimension and strides dimension should be consistent.");
+        platform::errors::InvalidArgument("Conv input dimension and strides "
+                                          "dimension should be consistent."));
     PADDLE_ENFORCE_EQ(paddings.size(), strides.size(),
-                      "Conv paddings dimension and Conv strides dimension "
-                      "should be the same.");
+                      platform::errors::InvalidArgument(
+                          "Conv paddings dimension and Conv strides dimension "
+                          "should be the same. The diff is [%d] vs [%d]",
+                          paddings.size(), strides.size()));
 
-    PADDLE_ENFORCE_EQ(in_dims[1], filter_dims[1] * groups,
-                      "The number of input channels should be equal to filter "
-                      "channels * groups.");
+    PADDLE_ENFORCE_EQ(
+        in_dims[1], filter_dims[1] * groups,
+        platform::errors::InvalidArgument(
+            "The number of input channels should be equal to filter "
+            "channels * groups."));
     PADDLE_ENFORCE_EQ(
         filter_dims[0] % groups, 0,
         "The number of output channels should be divided by groups.");
     PADDLE_ENFORCE_EQ(filter_dims[0] % deformable_groups, 0,
-                      "The number of output channels should be "
-                      "divided by deformable groups.");
+                      platform::errors::InvalidArgument(
+                          "The number of output channels should be "
+                          "divided by deformable groups."));
 
     if (in_dims[0] > im2col_step) {
       PADDLE_ENFORCE_EQ(
           in_dims[0] % im2col_step, 0U,
-          "Input batchsize must be smaller than or divide im2col_step");
+          platform::errors::InvalidArgument(
+              "Input batchsize must be smaller than or divide im2col_step"));
     }
 
     for (size_t i = 0; i < strides.size(); ++i) {
-      PADDLE_ENFORCE_GT(strides[i], 0U, "stride %d size incorrect", i);
+      PADDLE_ENFORCE_GT(strides[i], 0U, platform::errors::InvalidArgument(
+                                            "stride %d size incorrect", i));
     }
     for (size_t i = 0; i < dilations.size(); ++i) {
-      PADDLE_ENFORCE_GT(dilations[i], 0U, "dilation %d size incorrect", i);
+      PADDLE_ENFORCE_GT(dilations[i], 0U, platform::errors::InvalidArgument(
+                                              "dilation %d size incorrect", i));
     }
 
     std::vector<int64_t> output_shape({in_dims[0], filter_dims[0]});
@@ -193,10 +201,14 @@ class DeformableConvV1Op : public framework::OperatorWithKernel {
             "output num_filter must divide deformable group size."));
       PADDLE_ENFORCE_EQ(output_shape[2], offset_dims[2],
                         platform::errors::InvalidArgument(
-                            "output height must equal to offset map height."));
+                            "output height must equal to offset map height. "
+                            "The diff is [%d] vs [%d]",
+                            output_shape[2], offset_dims[2]));
       PADDLE_ENFORCE_EQ(output_shape[3], offset_dims[3],
                         platform::errors::InvalidArgument(
-                            "output width must equal to offset map width."));
The " + "diff is [%d] vs [%d]", + output_shape[3], offset_dims[3])); PADDLE_ENFORCE_EQ( offset_dims[1] % (filter_dims[2] * filter_dims[3]), 0U, platform::errors::InvalidArgument( @@ -249,8 +261,8 @@ class DeformableConvV1GradOp : public framework::OperatorWithKernel { auto filter_dims = ctx->GetInputDim("Filter"); auto offset_dims = ctx->GetInputDim("Offset"); - PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Output")), true, - "the gradient of output(Out) must not be null"); + OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Output")), "Input", + "Output@Grad", "deformable_conv_v1_grad"); if (ctx->HasOutput(framework::GradVarName("Input"))) { ctx->SetOutputDim(framework::GradVarName("Input"), in_dims); } diff --git a/paddle/fluid/operators/detection/polygon_box_transform_op.cc b/paddle/fluid/operators/detection/polygon_box_transform_op.cc index ab134e2091..1fcd6cec9b 100644 --- a/paddle/fluid/operators/detection/polygon_box_transform_op.cc +++ b/paddle/fluid/operators/detection/polygon_box_transform_op.cc @@ -23,8 +23,9 @@ template class PolygonBoxTransformCPUKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()), - "It must use CUDAPlace."); + PADDLE_ENFORCE_EQ( + platform::is_cpu_place(ctx.GetPlace()), true, + platform::errors::InvalidArgument("It must use CUDAPlace.")); auto* in = ctx.Input("Input"); auto in_dims = in->dims(); const T* in_data = in->data(); @@ -56,18 +57,23 @@ class PolygonBoxTransformOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE( - ctx->HasInput("Input"), - "Input (Input) of polygon_box transform op should not be null."); - PADDLE_ENFORCE( - ctx->HasOutput("Output"), - "Output (Output) of polygon_box transform op should not be null."); + OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", + "polygon_box_transform"); + OP_INOUT_CHECK(ctx->HasOutput("Output"), "Output", "Output", + "polygon_box_transform"); auto in_dim = ctx->GetInputDim("Input"); - PADDLE_ENFORCE_EQ(in_dim.size(), 4, "input's rank must be 4."); + PADDLE_ENFORCE_EQ( + in_dim.size(), 4, + platform::errors::InvalidArgument( + "input's rank must be 4. But received: Input rank is [%d]", + in_dim.size())); PADDLE_ENFORCE_EQ(in_dim[1] % 2, 0, - "input's second dimension must be even."); + platform::errors::InvalidArgument( + "input's second dimension must be even. 
But " + "received: Input 2nd dim is [%d]", + in_dim[1])); ctx->SetOutputDim("Output", in_dim); } diff --git a/paddle/fluid/operators/detection/polygon_box_transform_op.cu b/paddle/fluid/operators/detection/polygon_box_transform_op.cu index e1eaf084a3..337a76f9f9 100644 --- a/paddle/fluid/operators/detection/polygon_box_transform_op.cu +++ b/paddle/fluid/operators/detection/polygon_box_transform_op.cu @@ -43,8 +43,9 @@ template class PolygonBoxTransformOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), - "It must use CUDAPlace."); + PADDLE_ENFORCE_EQ( + platform::is_gpu_place(ctx.GetPlace()), true, + platform::errors::InvalidArgument("It must use CUDAPlace.")); auto* in = ctx.Input("Input"); auto in_dims = in->dims(); const T* in_data = in->data(); diff --git a/paddle/fluid/operators/teacher_student_sigmoid_loss_op.cc b/paddle/fluid/operators/teacher_student_sigmoid_loss_op.cc index 5b547e13bc..f8d7dfdfbe 100644 --- a/paddle/fluid/operators/teacher_student_sigmoid_loss_op.cc +++ b/paddle/fluid/operators/teacher_student_sigmoid_loss_op.cc @@ -28,22 +28,38 @@ class TeacherStudentSigmoidLossOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); - PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null."); - PADDLE_ENFORCE(ctx->HasOutput("Y"), "Output(Y) should be not null."); + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", + "teacher_student_sigmoid_loss"); + OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", + "teacher_student_sigmoid_loss"); + OP_INOUT_CHECK(ctx->HasOutput("Y"), "Output", "Y", + "teacher_student_sigmoid_loss"); auto x_dims = ctx->GetInputDim("X"); auto label_dims = ctx->GetInputDim("Label"); - PADDLE_ENFORCE_EQ(x_dims.size(), 2UL, "Input(X)'s rank should be 2."); - PADDLE_ENFORCE_EQ(label_dims.size(), 2UL, - "Input(Label)'s rank should be 2."); + PADDLE_ENFORCE_EQ( + x_dims.size(), 2UL, + platform::errors::NotFound("Input(X)'s rank should be 2. But received: " + "Input(X)'s rank is [%d]", + x_dims.size())); + PADDLE_ENFORCE_EQ( + label_dims.size(), 2UL, + platform::errors::NotFound("Input(Label)'s rank should be 2. But " + "received Input(Label)'s rank is [%d]", + label_dims.size())); if (ctx->IsRuntime()) { - PADDLE_ENFORCE_EQ(x_dims[0], label_dims[0], - "The 1st dimension of Input(X) and Input(Label) should " - "be equal."); + PADDLE_ENFORCE_EQ( + x_dims[0], label_dims[0], + platform::errors::InvalidArgument( + "The 1st dimension of Input(X) and Input(Label) should " + "be equal. The diff is [%d] vs [%d]", + x_dims[0], label_dims[0])); PADDLE_ENFORCE_EQ(label_dims[1], 1UL, - "The 2nd dimension of " - "Input(Label) should be 1."); + platform::errors::InvalidArgument( + "The 2nd dimension of " + "Input(Label) should be 1. 
+                            "Input(Label) should be 1. But received "
+                            "Input(Label)'s 2nd dim is [%d]",
+                            label_dims[1]));
     }
     ctx->SetOutputDim("Y", {x_dims[0], 1});
     ctx->ShareLoD("X", /*->*/ "Y");
@@ -87,32 +103,58 @@ class TeacherStudentSigmoidLossGradientOp
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")),
-                   "Input(Y@GRAD) should be not null.");
-    PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
-                   "Output(X@GRAD) should be not null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X",
+                   "teacher_student_sigmoid_loss_grad");
+    OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label",
+                   "teacher_student_sigmoid_loss_grad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Y")), "Input",
+                   "Y@Grad", "teacher_student_sigmoid_loss_grad");
+    OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output",
+                   "X@Grad", "teacher_student_sigmoid_loss_grad");
 
     auto x_dims = ctx->GetInputDim("X");
     auto label_dims = ctx->GetInputDim("Label");
     auto dy_dims = ctx->GetInputDim(framework::GradVarName("Y"));
-    PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank should be 2.");
-    PADDLE_ENFORCE_EQ(dy_dims.size(), 2, "Input(Y@Grad)'s rank should be 2.");
-    PADDLE_ENFORCE_EQ(label_dims.size(), 2, "Input(Label)'s rank should be 2.");
+    PADDLE_ENFORCE_EQ(
+        x_dims.size(), 2,
+        platform::errors::InvalidArgument(
+            "Input(X)'s rank should be 2. But received Input(X)'s rank is [%d]",
+            x_dims.size()));
+    PADDLE_ENFORCE_EQ(dy_dims.size(), 2,
+                      platform::errors::InvalidArgument(
+                          "Input(Y@Grad)'s rank should be 2. But received "
+                          "Input(Y@Grad)'s rank is [%d]",
+                          dy_dims.size()));
+    PADDLE_ENFORCE_EQ(label_dims.size(), 2,
+                      platform::errors::InvalidArgument(
+                          "Input(Label)'s rank should be 2. But received "
+                          "Input(Label)'s rank is [%d]",
+                          label_dims.size()));
 
     if (ctx->IsRuntime()) {
-      PADDLE_ENFORCE_EQ(x_dims[0], label_dims[0],
-                        "The 1st dimension of Input(X) and Input(Label) should "
-                        "be equal.");
+      PADDLE_ENFORCE_EQ(
+          x_dims[0], label_dims[0],
+          platform::errors::InvalidArgument(
+              "The 1st dimension of Input(X) and Input(Label) should "
+              "be equal. The diff is [%d] vs [%d]",
+              x_dims[0], label_dims[0]));
       PADDLE_ENFORCE_EQ(
           x_dims[0], dy_dims[0],
-          "The 1st dimension of Input(X) and Input(Y@Grad) should "
-          "be equal.");
+          platform::errors::InvalidArgument(
+              "The 1st dimension of Input(X) and Input(Y@Grad) should "
+              "be equal. The diff is [%d] vs [%d]",
+              x_dims[0], dy_dims[0]));
       PADDLE_ENFORCE_EQ(dy_dims[1], 1,
-                        "The 2nd dimension of Input(Y@Grad) should be 1.");
-      PADDLE_ENFORCE_EQ(label_dims[1], 1,
-                        "When Attr(soft_label) == false, the 2nd dimension of "
-                        "Input(Label) should be 1.");
+                        platform::errors::InvalidArgument(
+                            "The 2nd dimension of Input(Y@Grad) should be 1. "
+                            "But received Input(Y@Grad)'s 2nd dim is [%d]",
+                            dy_dims[1]));
+      PADDLE_ENFORCE_EQ(
+          label_dims[1], 1,
+          platform::errors::InvalidArgument(
+              "When Attr(soft_label) == false, the 2nd dimension of "
+              "Input(Label) should be 1. But received Input(Label)'s 2nd dim "
+              "is [%d]",
+              label_dims[1]));
     }
     ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
     ctx->ShareLoD("X", framework::GradVarName("X"));
diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py
index a91575d541..18f057623a 100644
--- a/python/paddle/fluid/layers/detection.py
+++ b/python/paddle/fluid/layers/detection.py
@@ -891,6 +891,8 @@ def polygon_box_transform(input, name=None):
             input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
             out = fluid.layers.polygon_box_transform(input)
     """
+    check_variable_and_dtype(input, "input", ['float32', 'float64'],
+                             'polygon_box_transform')
     helper = LayerHelper("polygon_box_transform", **locals())
     output = helper.create_variable_for_type_inference(dtype=input.dtype)
 
diff --git a/python/paddle/fluid/layers/learning_rate_scheduler.py b/python/paddle/fluid/layers/learning_rate_scheduler.py
index 76e4fe5fcf..0115b398e6 100644
--- a/python/paddle/fluid/layers/learning_rate_scheduler.py
+++ b/python/paddle/fluid/layers/learning_rate_scheduler.py
@@ -33,6 +33,7 @@ from ..framework import default_main_program, Parameter, unique_name, name_scope
 from ..framework import Variable
 from ..framework import in_dygraph_mode
 from ..dygraph import learning_rate_scheduler as imperate_lr
+from ..data_feeder import check_variable_and_dtype, check_type
 
 __all__ = [
     'exponential_decay', 'natural_exp_decay', 'inverse_time_decay',
@@ -449,6 +450,8 @@ def cosine_decay(learning_rate, step_each_epoch, epochs):
             lr = fluid.layers.cosine_decay(
             learning_rate = base_lr, step_each_epoch=10000, epochs=120)
     """
+    check_type(learning_rate, 'learning_rate', (float, tensor.Variable),
+               'cosine_decay')
 
     with default_main_program()._lr_schedule_guard():
         if in_dygraph_mode():
diff --git a/python/paddle/fluid/layers/loss.py b/python/paddle/fluid/layers/loss.py
index 284ee8e389..dc61aa1f4c 100644
--- a/python/paddle/fluid/layers/loss.py
+++ b/python/paddle/fluid/layers/loss.py
@@ -334,6 +334,10 @@ def square_error_cost(input, label):
             # [0.04000002]
 
     """
+    check_variable_and_dtype(input, "input", ['float32', 'float64'],
+                             'square_error_cost')
+    check_variable_and_dtype(label, "label", ['float32', 'float64'],
+                             'square_error_cost')
     helper = LayerHelper('square_error_cost', **locals())
     minus_out = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
@@ -1481,6 +1485,11 @@ def teacher_student_sigmoid_loss(input,
           cost = fluid.layers.teacher_student_sigmoid_loss(input=similarity, label=label)
 
     """
+    check_variable_and_dtype(input, "input", ['float32', 'float64'],
+                             'teacher_student_sigmoid_loss')
+    check_variable_and_dtype(label, "label", ['float32', 'float64'],
+                             'teacher_student_sigmoid_loss')
+
     helper = LayerHelper('teacher_student_sigmoid_loss', **locals())
     out = helper.create_variable(dtype=input.dtype)
     helper.append_op(
@@ -1715,4 +1724,6 @@ def mse_loss(input, label):
             # [0.04000002]
 
     """
+    check_variable_and_dtype(input, "input", ['float32', 'float64'], 'mse_loss')
+    check_variable_and_dtype(label, "label", ['float32', 'float64'], 'mse_loss')
     return nn.reduce_mean(square_error_cost(input, label))
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index a232d0b514..9f010b1592 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -13560,6 +13560,12 @@ def deformable_conv(input,
                                           num_filters=2, filter_size=filter_size, padding=1, modulated=False)
     """
 
+    check_variable_and_dtype(input, "input", ['float32', 'float64'],
+                             'deformable_conv')
+    check_variable_and_dtype(offset, "offset", ['float32', 'float64'],
+                             'deformable_conv')
+    check_type(mask, 'mask', (Variable, type(None)), 'deformable_conv')
+
     num_channels = input.shape[1]
     assert param_attr is not False, "param_attr should not be False here."
diff --git a/python/paddle/fluid/tests/unittests/test_deformable_conv_op.py b/python/paddle/fluid/tests/unittests/test_deformable_conv_op.py
index 13651be800..e685d7b5f5 100644
--- a/python/paddle/fluid/tests/unittests/test_deformable_conv_op.py
+++ b/python/paddle/fluid/tests/unittests/test_deformable_conv_op.py
@@ -18,6 +18,7 @@ import unittest
 import numpy as np
 
 import paddle.fluid.core as core
+import paddle.fluid as fluid
 from op_test import OpTest
 
 
@@ -256,5 +257,31 @@ class TestWithGroup(TestModulatedDeformableConvOp):
         self.groups = 2
 
 
+class TestModulatedDeformableConvInvalidInput(unittest.TestCase):
+    def test_error(self):
+        def test_invalid_input():
+            input = [1, 3, 32, 32]
+            offset = fluid.data(
+                name='offset', shape=[None, 3, 32, 32], dtype='float32')
+            mask = fluid.data(
+                name='mask', shape=[None, 3, 32, 32], dtype='float32')
+            loss = fluid.layers.deformable_conv(
+                input, offset, mask, num_filters=4, filter_size=1)
+
+        self.assertRaises(TypeError, test_invalid_input)
+
+        def test_invalid_offset():
+            input = fluid.data(
+                name='input', shape=[None, 3, 32, 32], dtype='int32')
+            offset = fluid.data(
+                name='offset', shape=[None, 3, 32, 32], dtype='float32')
+            mask = fluid.data(
+                name='mask', shape=[None, 3, 32, 32], dtype='float32')
+            loss = fluid.layers.deformable_conv(
+                input, offset, mask, num_filters=4, filter_size=1)
+
+        self.assertRaises(TypeError, test_invalid_offset)
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_deformable_conv_v1_op.py b/python/paddle/fluid/tests/unittests/test_deformable_conv_v1_op.py
index 2f950b53e0..769f05b0fc 100644
--- a/python/paddle/fluid/tests/unittests/test_deformable_conv_v1_op.py
+++ b/python/paddle/fluid/tests/unittests/test_deformable_conv_v1_op.py
@@ -18,6 +18,7 @@ import unittest
 import numpy as np
 
 import paddle.fluid.core as core
+import paddle.fluid as fluid
 from op_test import OpTest
 
 
@@ -252,5 +253,37 @@ class TestWithGroup(TestModulatedDeformableConvOp):
         self.groups = 2
 
 
+class TestModulatedDeformableConvV1InvalidInput(unittest.TestCase):
+    def test_error(self):
+        def test_invalid_input():
+            input = [1, 3, 32, 32]
+            offset = fluid.data(
+                name='offset', shape=[None, 3, 32, 32], dtype='float32')
+            loss = fluid.layers.deformable_conv(
+                input,
+                offset,
+                mask=None,
+                num_filters=4,
+                filter_size=1,
+                modulated=False)
+
+        self.assertRaises(TypeError, test_invalid_input)
+
+        def test_invalid_offset():
+            input = fluid.data(
+                name='input', shape=[None, 3, 32, 32], dtype='int32')
+            offset = fluid.data(
+                name='offset', shape=[None, 3, 32, 32], dtype='float32')
+            loss = fluid.layers.deformable_conv(
+                input,
+                offset,
+                mask=None,
+                num_filters=4,
+                filter_size=1,
+                modulated=False)
+
+        self.assertRaises(TypeError, test_invalid_offset)
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_mse_loss.py b/python/paddle/fluid/tests/unittests/test_mse_loss.py
index 4e8d9c4955..7ad8086bd8 100644
--- a/python/paddle/fluid/tests/unittests/test_mse_loss.py
+++ b/python/paddle/fluid/tests/unittests/test_mse_loss.py
@@ -47,5 +47,22 @@ class TestMseLoss(unittest.TestCase):
         self.assertTrue(np.isclose(np_result, result).all())
 
 
+class TestMseInvalidInput(unittest.TestCase):
+    def test_error(self):
+        def test_invalid_input():
+            input = [256, 3]
+            label = fluid.data(name='label', shape=[None, 3], dtype='float32')
+            loss = fluid.layers.mse_loss(input, label)
+
+        self.assertRaises(TypeError, test_invalid_input)
+
+        def test_invalid_label():
+            input = fluid.data(name='input1', shape=[None, 3], dtype='float32')
+            label = [256, 3]
+            loss = fluid.layers.mse_loss(input, label)
+
+        self.assertRaises(TypeError, test_invalid_label)
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_polygon_box_transform.py b/python/paddle/fluid/tests/unittests/test_polygon_box_transform.py
index 7f266056a9..578b01b02d 100644
--- a/python/paddle/fluid/tests/unittests/test_polygon_box_transform.py
+++ b/python/paddle/fluid/tests/unittests/test_polygon_box_transform.py
@@ -16,6 +16,7 @@ from __future__ import print_function
 
 import unittest
 import numpy as np
+import paddle.fluid as fluid
 from op_test import OpTest
 
 
@@ -66,5 +67,15 @@ class TestCase2(TestPolygonBoxRestoreOp):
         self.input_shape = (3, 12, 4, 5)
 
 
+class TestPolygonBoxInvalidInput(unittest.TestCase):
+    def test_error(self):
+        def test_invalid_input():
+            input = fluid.data(
+                name='input', shape=[None, 3, 32, 32], dtype='int64')
+            out = fluid.layers.polygon_box_transform(input)
+
+        self.assertRaises(TypeError, test_invalid_input)
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_square_error_cost.py b/python/paddle/fluid/tests/unittests/test_square_error_cost.py
index b83c0ba634..a10d0efe3c 100644
--- a/python/paddle/fluid/tests/unittests/test_square_error_cost.py
+++ b/python/paddle/fluid/tests/unittests/test_square_error_cost.py
@@ -48,5 +48,22 @@ class TestSquareErrorCost(unittest.TestCase):
         self.assertTrue(np.isclose(np_result, result).all())
 
 
+class TestSquareErrorInvalidInput(unittest.TestCase):
+    def test_error(self):
+        def test_invalid_input():
+            input = [256, 3]
+            label = fluid.data(name='label1', shape=[None, 3], dtype='float32')
+            loss = fluid.layers.square_error_cost(input, label)
+
+        self.assertRaises(TypeError, test_invalid_input)
+
+        def test_invalid_label():
+            input = fluid.data(name='input2', shape=[None, 3], dtype='float32')
+            label = [256, 3]
+            loss = fluid.layers.square_error_cost(input, label)
+
+        self.assertRaises(TypeError, test_invalid_label)
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_teacher_student_sigmoid_loss_op.py b/python/paddle/fluid/tests/unittests/test_teacher_student_sigmoid_loss_op.py
index f133d470c5..e0142776c8 100644
--- a/python/paddle/fluid/tests/unittests/test_teacher_student_sigmoid_loss_op.py
+++ b/python/paddle/fluid/tests/unittests/test_teacher_student_sigmoid_loss_op.py
@@ -19,6 +19,7 @@ from op_test import OpTest
 from scipy.special import logit
 from scipy.special import expit
 import unittest
+import paddle.fluid as fluid
 
 
 class TestTeacherStudentSigmoidLossOp(OpTest):
@@ -57,3 +58,20 @@ class TestTeacherStudentSigmoidLossOp(OpTest):
 
     def test_check_grad(self):
         self.check_grad(["X"], "Y", numeric_grad_delta=0.005)
+
+
+class TestTeacherStudentSigmoidLossInvalidInput(unittest.TestCase):
+    def test_error(self):
+        def test_invalid_input():
+            input = [512, 1]
+            label = fluid.data(name='label', shape=[None, 1], dtype='float32')
+            loss = fluid.layers.teacher_student_sigmoid_loss(input, label)
+
+        self.assertRaises(TypeError, test_invalid_input)
+
+        def test_invalid_label():
+            input = fluid.data(name='input1', shape=[None, 1], dtype='float32')
+            label = [512, 1]
+            loss = fluid.layers.teacher_student_sigmoid_loss(input, label)
+
+        self.assertRaises(TypeError, test_invalid_label)
-- 
GitLab