From 05c9642dc3b7caef2b504ef096eea14fce80acac Mon Sep 17 00:00:00 2001 From: suytingwan Date: Thu, 14 May 2020 10:32:59 +0800 Subject: [PATCH] Update paddle enforce message (#24498) * test=develop error message update --- .../operators/bilinear_tensor_product_op.cc | 93 ++++++++++----- .../detection/anchor_generator_op.cc | 41 ++++--- .../operators/detection/bipartite_match_op.cc | 44 ++++--- .../detection/generate_mask_labels_op.cc | 108 +++++++++++------ .../operators/detection/target_assign_op.cc | 40 ++++--- .../operators/detection/target_assign_op.h | 9 +- paddle/fluid/operators/filter_by_instag_op.cc | 39 ++++--- paddle/fluid/operators/one_hot_v2_op.h | 14 ++- paddle/fluid/operators/pool_with_index_op.cc | 46 +++++--- paddle/fluid/operators/psroi_pool_op.cc | 70 ++++++----- paddle/fluid/operators/psroi_pool_op.h | 12 +- paddle/fluid/operators/roi_pool_op.cc | 5 +- paddle/fluid/operators/roi_pool_op.h | 12 +- paddle/fluid/operators/softmax_op.cc | 52 ++++++--- .../softmax_with_cross_entropy_op.cc | 109 +++++++++++------- .../operators/softmax_with_cross_entropy_op.h | 5 +- paddle/fluid/operators/spp_op.cc | 25 ++-- paddle/fluid/operators/unsqueeze_op.cc | 78 ++++++++----- paddle/fluid/operators/unsqueeze_op.h | 13 ++- 19 files changed, 534 insertions(+), 281 deletions(-) diff --git a/paddle/fluid/operators/bilinear_tensor_product_op.cc b/paddle/fluid/operators/bilinear_tensor_product_op.cc index d3eb7561ec..a2ba74dd7e 100644 --- a/paddle/fluid/operators/bilinear_tensor_product_op.cc +++ b/paddle/fluid/operators/bilinear_tensor_product_op.cc @@ -28,39 +28,61 @@ class BilinearTensorProductOp : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Weight"), - "Input(Weight) should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null."); + PADDLE_ENFORCE_EQ( + ctx->HasInput("X"), true, + platform::errors::InvalidArgument("Input(X) should not be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasInput("Y"), true, + platform::errors::InvalidArgument("Input(Y) should not be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasInput("Weight"), true, + platform::errors::InvalidArgument("Input(Weight) should not be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasOutput("Out"), true, + platform::errors::InvalidArgument("Output(Out) should not be null.")); auto x_dims = ctx->GetInputDim("X"); auto y_dims = ctx->GetInputDim("Y"); auto weight_dims = ctx->GetInputDim("Weight"); - PADDLE_ENFORCE_EQ(x_dims.size(), 2UL, "The input(X) must be a 2D Tensor."); - PADDLE_ENFORCE_EQ(y_dims.size(), 2UL, "The input(Y) must be a 2D Tensor."); + PADDLE_ENFORCE_EQ( + x_dims.size(), 2UL, + platform::errors::InvalidArgument("The input(X) must be a 2D Tensor.")); + PADDLE_ENFORCE_EQ( + y_dims.size(), 2UL, + platform::errors::InvalidArgument("The input(Y) must be a 2D Tensor.")); PADDLE_ENFORCE_EQ(weight_dims.size(), 3UL, - "The input(Weight) must be a 3D tensor."); + platform::errors::InvalidArgument( + "The input(Weight) must be a 3D tensor.")); if (ctx->IsRuntime() || (x_dims[0] > 0 && y_dims[0] > 0)) { - PADDLE_ENFORCE_EQ(x_dims[0], y_dims[0], - "The first dimension(batch_size) of input(X) must be " - "equal to the first dimension of the input(Y)."); + PADDLE_ENFORCE_EQ( + x_dims[0], y_dims[0], + platform::errors::InvalidArgument( + "The first dimension(batch_size) of 
input(X) must be " + "equal to the first dimension of the input(Y).")); } PADDLE_ENFORCE_EQ(x_dims[1], weight_dims[1], - "The second dimension of input(X) must be equal to " - "the second dimension of the input(Weight)."); + platform::errors::InvalidArgument( + "The second dimension of input(X) must be equal to " + "the second dimension of the input(Weight).")); PADDLE_ENFORCE_EQ(y_dims[1], weight_dims[2], - "The second dimension of input(Y) must be equal to " - "the third dimension of the input(Weight)."); + platform::errors::InvalidArgument( + "The second dimension of input(Y) must be equal to " + "the third dimension of the input(Weight).")); if (ctx->HasInput("Bias")) { auto bias_dims = ctx->GetInputDim("Bias"); - PADDLE_ENFORCE(bias_dims.size() == 2UL && bias_dims[0] == 1UL, - "The Input(Bias) must be a 2-D tensor with " - "the 2nd dimension fixed to 1 (a row vector)."); + PADDLE_ENFORCE_EQ(bias_dims.size(), 2UL, + platform::errors::InvalidArgument( + "The Input(Bias) must be a 2-D tensor with " + "the 2nd dimension fixed to 1 (a row vector).")); + PADDLE_ENFORCE_EQ(bias_dims[0], 1UL, + platform::errors::InvalidArgument( + "The Input(Bias) must be a 2-D tensor with " + "the 2nd dimension fixed to 1 (a row vector).")); PADDLE_ENFORCE_EQ(bias_dims[1], weight_dims[0], - "The second dimension of input(Bias) must be equal " - "to the first dimension of the input(Weight)."); + platform::errors::InvalidArgument( + "The second dimension of input(Bias) must be equal " + "to the first dimension of the input(Weight).")); } ctx->SetOutputDim("Out", {x_dims[0], weight_dims[0]}); @@ -104,27 +126,36 @@ class BilinearTensorProductOpGrad : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Weight"), - "Input(Weight) should not be null."); - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), - "Input(Out@GRAD) should not be null."); + PADDLE_ENFORCE_EQ( + ctx->HasInput("X"), true, + platform::errors::InvalidArgument("Input(X) should not be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasInput("Y"), true, + platform::errors::InvalidArgument("Input(Y) should not be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasInput("Weight"), true, + platform::errors::InvalidArgument("Input(Weight) should not be null.")); + PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true, + platform::errors::InvalidArgument( + "Input(Out@GRAD) should not be null.")); auto x_dims = ctx->GetInputDim("X"); auto y_dims = ctx->GetInputDim("Y"); auto weight_dims = ctx->GetInputDim("Weight"); auto out_dims = ctx->GetInputDim(framework::GradVarName("Out")); PADDLE_ENFORCE_EQ(out_dims.size(), 2UL, - "The input(Out@GRAD) must be a 2D Tensor."); + platform::errors::InvalidArgument( + "The input(Out@GRAD) must be a 2D Tensor.")); PADDLE_ENFORCE_EQ( x_dims[0], out_dims[0], - "The first dimension(batch_size) of input(Out@GRAD) must be " - "equal to the first dimension of the Input(X)."); + platform::errors::InvalidArgument( + "The first dimension(batch_size) of input(Out@GRAD) must be " + "equal to the first dimension of the Input(X).")); PADDLE_ENFORCE_EQ( weight_dims[0], out_dims[1], - "The second dimension of input(Out@GRAD) must be equal to " - "the third dimension of the Input(Weight)."); + platform::errors::InvalidArgument( + "The second dimension of input(Out@GRAD) must be equal to " + "the 
third dimension of the Input(Weight).")); auto bias_grad_name = framework::GradVarName("Bias"); if (ctx->HasOutput(bias_grad_name)) { diff --git a/paddle/fluid/operators/detection/anchor_generator_op.cc b/paddle/fluid/operators/detection/anchor_generator_op.cc index a3030dbdd2..c03e4bfab8 100644 --- a/paddle/fluid/operators/detection/anchor_generator_op.cc +++ b/paddle/fluid/operators/detection/anchor_generator_op.cc @@ -22,16 +22,23 @@ class AnchorGeneratorOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Input"), - "Input(Input) of AnchorGeneratorOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Anchors"), - "Output(Anchors) of AnchorGeneratorOp should not be null."); - PADDLE_ENFORCE( - ctx->HasOutput("Variances"), - "Output(Variances) of AnchorGeneratorOp should not be null."); + PADDLE_ENFORCE_EQ( + ctx->HasInput("Input"), true, + platform::errors::InvalidArgument( + "Input(Input) of AnchorGeneratorOp should not be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasOutput("Anchors"), true, + platform::errors::InvalidArgument( + "Output(Anchors) of AnchorGeneratorOp should not be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasOutput("Variances"), true, + platform::errors::InvalidArgument( + "Output(Variances) of AnchorGeneratorOp should not be null.")); auto input_dims = ctx->GetInputDim("Input"); - PADDLE_ENFORCE(input_dims.size() == 4, "The layout of input is NCHW."); + PADDLE_ENFORCE_EQ( + input_dims.size(), 4, + platform::errors::InvalidArgument("The layout of input is NCHW.")); auto anchor_sizes = ctx->Attrs().Get>("anchor_sizes"); auto aspect_ratios = ctx->Attrs().Get>("aspect_ratios"); @@ -87,10 +94,12 @@ class AnchorGeneratorOpMaker : public framework::OpProtoAndCheckerMaker { "equals to 64**2.") .AddCustomChecker([](const std::vector& anchor_sizes) { PADDLE_ENFORCE_GT(anchor_sizes.size(), 0UL, - "Size of anchor_sizes must be at least 1."); + platform::errors::InvalidArgument( + "Size of anchor_sizes must be at least 1.")); for (size_t i = 0; i < anchor_sizes.size(); ++i) { PADDLE_ENFORCE_GT(anchor_sizes[i], 0.0, - "anchor_sizes[%d] must be positive.", i); + platform::errors::InvalidArgument( + "anchor_sizes[%d] must be positive.", i)); } }); AddAttr>( @@ -105,10 +114,12 @@ class AnchorGeneratorOpMaker : public framework::OpProtoAndCheckerMaker { "in box regression deltas") .AddCustomChecker([](const std::vector& variances) { PADDLE_ENFORCE_EQ(variances.size(), 4UL, - "Must and only provide 4 variance."); + platform::errors::InvalidArgument( + "Must provide 4 variance only.")); for (size_t i = 0; i < variances.size(); ++i) { PADDLE_ENFORCE_GT(variances[i], 0.0, - "variance[%d] must be greater than 0.", i); + platform::errors::InvalidArgument( + "variance[%d] must be greater than 0.", i)); } }); @@ -119,10 +130,12 @@ class AnchorGeneratorOpMaker : public framework::OpProtoAndCheckerMaker { .AddCustomChecker([](const std::vector& stride) { PADDLE_ENFORCE_EQ( stride.size(), 2UL, - "Must and only provide 2 stride for width and height."); + platform::errors::InvalidArgument( + "Must provide 2 stride for width and height only.")); for (size_t i = 0; i < stride.size(); ++i) { PADDLE_ENFORCE_GT(stride[i], 0.0, - "stride[%d] should be larger than 0.", i); + platform::errors::InvalidArgument( + "stride[%d] should be larger than 0.", i)); } }); diff --git a/paddle/fluid/operators/detection/bipartite_match_op.cc 
b/paddle/fluid/operators/detection/bipartite_match_op.cc index de4579919c..16e1699e12 100644 --- a/paddle/fluid/operators/detection/bipartite_match_op.cc +++ b/paddle/fluid/operators/detection/bipartite_match_op.cc @@ -26,17 +26,23 @@ class BipartiteMatchOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("DistMat"), - "Input(DistMat) of BipartiteMatch should not be null."); - PADDLE_ENFORCE( - ctx->HasOutput("ColToRowMatchIndices"), - "Output(ColToRowMatchIndices) of BipartiteMatch should not be null."); - PADDLE_ENFORCE( - ctx->HasOutput("ColToRowMatchDist"), - "Output(ColToRowMatchDist) of BipartiteMatch should not be null."); + PADDLE_ENFORCE_EQ( + ctx->HasInput("DistMat"), true, + platform::errors::InvalidArgument( + "Input(DistMat) of BipartiteMatch should not be null.")); + PADDLE_ENFORCE_EQ(ctx->HasOutput("ColToRowMatchIndices"), true, + platform::errors::InvalidArgument( + "Output(ColToRowMatchIndices) of BipartiteMatch " + "should not be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasOutput("ColToRowMatchDist"), true, + platform::errors::InvalidArgument( + "Output(ColToRowMatchDist) of BipartiteMatch should not be null.")); auto dims = ctx->GetInputDim("DistMat"); - PADDLE_ENFORCE_EQ(dims.size(), 2, "The rank of Input(DistMat) must be 2."); + PADDLE_ENFORCE_EQ(dims.size(), 2, + platform::errors::InvalidArgument( + "The rank of Input(DistMat) must be 2.")); ctx->SetOutputDim("ColToRowMatchIndices", dims); ctx->SetOutputDim("ColToRowMatchDist", dims); @@ -64,7 +70,9 @@ class BipartiteMatchKernel : public framework::OpKernel { // The match_dist must be initialized to 0 at first. void BipartiteMatch(const Tensor& dist, int* match_indices, T* match_dist) const { - PADDLE_ENFORCE_EQ(dist.dims().size(), 2, "The rank of dist must be 2."); + PADDLE_ENFORCE_EQ( + dist.dims().size(), 2, + platform::errors::InvalidArgument("The rank of dist must be 2.")); int64_t row = dist.dims()[0]; int64_t col = dist.dims()[1]; auto* dist_data = dist.data(); @@ -127,7 +135,11 @@ class BipartiteMatchKernel : public framework::OpKernel { // Cannot find good match. break; } else { - PADDLE_ENFORCE_EQ(match_indices[max_idx], -1); + PADDLE_ENFORCE_EQ( + match_indices[max_idx], -1, + platform::errors::InvalidArgument( + "The match_indices must be initialized to -1 at [%d].", + max_idx)); match_indices[max_idx] = max_row_idx; match_dist[max_idx] = max_dist; // Erase the row index. @@ -163,7 +175,10 @@ class BipartiteMatchKernel : public framework::OpKernel { } } if (max_row_idx != -1) { - PADDLE_ENFORCE_EQ(match_indices[j], -1); + PADDLE_ENFORCE_EQ( + match_indices[j], -1, + platform::errors::InvalidArgument( + "The match_indices must be initialized to -1 at [%d].", j)); match_indices[j] = max_row_idx; match_dist[j] = max_dist; } @@ -183,8 +198,9 @@ class BipartiteMatchKernel : public framework::OpKernel { ? 
1 : static_cast(dist_mat->lod().back().size() - 1); if (dist_mat->lod().size()) { - PADDLE_ENFORCE_EQ(dist_mat->lod().size(), 1UL, - "Only support 1 level of LoD."); + PADDLE_ENFORCE_EQ( + dist_mat->lod().size(), 1UL, + platform::errors::InvalidArgument("Only support 1 level of LoD.")); } match_indices->mutable_data({n, col}, context.GetPlace()); match_dist->mutable_data({n, col}, context.GetPlace()); diff --git a/paddle/fluid/operators/detection/generate_mask_labels_op.cc b/paddle/fluid/operators/detection/generate_mask_labels_op.cc index b2e9e705fa..afa4ccf25d 100644 --- a/paddle/fluid/operators/detection/generate_mask_labels_op.cc +++ b/paddle/fluid/operators/detection/generate_mask_labels_op.cc @@ -40,35 +40,49 @@ class GenerateMaskLabelsOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("ImInfo"), "Input(ImInfo) shouldn't be null."); - PADDLE_ENFORCE(ctx->HasInput("GtClasses"), - "Input(GtClasses) shouldn't be null."); - PADDLE_ENFORCE(ctx->HasInput("IsCrowd"), - "Input(IsCrowd) shouldn't be null."); - PADDLE_ENFORCE(ctx->HasInput("GtSegms"), - "Input(GtSegms) shouldn't be null."); - PADDLE_ENFORCE(ctx->HasInput("Rois"), "Input(Rois) shouldn't be null."); - PADDLE_ENFORCE(ctx->HasInput("LabelsInt32"), - "Input(LabelsInt32) shouldn't be null."); - - PADDLE_ENFORCE( - ctx->HasOutput("MaskRois"), - "Output(MaskRois) of GenerateMaskLabelsOp should not be null"); - PADDLE_ENFORCE( - ctx->HasOutput("RoiHasMaskInt32"), - "Output(RoiHasMaskInt32) of GenerateMaskLabelsOp should not be null"); - PADDLE_ENFORCE( - ctx->HasOutput("MaskInt32"), - "Output(MaskInt32) of GenerateMaskLabelsOp should not be null"); + PADDLE_ENFORCE_EQ( + ctx->HasInput("ImInfo"), true, + platform::errors::InvalidArgument("Input(ImInfo) shouldn't be null.")); + PADDLE_ENFORCE_EQ(ctx->HasInput("GtClasses"), true, + platform::errors::InvalidArgument( + "Input(GtClasses) shouldn't be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasInput("IsCrowd"), true, + platform::errors::InvalidArgument("Input(IsCrowd) shouldn't be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasInput("GtSegms"), true, + platform::errors::InvalidArgument("Input(GtSegms) shouldn't be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasInput("Rois"), true, + platform::errors::InvalidArgument("Input(Rois) shouldn't be null.")); + PADDLE_ENFORCE_EQ(ctx->HasInput("LabelsInt32"), true, + platform::errors::InvalidArgument( + "Input(LabelsInt32) shouldn't be null.")); + + PADDLE_ENFORCE_EQ( + ctx->HasOutput("MaskRois"), true, + platform::errors::InvalidArgument( + "Output(MaskRois) of GenerateMaskLabelsOp should not be null")); + PADDLE_ENFORCE_EQ(ctx->HasOutput("RoiHasMaskInt32"), true, + platform::errors::InvalidArgument( + "Output(RoiHasMaskInt32) of GenerateMaskLabelsOp " + "should not be null")); + PADDLE_ENFORCE_EQ( + ctx->HasOutput("MaskInt32"), true, + platform::errors::InvalidArgument( + "Output(MaskInt32) of GenerateMaskLabelsOp should not be null")); auto im_info_dims = ctx->GetInputDim("ImInfo"); auto gt_segms_dims = ctx->GetInputDim("GtSegms"); PADDLE_ENFORCE_EQ(im_info_dims.size(), 2, - "The rank of Input(ImInfo) must be 2."); + platform::errors::InvalidArgument( + "The rank of Input(ImInfo) must be 2.")); PADDLE_ENFORCE_EQ(gt_segms_dims.size(), 2, - "The rank of Input(GtSegms) must be 2."); + platform::errors::InvalidArgument( + "The rank of Input(GtSegms) must be 2.")); PADDLE_ENFORCE_EQ(gt_segms_dims[1], 2, - "The second 
dim of Input(GtSegms) must be 2."); + platform::errors::InvalidArgument( + "The second dim of Input(GtSegms) must be 2.")); int num_classes = ctx->Attrs().Get("num_classes"); int resolution = ctx->Attrs().Get("resolution"); @@ -134,7 +148,11 @@ std::vector SampleMaskForOneImage( const int* gt_classes_data = gt_classes.data(); const int* is_crowd_data = is_crowd.data(); const int* label_int32_data = label_int32.data(); - PADDLE_ENFORCE_EQ(roi_size, label_int32.dims()[0]); + PADDLE_ENFORCE_EQ(roi_size, label_int32.dims()[0], + platform::errors::InvalidArgument( + "The first dim of label [%d] is the different from " + "roi_size [%d], they should be same.", + label_int32.dims()[0], roi_size)); std::vector mask_gt_inds, fg_inds; std::vector>> gt_polys; @@ -155,7 +173,12 @@ std::vector SampleMaskForOneImage( for (int j = 0; j < poly_num; ++j) { int s = lod2[s_idx + j]; int e = lod2[s_idx + j + 1]; - PADDLE_ENFORCE_NE(s, e); + PADDLE_ENFORCE_NE(s, e, + platform::errors::InvalidArgument( + "The start point and the end point in the poly " + "segment [%d] should not be same, but received " + "the start point [%d] and the end point [%d].", + i, s, e)); std::vector plts(polys_data + s * 2, polys_data + e * 2); polys.push_back(plts); } @@ -295,19 +318,34 @@ class GenerateMaskLabelsKernel : public framework::OpKernel { int num_classes = ctx.Attr("num_classes"); int resolution = ctx.Attr("resolution"); - PADDLE_ENFORCE_EQ(gt_classes->lod().size(), 1UL, - "GenerateMaskLabelsOp gt_classes needs 1 level of LoD"); - PADDLE_ENFORCE_EQ(is_crowd->lod().size(), 1UL, - "GenerateMaskLabelsOp is_crowd needs 1 level of LoD"); + PADDLE_ENFORCE_EQ( + gt_classes->lod().size(), 1UL, + platform::errors::InvalidArgument( + "GenerateMaskLabelsOp gt_classes needs 1 level of LoD")); + PADDLE_ENFORCE_EQ( + is_crowd->lod().size(), 1UL, + platform::errors::InvalidArgument( + "GenerateMaskLabelsOp is_crowd needs 1 level of LoD")); PADDLE_ENFORCE_EQ(rois->lod().size(), 1UL, - "GenerateMaskLabelsOp rois needs 1 level of LoD"); - PADDLE_ENFORCE_EQ(label_int32->lod().size(), 1UL, - "GenerateMaskLabelsOp label_int32 needs 1 level of LoD"); - - PADDLE_ENFORCE_EQ(gt_segms->lod().size(), 3UL); + platform::errors::InvalidArgument( + "GenerateMaskLabelsOp rois needs 1 level of LoD")); + PADDLE_ENFORCE_EQ( + label_int32->lod().size(), 1UL, + platform::errors::InvalidArgument( + "GenerateMaskLabelsOp label_int32 needs 1 level of LoD")); + + PADDLE_ENFORCE_EQ( + gt_segms->lod().size(), 3UL, + platform::errors::InvalidArgument( + "GenerateMaskLabelsOp gt_segms needs 3 level of LoD")); int64_t n = static_cast(gt_classes->lod().back().size() - 1); - PADDLE_ENFORCE_EQ(gt_segms->lod()[0].size() - 1, n); + PADDLE_ENFORCE_EQ( + gt_segms->lod()[0].size() - 1, n, + platform::errors::InvalidArgument( + "Batchsize of Input(gt_segms) and Input(gt_classes) should be " + "same, but received gt_segms[%d], gt_classes[%d].", + gt_segms->lod()[0].size() - 1, n)); int mask_dim = num_classes * resolution * resolution; int roi_num = rois->lod().back()[n]; diff --git a/paddle/fluid/operators/detection/target_assign_op.cc b/paddle/fluid/operators/detection/target_assign_op.cc index 3c02796de0..1fda795d35 100644 --- a/paddle/fluid/operators/detection/target_assign_op.cc +++ b/paddle/fluid/operators/detection/target_assign_op.cc @@ -22,29 +22,41 @@ class TargetAssignOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - 
PADDLE_ENFORCE(ctx->HasInput("X"), - "Input(X) of TargetAssignOp should not be null"); - PADDLE_ENFORCE(ctx->HasInput("MatchIndices"), - "Input(MatchIndices) of TargetAssignOp should not be null"); - - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of TargetAssignOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("OutWeight"), - "Output(OutWeight) of TargetAssignOp should not be null."); + PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true, + platform::errors::InvalidArgument( + "Input(X) of TargetAssignOp should not be null")); + PADDLE_ENFORCE_EQ( + ctx->HasInput("MatchIndices"), true, + platform::errors::InvalidArgument( + "Input(MatchIndices) of TargetAssignOp should not be null")); + + PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true, + platform::errors::InvalidArgument( + "Output(Out) of TargetAssignOp should not be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasOutput("OutWeight"), true, + platform::errors::InvalidArgument( + "Output(OutWeight) of TargetAssignOp should not be null.")); auto in_dims = ctx->GetInputDim("X"); auto mi_dims = ctx->GetInputDim("MatchIndices"); - PADDLE_ENFORCE_EQ(in_dims.size(), 3, "The rank of Input(X) must be 3."); + PADDLE_ENFORCE_EQ( + in_dims.size(), 3, + platform::errors::InvalidArgument("The rank of Input(X) must be 3.")); PADDLE_ENFORCE_EQ(mi_dims.size(), 2, - "The rank of Input(MatchIndices) must be 2."); + platform::errors::InvalidArgument( + "The rank of Input(MatchIndices) must be 2.")); if (ctx->HasInput("NegIndices")) { auto neg_dims = ctx->GetInputDim("NegIndices"); PADDLE_ENFORCE_EQ(neg_dims.size(), 2, - "The rank of Input(NegIndices) must be 2."); - PADDLE_ENFORCE_EQ(neg_dims[1], 1, - "The last dimension of Out(NegIndices) must be 1."); + platform::errors::InvalidArgument( + "The rank of Input(NegIndices) must be 2.")); + PADDLE_ENFORCE_EQ( + neg_dims[1], 1, + platform::errors::InvalidArgument( + "The last dimension of Out(NegIndices) must be 1.")); } auto n = mi_dims[0]; diff --git a/paddle/fluid/operators/detection/target_assign_op.h b/paddle/fluid/operators/detection/target_assign_op.h index 691e3276f9..da85e4c5e4 100644 --- a/paddle/fluid/operators/detection/target_assign_op.h +++ b/paddle/fluid/operators/detection/target_assign_op.h @@ -90,7 +90,9 @@ class TargetAssignKernel : public framework::OpKernel { auto* out = ctx.Output("Out"); auto* out_wt = ctx.Output("OutWeight"); - PADDLE_ENFORCE_EQ(x->lod().size(), 1UL); + PADDLE_ENFORCE_EQ(x->lod().size(), 1UL, + platform::errors::InvalidArgument( + "TargetAssignOp input(X) needs 1 level of LoD")); int mismatch_value = ctx.Attr("mismatch_value"); const T* x_data = x->data(); @@ -121,7 +123,10 @@ class TargetAssignKernel : public framework::OpKernel { auto* neg_indices = ctx.Input("NegIndices"); if (neg_indices) { - PADDLE_ENFORCE_EQ(neg_indices->lod().size(), 1UL); + PADDLE_ENFORCE_EQ( + neg_indices->lod().size(), 1UL, + platform::errors::InvalidArgument( + "TargetAssignOp input(NegIndices) needs 1 level of LoD")); const int* neg_idx_data = neg_indices->data(); auto neg_lod = neg_indices->lod().back(); #if defined(PADDLE_WITH_CUDA) diff --git a/paddle/fluid/operators/filter_by_instag_op.cc b/paddle/fluid/operators/filter_by_instag_op.cc index 80b7066ab7..1b88d9de47 100644 --- a/paddle/fluid/operators/filter_by_instag_op.cc +++ b/paddle/fluid/operators/filter_by_instag_op.cc @@ -24,19 +24,25 @@ class FilterByInstagOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - 
PADDLE_ENFORCE_EQ(ctx->HasInput("Ins"), true,
-                      "Input(Ins) should be not null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("Ins"), true,
+        platform::errors::InvalidArgument("Input(Ins) should not be null."));
     PADDLE_ENFORCE_EQ(ctx->HasInput("Ins_tag"), true,
-                      "Input(Ins_tag) should be not null.");
+                      platform::errors::InvalidArgument(
+                          "Input(Ins_tag) should not be null."));
     PADDLE_ENFORCE_EQ(ctx->HasInput("Filter_tag"), true,
-                      "Input(Filter_tag) should be not null.");
+                      platform::errors::InvalidArgument(
+                          "Input(Filter_tag) should not be null."));
 
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) should be not null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("Out"), true,
+        platform::errors::InvalidArgument("Output(Out) should not be null."));
     PADDLE_ENFORCE_EQ(ctx->HasOutput("LossWeight"), true,
-                      "Output(LossWeight) shoudl not be null.");
+                      platform::errors::InvalidArgument(
+                          "Output(LossWeight) should not be null."));
     PADDLE_ENFORCE_EQ(ctx->HasOutput("IndexMap"), true,
-                      "Output(IndexMap) should be not null.");
+                      platform::errors::InvalidArgument(
+                          "Output(IndexMap) should not be null."));
 
     auto x1_dims = ctx->GetInputDim("Ins");  // batch_size * vec
@@ -85,15 +91,20 @@ class FilterByInstagOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE_EQ(ctx->HasInput("IndexMap"), true,
-                      "Input(IndexMap) should be not null");
+                      platform::errors::InvalidArgument(
+                          "Input(IndexMap) should not be null"));
     PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
-                      "Grad Input(Out) should be not null");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Ins"), true,
-                      "Input(Ins) should be not null");
+                      platform::errors::InvalidArgument(
+                          "Grad Input(Out) should not be null"));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("Ins"), true,
+        platform::errors::InvalidArgument("Input(Ins) should not be null"));
     PADDLE_ENFORCE_EQ(ctx->HasInput("LossWeight"), true,
-                      "Input(LossWeight) should be not null");
+                      platform::errors::InvalidArgument(
+                          "Input(LossWeight) should not be null"));
     PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("Ins")), true,
-                      "Grad Output(Ins) should be not null");
+                      platform::errors::InvalidArgument(
+                          "Grad Output(Ins) should not be null"));
     auto grad_out_dims = ctx->GetInputDim(framework::GradVarName("Out"));
     auto x1_dims = ctx->GetInputDim("Ins");
diff --git a/paddle/fluid/operators/one_hot_v2_op.h b/paddle/fluid/operators/one_hot_v2_op.h
index 7cfe2d61d1..221b8cf0e2 100644
--- a/paddle/fluid/operators/one_hot_v2_op.h
+++ b/paddle/fluid/operators/one_hot_v2_op.h
@@ -51,11 +51,19 @@ struct OneHotV2OpFunctor {
       }
     } else {
       for (int i = 0; i < numel; ++i) {
-        PADDLE_ENFORCE_GE(p_in_data[i], 0,
-                          "Illegal index value, should be at least 0.");
+        PADDLE_ENFORCE_GE(
+            p_in_data[i], 0,
+            platform::errors::InvalidArgument(
+                "Illegal index value, Input(input) value should be at least 0, "
+                "but received input (%d) less than 0",
+                p_in_data[i]));
         PADDLE_ENFORCE_LT(
             p_in_data[i], depth_,
-            "Illegal index value, should be less than depth (%d).", depth_);
+            platform::errors::InvalidArgument(
+                "Illegal index value, Input(input) value should be less than "
+                "Input(depth), "
+                "but received input (%d) not less than depth (%d)",
+                p_in_data[i], depth_));
         *(p_out_data + i * depth_ + p_in_data[i]) = 1.0;
       }
     }
diff --git a/paddle/fluid/operators/pool_with_index_op.cc b/paddle/fluid/operators/pool_with_index_op.cc
index 116fb01f71..d081ecb3a2 100644
---
a/paddle/fluid/operators/pool_with_index_op.cc +++ b/paddle/fluid/operators/pool_with_index_op.cc @@ -29,12 +29,15 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), - "Input(X) of Pooling should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of Pooling should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Mask"), - "Output(Mask) of Pooling should not be null."); + PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true, + platform::errors::InvalidArgument( + "Input(X) of Pooling should not be null.")); + PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true, + platform::errors::InvalidArgument( + "Output(Out) of Pooling should not be null.")); + PADDLE_ENFORCE_EQ(ctx->HasOutput("Mask"), true, + platform::errors::InvalidArgument( + "Output(Mask) of Pooling should not be null.")); auto in_x_dims = ctx->GetInputDim("X"); @@ -54,12 +57,16 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel { } } - PADDLE_ENFORCE(in_x_dims.size() - ksize.size() == 2U, - "Input size and pooling size should be consistent."); + PADDLE_ENFORCE_EQ(in_x_dims.size() - ksize.size(), 2U, + platform::errors::InvalidArgument( + "Input size and pooling size should be consistent.")); PADDLE_ENFORCE_EQ(ksize.size(), strides.size(), - "Strides size and pooling size should be the same."); - PADDLE_ENFORCE_EQ(ksize.size(), paddings.size(), - "Paddings size and pooling size should be the same."); + platform::errors::InvalidArgument( + "Strides size and pooling size should be the same.")); + PADDLE_ENFORCE_EQ( + ksize.size(), paddings.size(), + platform::errors::InvalidArgument( + "Paddings size and pooling size should be the same.")); std::vector output_shape({in_x_dims[0], in_x_dims[1]}); if (adaptive) { @@ -90,15 +97,16 @@ class MaxPoolWithIndexOpGrad : public framework::OperatorWithKernel { void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE_EQ( ctx->HasInput("Mask"), true, - platform::errors::NotFound("Input(Mask) must not be null.")); - PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true, - platform::errors::NotFound("Input(X) must not be null.")); - PADDLE_ENFORCE_EQ( - ctx->HasInput(framework::GradVarName("Out")), true, - platform::errors::NotFound("Input(Out@GRAD) should not be null.")); + platform::errors::InvalidArgument("Input(Mask) must not be null.")); PADDLE_ENFORCE_EQ( - ctx->HasOutput(framework::GradVarName("X")), true, - platform::errors::NotFound("Output(X@GRAD) should not be null.")); + ctx->HasInput("X"), true, + platform::errors::InvalidArgument("Input(X) must not be null.")); + PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true, + platform::errors::InvalidArgument( + "Input(Out@GRAD) should not be null.")); + PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("X")), true, + platform::errors::InvalidArgument( + "Output(X@GRAD) should not be null.")); ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); } diff --git a/paddle/fluid/operators/psroi_pool_op.cc b/paddle/fluid/operators/psroi_pool_op.cc index d87405db62..d3faa2c846 100644 --- a/paddle/fluid/operators/psroi_pool_op.cc +++ b/paddle/fluid/operators/psroi_pool_op.cc @@ -81,43 +81,57 @@ class PSROIPoolOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - 
PADDLE_ENFORCE(ctx->HasInput("X"), - "Input(X) of PSROIPoolOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("ROIs"), - "Input(ROIs) of PSROIPoolOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of PSROIPoolOp should not be null."); + PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true, + platform::errors::InvalidArgument( + "Input(X) of PSROIPoolOp should not be null.")); + PADDLE_ENFORCE_EQ(ctx->HasInput("ROIs"), true, + platform::errors::InvalidArgument( + "Input(ROIs) of PSROIPoolOp should not be null.")); + PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true, + platform::errors::InvalidArgument( + "Output(Out) of PSROIPoolOp should not be null.")); auto input_dims = ctx->GetInputDim("X"); auto rois_dims = ctx->GetInputDim("ROIs"); - PADDLE_ENFORCE(input_dims.size() == 4, - "The format of input tensor is NCHW"); - PADDLE_ENFORCE(rois_dims.size() == 2, - "ROIs should be a 2-D LoDTensor of shape (num_rois, 4) " - "given as [(x1, y1, x2, y2), ...]"); - PADDLE_ENFORCE(rois_dims[1] == 4, - "ROIs should be a 2-D LoDTensor of shape (num_rois, 4) " - "given as [(x1, y1, x2, y2), ...]"); + PADDLE_ENFORCE_EQ(input_dims.size(), 4, + platform::errors::InvalidArgument( + "The format of input tensor is NCHW")); + PADDLE_ENFORCE_EQ( + rois_dims.size(), 2, + platform::errors::InvalidArgument( + "ROIs should be a 2-D LoDTensor of shape (num_rois, 4) " + "given as [(x1, y1, x2, y2), ...]")); + PADDLE_ENFORCE_EQ( + rois_dims[1], 4, + platform::errors::InvalidArgument( + "ROIs should be a 2-D LoDTensor of shape (num_rois, 4) " + "given as [(x1, y1, x2, y2), ...]")); int pooled_height = ctx->Attrs().Get("pooled_height"); int pooled_width = ctx->Attrs().Get("pooled_width"); int output_channels = ctx->Attrs().Get("output_channels"); float spatial_scale = ctx->Attrs().Get("spatial_scale"); - PADDLE_ENFORCE( - input_dims[1] == output_channels * pooled_height * pooled_width, - "the channel of X(%d) should be equal to the product of " - "output_channels(%d), pooled_height(%d) and pooled_width(%d)", - input_dims[1], output_channels, pooled_height, pooled_width); + PADDLE_ENFORCE_EQ( + input_dims[1], output_channels * pooled_height * pooled_width, + platform::errors::InvalidArgument( + "the channel of X(%d) " + "should be equal to the product of " + "output_channels(%d), pooled_height(%d) and pooled_width(%d)", + input_dims[1], output_channels, pooled_height, pooled_width)); PADDLE_ENFORCE_GT(pooled_height, 0, - "The pooled output height must be greater than 0"); + platform::errors::InvalidArgument( + "The pooled output height must be greater than 0")); PADDLE_ENFORCE_GT(pooled_width, 0, - "The pooled output width must be greater than 0"); + platform::errors::InvalidArgument( + "The pooled output width must be greater than 0")); PADDLE_ENFORCE_GT(output_channels, 1, - "The pooled output channels must greater than 1"); + platform::errors::InvalidArgument( + "The pooled output channels must greater than 1")); PADDLE_ENFORCE_GT(spatial_scale, 0.0f, - "The spatial scale must greater than 0."); + platform::errors::InvalidArgument( + "The spatial scale must greater than 0.")); auto out_dims = input_dims; out_dims[0] = rois_dims[0]; @@ -142,10 +156,12 @@ class PSROIPoolGradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), - "The gradient of Out should not be null."); - 
PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")), - "The gradient of X should not be null."); + PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true, + platform::errors::InvalidArgument( + "The gradient of Out should not be null.")); + PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("X")), true, + platform::errors::InvalidArgument( + "The gradient of X should not be null.")); ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); } diff --git a/paddle/fluid/operators/psroi_pool_op.h b/paddle/fluid/operators/psroi_pool_op.h index 5666613f6e..4f4cb24844 100644 --- a/paddle/fluid/operators/psroi_pool_op.h +++ b/paddle/fluid/operators/psroi_pool_op.h @@ -54,15 +54,19 @@ class CPUPSROIPoolOpKernel : public framework::OpKernel { int rois_batch_size = rois_lod.size() - 1; PADDLE_ENFORCE_EQ( rois_batch_size, batch_size, - "the rois_batch_size and input(X) batch_size should be the same."); + platform::errors::InvalidArgument("the rois_batch_size and input(X) " + "batch_size should be the same.")); int rois_num_with_lod = rois_lod[rois_batch_size]; PADDLE_ENFORCE_EQ(rois_num_with_lod, rois_num, - "the rois_num from input and lod must be the same"); + platform::errors::InvalidArgument( + "the rois_num from input and lod must be the same")); PADDLE_ENFORCE_EQ(input_channels, output_channels * pooled_height * pooled_width, - "the channels of input X should equal the product of " - "output_channels x pooled_height x pooled_width"); + platform::errors::InvalidArgument( + "the channels of input " + "X should equal the product of " + "output_channels x pooled_height x pooled_width")); // calculate batch id index for each roi according to LoD for (int n = 0; n < rois_batch_size; ++n) { diff --git a/paddle/fluid/operators/roi_pool_op.cc b/paddle/fluid/operators/roi_pool_op.cc index 01e7406849..8a34cb35f6 100644 --- a/paddle/fluid/operators/roi_pool_op.cc +++ b/paddle/fluid/operators/roi_pool_op.cc @@ -36,7 +36,10 @@ class ROIPoolOp : public framework::OperatorWithKernel { if (ctx->HasInput("RoisLod")) { auto rois_lod_dims = ctx->GetInputDim("RoisLod"); - PADDLE_ENFORCE(rois_lod_dims.size() == 1, ""); + PADDLE_ENFORCE_EQ(rois_lod_dims.size(), 1, + platform::errors::InvalidArgument( + "The lod information tensor of ROIs should " + "be one-dimensional")); } PADDLE_ENFORCE_EQ(input_dims.size(), 4, platform::errors::InvalidArgument( diff --git a/paddle/fluid/operators/roi_pool_op.h b/paddle/fluid/operators/roi_pool_op.h index 4a369bbb42..145b170ded 100644 --- a/paddle/fluid/operators/roi_pool_op.h +++ b/paddle/fluid/operators/roi_pool_op.h @@ -63,7 +63,8 @@ class CPUROIPoolOpKernel : public framework::OpKernel { rois_batch_size = rois_lod_t->numel(); PADDLE_ENFORCE_EQ( rois_batch_size - 1, batch_size, - "The rois_batch_size and imgs batch_size must be the same."); + platform::errors::InvalidArgument("The rois_batch_size and imgs " + "batch_size must be the same.")); auto* rois_lod = rois_lod_t->data(); for (int n = 0; n < rois_batch_size - 1; ++n) { for (int i = rois_lod[n]; i < rois_lod[n + 1]; ++i) { @@ -75,10 +76,13 @@ class CPUROIPoolOpKernel : public framework::OpKernel { rois_batch_size = rois_lod.size() - 1; PADDLE_ENFORCE_EQ( rois_batch_size, batch_size, - "The rois_batch_size and imgs batch_size must be the same."); + platform::errors::InvalidArgument("The rois_batch_size and imgs " + "batch_size must be the same.")); int rois_num_with_lod = rois_lod[rois_batch_size]; - PADDLE_ENFORCE_EQ(rois_num, rois_num_with_lod, - "The rois_num from input and lod must 
be the same.");
+    PADDLE_ENFORCE_EQ(
+        rois_num, rois_num_with_lod,
+        platform::errors::InvalidArgument("The rois_num from input "
+                                          "and lod must be the same."));
     for (int n = 0; n < rois_batch_size; ++n) {
       for (size_t i = rois_lod[n]; i < rois_lod[n + 1]; ++i) {
         roi_batch_id_data[i] = n;
diff --git a/paddle/fluid/operators/softmax_op.cc b/paddle/fluid/operators/softmax_op.cc
index 56dca92ea6..2a6ca7975f 100644
--- a/paddle/fluid/operators/softmax_op.cc
+++ b/paddle/fluid/operators/softmax_op.cc
@@ -34,21 +34,30 @@ class SoftmaxOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SoftmaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SoftmaxOp should not be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("X"), true,
+        platform::errors::NotFound("Input(X) of SoftmaxOp is not found."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("Out"), true,
+        platform::errors::NotFound("Output(Out) of SoftmaxOp is not found."));
 
     auto dim_x = ctx->GetInputDim("X");
     auto rank_x = dim_x.size();
     auto axis = ctx->Attrs().Get<int>("axis");
-    PADDLE_ENFORCE(axis >= -rank_x && axis < rank_x,
-                   "Attr(axis) value should be in range [-R, R-1], "
-                   "R is the rank of Input(X).");
+    PADDLE_ENFORCE_GE(axis, -rank_x,
+                      platform::errors::InvalidArgument(
+                          "Attr(axis) value should be in range [-R, R-1], "
+                          "R is the rank of Input(X)."));
+    PADDLE_ENFORCE_LT(axis, rank_x,
+                      platform::errors::InvalidArgument(
+                          "Attr(axis) value should be in range [-R, R-1], "
+                          "R is the rank of Input(X)."));
 
     auto use_cudnn = ctx->Attrs().Get<bool>("use_cudnn");
     if (axis != rank_x - 1 && axis != -1) {
-      PADDLE_ENFORCE(!use_cudnn, "CUDNN kernel only support axis as -1.");
+      PADDLE_ENFORCE_EQ(use_cudnn, false,
+                        platform::errors::InvalidArgument(
+                            "CUDNN kernel only support axis as -1."));
     }
 
     ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
@@ -78,8 +87,9 @@ class SoftmaxOp : public framework::OperatorWithKernel {
     auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
     if (input_data_type == framework::proto::VarType::FP16) {
-      PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
-                     "float16 can only be used on GPU place");
+      PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
+                        platform::errors::InvalidArgument(
+                            "float16 can only be used on GPU place"));
     }
 
     return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout_,
@@ -157,12 +167,17 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Out"), "Input(Out) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should be not null.");
-    PADDLE_ENFORCE_EQ(ctx->GetInputDim("Out"),
-                      ctx->GetInputDim(framework::GradVarName("Out")),
-                      "Input(Out) and its gradients should have a same shape.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("Out"), true,
+        platform::errors::InvalidArgument("Input(Out) is not found."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput(framework::GradVarName("Out")), true,
+        platform::errors::InvalidArgument("Input(Out@GRAD) is not found."));
+    PADDLE_ENFORCE_EQ(
+        ctx->GetInputDim("Out"),
+        ctx->GetInputDim(framework::GradVarName("Out")),
+        platform::errors::InvalidArgument("Input(Out) and its gradients "
+                                          "should have the same shape."));
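// Annotation (not part of the patch): ctx->GetInputDim() returns a
// framework::DDim, so the PADDLE_ENFORCE_EQ above compares whole shapes
// rather than scalars. On failure the enforce machinery reports both
// operands alongside the typed message, which is why the rewritten
// messages in this patch no longer spell out the offending values by hand
// (a general note on the pattern, not a claim about exact output format).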
ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim(framework::GradVarName("Out"))); @@ -191,8 +206,9 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel { auto input_data_type = OperatorWithKernel::IndicateVarDataType( ctx, framework::GradVarName("Out")); if (input_data_type == framework::proto::VarType::FP16) { - PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), - "float16 can only be used on GPU place"); + PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true, + platform::errors::InvalidArgument( + "float16 can only be used on GPU place")); } return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout_, diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.cc b/paddle/fluid/operators/softmax_with_cross_entropy_op.cc index b82869c17e..b5b99d3a92 100644 --- a/paddle/fluid/operators/softmax_with_cross_entropy_op.cc +++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.cc @@ -108,39 +108,51 @@ class SoftmaxWithCrossEntropyOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Logits"), - "Input(Logits) should be not null."); - PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null."); - - PADDLE_ENFORCE(ctx->HasOutput("Softmax"), - "Output(Softmax) should be not null."); - PADDLE_ENFORCE(ctx->HasOutput("Loss"), "Output(Loss) should be not null."); + PADDLE_ENFORCE_EQ( + ctx->HasInput("Logits"), true, + platform::errors::InvalidArgument("Input(Logits) should be not null.")); + PADDLE_ENFORCE_EQ( + ctx->HasInput("Label"), true, + platform::errors::InvalidArgument("Input(Label) should be not null.")); + + PADDLE_ENFORCE_EQ(ctx->HasOutput("Softmax"), true, + platform::errors::InvalidArgument( + "Output(Softmax) should be not null.")); + PADDLE_ENFORCE_EQ( + ctx->HasOutput("Loss"), true, + platform::errors::InvalidArgument("Output(Loss) should be not null.")); auto axis = ctx->Attrs().Get("axis"); auto logits_dims = ctx->GetInputDim("Logits"); auto labels_dims = ctx->GetInputDim("Label"); auto logits_rank = logits_dims.size(); - PADDLE_ENFORCE(axis >= -logits_rank && axis < logits_rank, - "Attr(axis) value should be in range [-R, R-1], " - "R is the rank of Input(Logits)."); + PADDLE_ENFORCE_GE(axis, -logits_rank, + platform::errors::InvalidArgument( + "Attr(axis) value should be in range [-R, R-1], " + "R is the rank of Input(Logits).")); + PADDLE_ENFORCE_LT(axis, logits_rank, + platform::errors::InvalidArgument( + "Attr(axis) value should be in range [-R, R-1], " + "R is the rank of Input(Logits).")); axis = CanonicalAxis(axis, logits_rank); for (int i = 0; i < logits_rank; i++) { if (i != axis) { if (ctx->IsRuntime() || (logits_dims[i] > 0 && labels_dims[i] > 0)) { - PADDLE_ENFORCE_EQ( - logits_dims[i], labels_dims[i], - "Input(Logits) and Input(Label) should in same shape in " - "dimensions except axis."); + PADDLE_ENFORCE_EQ(logits_dims[i], labels_dims[i], + platform::errors::InvalidArgument( + "Input(Logits) and Input(Label) should in " + "same shape in dimensions except axis.")); } } } auto numeric_stable_mode = ctx->Attrs().Get("numeric_stable_mode"); if (axis != logits_rank - 1) { - PADDLE_ENFORCE( - numeric_stable_mode, - "Attr(axis) can only be -1 when not in numeric_stable_mode."); + PADDLE_ENFORCE_EQ(numeric_stable_mode, true, + platform::errors::InvalidArgument( + "Attr(axis) can only be -1 " + "when not in numeric_stable_mode.")); } bool soft_label = 
ctx->Attrs().Get("soft_label"); @@ -148,14 +160,18 @@ class SoftmaxWithCrossEntropyOp : public framework::OperatorWithKernel { if (ctx->IsRuntime() || (logits_dims[axis] > 0 && labels_dims[axis] > 0)) { PADDLE_ENFORCE_EQ(logits_dims[axis], labels_dims[axis], - "If Attr(soft_label) == true, the axis dimension of " - "Input(X) and Input(Label) should be equal."); + platform::errors::InvalidArgument( + "If Attr(soft_label) == true, " + "the axis dimension of " + "Input(X) and Input(Label) should be equal.")); } } else { if (ctx->IsRuntime() || labels_dims[axis] > 0) { - PADDLE_ENFORCE_EQ(labels_dims[axis], 1UL, - "If Attr(soft_label) == false, the axis dimension of " - "Input(Label) should be 1."); + PADDLE_ENFORCE_EQ( + labels_dims[axis], 1UL, + platform::errors::InvalidArgument("If Attr(soft_label) == false, " + "the axis dimension of " + "Input(Label) should be 1.")); } } @@ -182,21 +198,31 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Loss")), - "Input(Loss@Grad) should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Softmax"), - "Input(Softmax) should be not null."); - PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null."); - PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("Logits")), - "Output(Logits@Grad) should be not null."); + PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Loss")), true, + platform::errors::InvalidArgument( + "Input(Loss@Grad) should not be null.")); + PADDLE_ENFORCE_EQ(ctx->HasInput("Softmax"), true, + platform::errors::InvalidArgument( + "Input(Softmax) should be not null.")); + PADDLE_ENFORCE_EQ( + ctx->HasInput("Label"), true, + platform::errors::InvalidArgument("Input(Label) should be not null.")); + PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("Logits")), true, + platform::errors::InvalidArgument( + "Output(Logits@Grad) should be not null.")); auto axis = ctx->Attrs().Get("axis"); auto softmax_dims = ctx->GetInputDim("Softmax"); auto labels_dims = ctx->GetInputDim("Label"); auto softmax_rank = softmax_dims.size(); - PADDLE_ENFORCE(axis >= -softmax_rank && axis < softmax_rank, - "Attr(axis) value should be in range [-R, R-1], " - "R is the rank of Input(Logits)."); + PADDLE_ENFORCE_GE(axis, -softmax_rank, + platform::errors::InvalidArgument( + "Attr(axis) value should be in range [-R, R-1], " + "R is the rank of Input(Logits).")); + PADDLE_ENFORCE_LT(axis, softmax_rank, + platform::errors::InvalidArgument( + "Attr(axis) value should be in range [-R, R-1], " + "R is the rank of Input(Logits).")); axis = CanonicalAxis(axis, softmax_rank); for (int i = 0; i < softmax_rank; i++) { @@ -204,8 +230,9 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel { if (ctx->IsRuntime() || (softmax_dims[i] > 0 && labels_dims[i] > 0)) { PADDLE_ENFORCE_EQ( softmax_dims[i], labels_dims[i], - "Input(Logits) and Input(Label) should in same shape in " - "dimensions except axis."); + platform::errors::InvalidArgument( + "Input(Logits) and Input(Label) should in same shape in " + "dimensions except axis.")); } } } @@ -215,14 +242,18 @@ class SoftmaxWithCrossEntropyOpGrad : public framework::OperatorWithKernel { if (ctx->IsRuntime() || (softmax_dims[axis] > 0 && labels_dims[axis] > 0)) { PADDLE_ENFORCE_EQ(softmax_dims[axis], labels_dims[axis], - "If Attr(soft_label) == true, the axis dimension of " - "Input(X) 
and Input(Label) should be equal.");
+                        platform::errors::InvalidArgument(
+                            "If Attr(soft_label) == true, "
+                            "the axis dimension of "
+                            "Input(X) and Input(Label) should be equal."));
       }
     } else {
       if (ctx->IsRuntime() || labels_dims[axis] > 0) {
-        PADDLE_ENFORCE_EQ(labels_dims[axis], 1UL,
-                          "If Attr(soft_label) == false, the axis dimension of "
-                          "Input(Label) should be 1.");
+        PADDLE_ENFORCE_EQ(
+            labels_dims[axis], 1UL,
+            platform::errors::InvalidArgument("If Attr(soft_label) == false, "
+                                              "the axis dimension of "
+                                              "Input(Label) should be 1."));
       }
     }
diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.h b/paddle/fluid/operators/softmax_with_cross_entropy_op.h
index 4533295a8d..cebd466f36 100644
--- a/paddle/fluid/operators/softmax_with_cross_entropy_op.h
+++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.h
@@ -31,8 +31,9 @@ template <typename T>
 class SoftmaxWithCrossEntropyKernel : public framework::OpKernel<T> {
  public:
  void Compute(const framework::ExecutionContext& context) const override {
-    PADDLE_ENFORCE(platform::is_cpu_place(context.GetPlace()),
-                   "This kernel only runs on CPU.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_cpu_place(context.GetPlace()), true,
+        platform::errors::Unimplemented("This kernel only runs on CPU."));
     const Tensor* logits = context.Input<Tensor>("Logits");
     const Tensor* labels = context.Input<Tensor>("Label");
     Tensor* softmax = context.Output<Tensor>("Softmax");
diff --git a/paddle/fluid/operators/spp_op.cc b/paddle/fluid/operators/spp_op.cc
index e9ed900ea0..e83d4f0ea4 100644
--- a/paddle/fluid/operators/spp_op.cc
+++ b/paddle/fluid/operators/spp_op.cc
@@ -62,15 +62,17 @@ class SppOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SppOp"
-                   "should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SppOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(X) of SppOp should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
+                      platform::errors::InvalidArgument(
+                          "Output(Out) of SppOp should not be null."));
     auto in_x_dims = ctx->GetInputDim("X");
     int pyramid_height = ctx->Attrs().Get<int>("pyramid_height");
-    PADDLE_ENFORCE(in_x_dims.size() == 4,
-                   "Spping intput must be of 4-dimensional.");
+    PADDLE_ENFORCE_EQ(in_x_dims.size(), 4,
+                      platform::errors::InvalidArgument(
+                          "Input of SppOp must be 4-dimensional."));
     int outlen = ((std::pow(4, pyramid_height) - 1) / (4 - 1)) * in_x_dims[1];
     std::vector<int64_t> output_shape({in_x_dims[0], outlen});
     ctx->SetOutputDim("Out", framework::make_ddim(output_shape));
@@ -81,9 +83,12 @@ class SppOpGrad : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
-                   "Input(X@GRAD) should not be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("X"), true,
+        platform::errors::InvalidArgument("Input(X) must not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput(framework::GradVarName("X")), true,
+        platform::errors::InvalidArgument("Input(X@GRAD) should not be null."));
     ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
   }
 };
diff --git a/paddle/fluid/operators/unsqueeze_op.cc b/paddle/fluid/operators/unsqueeze_op.cc
index
c12f59ce92..e191481f3b 100644 --- a/paddle/fluid/operators/unsqueeze_op.cc +++ b/paddle/fluid/operators/unsqueeze_op.cc @@ -27,16 +27,22 @@ class UnsqueezeOp : public framework::OperatorWithKernel { void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true, - "Input(X) of Unsqueeze operator should not be null."); + platform::errors::InvalidArgument( + "Input(X) of " + "Unsqueeze operator should not be null.")); PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true, - "Output(Out) of Unsqueeze operator should not be null."); + platform::errors::InvalidArgument( + "Output(Out) of " + "Unsqueeze operator should not be null.")); const auto &axes = ctx->Attrs().Get>("axes"); const auto &x_dims = ctx->GetInputDim("X"); // Validity Check: input tensor dims (<6). PADDLE_ENFORCE_LE(x_dims.size(), 6, - "Invalid dimensions, the rank of Input(X) " - "should be in the range of [1, 6] (Eigen limit)"); + platform::errors::InvalidArgument( + "Invalid " + "dimensions, the rank of Input(X) " + "should be in the range of [1, 6] (Eigen limit)")); if (!axes.empty()) { auto out_dims = GetOutputShape(axes, x_dims); ctx->SetOutputDim("Out", out_dims); @@ -49,24 +55,29 @@ class UnsqueezeOp : public framework::OperatorWithKernel { auto AxesTensorList = ctx->Inputs("AxesTensorList"); int output_size = x_dims.size() + static_cast(AxesTensorList.size()); PADDLE_ENFORCE_LE(output_size, 6, - "The output tensor's rank should be less than 6."); + platform::errors::InvalidArgument( + "The output tensor's rank should be less than 6.")); std::vector vec_out_dims(output_size, -1); ctx->SetOutputDim("Out", framework::make_ddim(vec_out_dims)); } else if (ctx->HasInput("AxesTensor")) { auto axes_dims = ctx->GetInputDim("AxesTensor"); - PADDLE_ENFORCE_EQ( - axes_dims.size(), 1, - "Input(AxesTensor)'s dimension of Op(unsqueeze) must be 1. " - "But received AxesTensor's shape = [%s], " - "AxesTensor's dimension = %d.", - axes_dims, axes_dims.size()); - PADDLE_ENFORCE_GE(axes_dims[0], 0, - "Input(AxesTensor)'s shape must be known. But received " - "AxesTensor's shape = [%s]", - axes_dims); + PADDLE_ENFORCE_EQ(axes_dims.size(), 1, + platform::errors::InvalidArgument( + "Input(AxesTensor)'s dimension of " + "Op(unsqueeze) must be 1. " + "But received AxesTensor's shape = [%s], " + "AxesTensor's dimension = %d.", + axes_dims, axes_dims.size())); + PADDLE_ENFORCE_GE( + axes_dims[0], 0, + platform::errors::InvalidArgument( + "Input(AxesTensor)'s shape must be known. But received " + "AxesTensor's shape = [%s]", + axes_dims)); int output_size = x_dims.size() + static_cast(axes_dims[0]); PADDLE_ENFORCE_LE(output_size, 6, - "The output tensor's rank should be less than 6."); + platform::errors::InvalidArgument( + "The output tensor's rank should be less than 6.")); std::vector vec_out_dims(output_size, -1); ctx->SetOutputDim("Out", framework::make_ddim(vec_out_dims)); } @@ -80,13 +91,19 @@ class UnsqueezeOp : public framework::OperatorWithKernel { // Validity Check: rank range. PADDLE_ENFORCE_LE(output_size, 6, - "The output tensor's rank should be less than 6."); + platform::errors::InvalidArgument( + "The output tensor's rank should be less than 6.")); for (int axis : unsqz_dims) { int cur = axis < 0 ? 
axis + cur_output_size + 1 : axis;
       // Validity Check: the axis bound
-      PADDLE_ENFORCE_GE(cur, 0);
-      PADDLE_ENFORCE_LE(cur, cur_output_size);
+      PADDLE_ENFORCE_GE(cur, 0, platform::errors::InvalidArgument(
+                                    "The insert dimension value should "
+                                    "not be less than 0"));
+      PADDLE_ENFORCE_LE(cur, cur_output_size,
+                        platform::errors::InvalidArgument(
+                            "The insert dimension value should not be larger "
+                            "than the dimension size of input tensor"));
       // Move old axis, and insert new axis
       for (int i = cur_output_size; i >= cur; --i) {
         if (output_shape[i] == 1) {
@@ -151,13 +168,17 @@ class UnsqueezeOpMaker : public framework::OpProtoAndCheckerMaker {
         .AddCustomChecker([](const std::vector<int> &axes) {
           // Validity Check: axes dims (<6).
          PADDLE_ENFORCE_LT(static_cast<int>(axes.size()), 6,
-                            "Invalid dimensions, dynamic dimensions should be "
-                            "within [1, 6] dimensions (Eigen limit).");
+                            platform::errors::InvalidArgument(
+                                "Invalid "
+                                "dimensions, dynamic dimensions should be "
+                                "within [1, 6] dimensions (Eigen limit)."));
           // Validity Check: the range of unsqueeze axis.
           for (int axis : axes) {
             PADDLE_ENFORCE_LT(axis, 6,
-                              "Invalid dimensions, input axis should be"
-                              " within [1, 6] dimensions (Eigen limit).");
+                              platform::errors::InvalidArgument(
+                                  "Invalid "
+                                  "dimensions, input axis should be "
+                                  "within [1, 6] dimensions (Eigen limit)."));
           }
         });
     AddComment(R"DOC(
@@ -219,7 +240,8 @@ class Unsqueeze2Op : public UnsqueezeOp {
 
     PADDLE_ENFORCE_EQ(
         ctx->HasOutput("XShape"), true,
-        "Output(XShape) of Unsqueeze operator should not be null.");
+        platform::errors::InvalidArgument("Output(XShape) of Unsqueeze "
+                                          "operator should not be null."));
     std::vector<int64_t> xshape_dims(x_dims.size() + 1);
     xshape_dims[0] = 0;
     for (int i = 0; i < x_dims.size(); ++i) {
@@ -259,10 +281,12 @@ class Unsqueeze2GradOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext *context) const override {
-    PADDLE_ENFORCE_EQ(context->HasInput("XShape"), true,
-                      "Input(XShape) shouldn't be null.");
+    PADDLE_ENFORCE_EQ(
+        context->HasInput("XShape"), true,
+        platform::errors::InvalidArgument("Input(XShape) shouldn't be null."));
     PADDLE_ENFORCE_EQ(context->HasInput(framework::GradVarName("Out")), true,
-                      "Input(Out@GRAD) shouldn't be null.");
+                      platform::errors::InvalidArgument(
+                          "Input(Out@GRAD) shouldn't be null."));
     auto xshape_dims = context->GetInputDim("XShape");
     auto x_dims = framework::slice_ddim(xshape_dims, 1, xshape_dims.size());
     context->SetOutputDim(framework::GradVarName("X"), x_dims);
diff --git a/paddle/fluid/operators/unsqueeze_op.h b/paddle/fluid/operators/unsqueeze_op.h
index 22818bf81b..d7a1e0ed3b 100644
--- a/paddle/fluid/operators/unsqueeze_op.h
+++ b/paddle/fluid/operators/unsqueeze_op.h
@@ -66,13 +66,20 @@ class UnsqueezeKernel : public framework::OpKernel<T> {
 
     // Validity Check: rank range.
     PADDLE_ENFORCE_LE(output_size, 6,
-                      "The output tensor's rank should be less than 6.");
+                      platform::errors::InvalidArgument(
+                          "The output "
+                          "tensor's rank should be less than 6."));
 
     for (int axis : unsqz_dims) {
       int cur = axis < 0 ?
axis + cur_output_size + 1 : axis;
       // Validity Check: the axis bound
-      PADDLE_ENFORCE_GE(cur, 0);
-      PADDLE_ENFORCE_LE(cur, cur_output_size);
+      PADDLE_ENFORCE_GE(cur, 0,
+                        platform::errors::InvalidArgument(
+                            "The insert dimension value should "
+                            "not be less than 0"));
+      PADDLE_ENFORCE_LE(cur, cur_output_size,
+                        platform::errors::InvalidArgument(
+                            "The insert dimension value should not be larger "
+                            "than the dimension size of input tensor"));
       // Move old axis, and insert new axis
       for (int i = cur_output_size; i >= cur; --i) {
         if (output_shape[i] == 1) {
--
GitLab
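Note on the pattern applied throughout this patch: every bare
PADDLE_ENFORCE(cond, "msg") is rewritten as an explicit comparison macro
(PADDLE_ENFORCE_EQ / _GE / _LT / ...) whose message is wrapped in a typed
builder from platform::errors, such as InvalidArgument, NotFound, or
Unimplemented. The standalone C++ sketch below illustrates the motivation;
EnforceEq and InvalidArgument here are simplified stand-ins for the real
Paddle macros, not their actual implementation.

    #include <sstream>
    #include <stdexcept>
    #include <string>
    #include <utility>

    // Stand-in for platform::errors::InvalidArgument: pairs the message with
    // an error category, so callers can tell bad arguments from missing inputs.
    struct InvalidArgument {
      std::string msg;
      explicit InvalidArgument(std::string m) : msg(std::move(m)) {}
    };

    // Stand-in for PADDLE_ENFORCE_EQ: because the two operands are passed
    // separately instead of as one boolean condition, the failure report can
    // include both observed values automatically.
    template <typename A, typename B>
    void EnforceEq(const A& a, const B& b, const InvalidArgument& err) {
      if (!(a == b)) {
        std::ostringstream os;
        os << "InvalidArgumentError: " << err.msg << " (got " << a
           << ", expected " << b << ")";
        throw std::invalid_argument(os.str());
      }
    }

    int main() {
      int rank = 3;
      // Old style: PADDLE_ENFORCE(rank == 2, "The rank of Input(X) must be 2.")
      // loses the observed value; the new style keeps it:
      try {
        EnforceEq(rank, 2, InvalidArgument("The rank of Input(X) must be 2."));
      } catch (const std::invalid_argument& e) {
        // e.what() == "InvalidArgumentError: The rank of Input(X) must be 2.
        //              (got 3, expected 2)"
      }
      return 0;
    }

This is also why several hunks above split a single PADDLE_ENFORCE with a
compound condition (for example the axis range checks in softmax_op.cc) into
separate _GE and _LT calls: each comparison then fails with its own operand
values.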