diff --git a/paddle/fluid/operators/affine_grid_op.cc b/paddle/fluid/operators/affine_grid_op.cc
index 961d620a074ef9e4eeec285a2c0fd12907fba5b4..f7cc513b234e6e440507af28189ac236b71f9d15 100644
--- a/paddle/fluid/operators/affine_grid_op.cc
+++ b/paddle/fluid/operators/affine_grid_op.cc
@@ -42,29 +42,57 @@ class AffineGridOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Theta"),
-                   "Input(Theta) of AffineGridOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Output"),
-                   "Output(Output) of AffineGridOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("Theta"), true,
+                      platform::errors::NotFound(
+                          "The input 'Theta' of AffineGridOp is not found."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Output"), true,
+                      platform::errors::NotFound(
+                          "The output 'Output' of AffineGridOp is not found."));
     auto theta_dims = ctx->GetInputDim("Theta");
-    PADDLE_ENFORCE(theta_dims.size() == 3,
-                   "AffineGrid's Input(Theta) should be 3-D tensor.");
+    PADDLE_ENFORCE_EQ(
+        theta_dims.size(), 3,
+        platform::errors::InvalidArgument(
+            "The input Theta's dimensions size should be 3. But received "
+            "Theta's dimensions size=[%d], Theta's dimensions=[%s].",
+            theta_dims.size(), theta_dims));
     auto output_shape = ctx->Attrs().Get<std::vector<int>>("output_shape");
     if (output_shape.size() == 0) {
-      PADDLE_ENFORCE(ctx->HasInput("OutputShape"),
-                     "Input(OutputShape) of AffineGridOp should not be null if "
-                     "attr(output_shape) is not configured.");
+      PADDLE_ENFORCE_EQ(
+          ctx->HasInput("OutputShape"), true,
+          platform::errors::NotFound(
+              "The input 'OutputShape' of AffineGridOp should not be null if "
+              "'output_shape' is not configured."));
       auto output_shape_dims = ctx->GetInputDim("OutputShape");
-      PADDLE_ENFORCE(output_shape_dims.size() == 1,
-                     "AffineGrid's Input(OutputShape) should be 1-D tensor.");
+      PADDLE_ENFORCE_EQ(
+          output_shape_dims.size(), 1,
+          platform::errors::InvalidArgument(
+              "The dimensions size of input OutputShape in AffineGridOp "
+              "should be 1. But received OutputShape's dimensions size=[%d], "
+              "OutputShape's dimensions=[%s].",
+              output_shape_dims.size(), output_shape_dims));
     } else {
-      PADDLE_ENFORCE(output_shape.size() == 4,
-                     "The size of attr(output_shape) should be 4.");
+      PADDLE_ENFORCE_EQ(
+          output_shape.size(), 4,
+          platform::errors::InvalidArgument(
+              "The size of attribute 'output_shape' in AffineGridOp should "
+              "be 4. But received output_shape's size=[%d].",
+              output_shape.size()));
     }
-    PADDLE_ENFORCE(theta_dims[1] == 2, "Input(theta) dims[1] should be 2.");
-    PADDLE_ENFORCE(theta_dims[2] == 3, "Input(theta) dims[2] should be 3.");
+    PADDLE_ENFORCE_EQ(
+        theta_dims[1], 2,
+        platform::errors::InvalidArgument(
+            "The second dimension of input 'theta' in AffineGridOp should be "
+            "2. But received second dimension=[%d], dimensions=[%s].",
+            theta_dims[1], theta_dims));
+    PADDLE_ENFORCE_EQ(
+        theta_dims[2], 3,
+        platform::errors::InvalidArgument(
" + "But received third dimesion=[%d], dimesions=[%s]", + theta_dims[2], theta_dims)); + // N * H * W * 2 ctx->SetOutputDim("Output", framework::make_ddim({theta_dims[0], -1, -1, 2})); diff --git a/paddle/fluid/operators/detection/generate_proposal_labels_op.cc b/paddle/fluid/operators/detection/generate_proposal_labels_op.cc index 79780e0d4eeadb537dfe8975a8e61aba19adb7ca..884aa1f6f4e9964009e6d29c6cf6f5dc5c218cae 100644 --- a/paddle/fluid/operators/detection/generate_proposal_labels_op.cc +++ b/paddle/fluid/operators/detection/generate_proposal_labels_op.cc @@ -38,42 +38,64 @@ class GenerateProposalLabelsOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("RpnRois"), - "Input(RpnRois) shouldn't be null."); - PADDLE_ENFORCE(ctx->HasInput("GtClasses"), - "Input(GtClasses) shouldn't be null."); - PADDLE_ENFORCE(ctx->HasInput("IsCrowd"), - "Input(IsCrowd) shouldn't be null."); - PADDLE_ENFORCE(ctx->HasInput("GtBoxes"), - "Input(GtBoxes) shouldn't be null."); - PADDLE_ENFORCE(ctx->HasInput("ImInfo"), "Input(ImInfo) shouldn't be null."); - - PADDLE_ENFORCE( - ctx->HasOutput("Rois"), - "Output(Rois) of GenerateProposalLabelsOp should not be null"); - PADDLE_ENFORCE( - ctx->HasOutput("LabelsInt32"), - "Output(LabelsInt32) of GenerateProposalLabelsOp should not be null"); - PADDLE_ENFORCE( - ctx->HasOutput("BboxTargets"), - "Output(BboxTargets) of GenerateProposalLabelsOp should not be null"); - PADDLE_ENFORCE(ctx->HasOutput("BboxInsideWeights"), - "Output(BboxInsideWeights) of GenerateProposalLabelsOp " - "should not be null"); - PADDLE_ENFORCE(ctx->HasOutput("BboxOutsideWeights"), - "Output(BboxOutsideWeights) of GenerateProposalLabelsOp " - "should not be null"); + PADDLE_ENFORCE_EQ( + ctx->HasInput("RpnRois"), true, + platform::errors::NotFound("Input(RpnRois) shouldn't be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasInput("GtClasses"), true, + platform::errors::NotFound("Input(GtClasses) shouldn't be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasInput("IsCrowd"), true, + platform::errors::NotFound("Input(IsCrowd) shouldn't be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasInput("GtBoxes"), true, + platform::errors::NotFound("Input(GtBoxes) shouldn't be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasInput("ImInfo"), true, + platform::errors::NotFound("Input(ImInfo) shouldn't be null.")); + + PADDLE_ENFORCE_EQ( + ctx->HasOutput("Rois"), true, + platform::errors::NotFound( + "Output(Rois) of GenerateProposalLabelsOp should not be null")); + PADDLE_ENFORCE_EQ(ctx->HasOutput("LabelsInt32"), true, + platform::errors::NotFound("Output(LabelsInt32) of " + "GenerateProposalLabelsOp " + "should not be null")); + PADDLE_ENFORCE_EQ(ctx->HasOutput("BboxTargets"), true, + platform::errors::NotFound("Output(BboxTargets) of " + "GenerateProposalLabelsOp " + "should not be null")); + PADDLE_ENFORCE_EQ( + ctx->HasOutput("BboxInsideWeights"), true, + platform::errors::NotFound( + "Output(BboxInsideWeights) of GenerateProposalLabelsOp " + "should not be null")); + PADDLE_ENFORCE_EQ( + ctx->HasOutput("BboxOutsideWeights"), true, + platform::errors::NotFound( + "Output(BboxOutsideWeights) of GenerateProposalLabelsOp " + "should not be null")); auto rpn_rois_dims = ctx->GetInputDim("RpnRois"); auto gt_boxes_dims = ctx->GetInputDim("GtBoxes"); auto im_info_dims = ctx->GetInputDim("ImInfo"); PADDLE_ENFORCE_EQ(rpn_rois_dims.size(), 2, - "The rank of Input(RpnRois) must be 2."); + 
+                      platform::errors::InvalidArgument(
+                          "The dimensions size of Input(RpnRois) must be 2. "
+                          "But received dimensions size=[%d], dimensions=[%s].",
+                          rpn_rois_dims.size(), rpn_rois_dims));
     PADDLE_ENFORCE_EQ(gt_boxes_dims.size(), 2,
-                      "The rank of Input(GtBoxes) must be 2.");
+                      platform::errors::InvalidArgument(
+                          "The dimensions size of Input(GtBoxes) must be 2. "
+                          "But received dimensions size=[%d], dimensions=[%s].",
+                          gt_boxes_dims.size(), gt_boxes_dims));
     PADDLE_ENFORCE_EQ(im_info_dims.size(), 2,
-                      "The rank of Input(ImInfo) must be 2.");
+                      platform::errors::InvalidArgument(
+                          "The dimensions size of Input(ImInfo) must be 2. "
+                          "But received dimensions size=[%d], dimensions=[%s].",
+                          im_info_dims.size(), im_info_dims));
 
     int class_nums = ctx->Attrs().Get<int>("class_nums");
 
@@ -399,15 +421,30 @@ class GenerateProposalLabelsKernel : public framework::OpKernel<T> {
     bool use_random = context.Attr<bool>("use_random");
     bool is_cascade_rcnn = context.Attr<bool>("is_cascade_rcnn");
     bool is_cls_agnostic = context.Attr<bool>("is_cls_agnostic");
-    PADDLE_ENFORCE_EQ(rpn_rois->lod().size(), 1UL,
-                      "GenerateProposalLabelsOp rpn_rois needs 1 level of LoD");
+    PADDLE_ENFORCE_EQ(
+        rpn_rois->lod().size(), 1UL,
+        platform::errors::InvalidArgument(
+            "GenerateProposalLabelsOp rpn_rois needs 1 level of LoD. But "
+            "received level of LoD is [%d], LoD is [%s].",
+            rpn_rois->lod().size(), rpn_rois->lod()));
     PADDLE_ENFORCE_EQ(
         gt_classes->lod().size(), 1UL,
-        "GenerateProposalLabelsOp gt_classes needs 1 level of LoD");
-    PADDLE_ENFORCE_EQ(is_crowd->lod().size(), 1UL,
-                      "GenerateProposalLabelsOp is_crowd needs 1 level of LoD");
-    PADDLE_ENFORCE_EQ(gt_boxes->lod().size(), 1UL,
-                      "GenerateProposalLabelsOp gt_boxes needs 1 level of LoD");
+        platform::errors::InvalidArgument(
+            "GenerateProposalLabelsOp gt_classes needs 1 level of LoD. But "
+            "received level of LoD is [%d], LoD is [%s].",
+            gt_classes->lod().size(), gt_classes->lod()));
+    PADDLE_ENFORCE_EQ(
+        is_crowd->lod().size(), 1UL,
+        platform::errors::InvalidArgument(
+            "GenerateProposalLabelsOp is_crowd needs 1 level of LoD. But "
+            "received level of LoD is [%d], LoD is [%s].",
+            is_crowd->lod().size(), is_crowd->lod()));
+    PADDLE_ENFORCE_EQ(
+        gt_boxes->lod().size(), 1UL,
+        platform::errors::InvalidArgument(
+            "GenerateProposalLabelsOp gt_boxes needs 1 level of LoD. But "
But " + "received level of LoD is [%d], LoD is [%s].", + gt_boxes->lod().size(), gt_boxes->lod())); int64_t n = static_cast(rpn_rois->lod().back().size() - 1); rois->mutable_data({n * batch_size_per_im, kBoxDim}, context.GetPlace()); diff --git a/paddle/fluid/operators/detection/generate_proposals_op.cc b/paddle/fluid/operators/detection/generate_proposals_op.cc index f9b82b66185002b1aca1c0d76430ef1a09b72854..663fabd3c456185d73510a0d7570d534316b38da 100644 --- a/paddle/fluid/operators/detection/generate_proposals_op.cc +++ b/paddle/fluid/operators/detection/generate_proposals_op.cc @@ -43,14 +43,21 @@ class GenerateProposalsOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Scores"), "Input(Scores) shouldn't be null."); - PADDLE_ENFORCE(ctx->HasInput("BboxDeltas"), - "Input(BboxDeltas) shouldn't be null."); - PADDLE_ENFORCE(ctx->HasInput("ImInfo"), "Input(ImInfo) shouldn't be null."); - PADDLE_ENFORCE(ctx->HasInput("Anchors"), - "Input(Anchors) shouldn't be null."); - PADDLE_ENFORCE(ctx->HasInput("Variances"), - "Input(Variances) shouldn't be null."); + PADDLE_ENFORCE_EQ( + ctx->HasInput("Scores"), true, + platform::errors::NotFound("Input(Scores) shouldn't be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasInput("BboxDeltas"), true, + platform::errors::NotFound("Input(BboxDeltas) shouldn't be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasInput("ImInfo"), true, + platform::errors::NotFound("Input(ImInfo) shouldn't be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasInput("Anchors"), true, + platform::errors::NotFound("Input(Anchors) shouldn't be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasInput("Variances"), true, + platform::errors::NotFound("Input(Variances) shouldn't be null.")); ctx->SetOutputDim("RpnRois", {-1, 4}); ctx->SetOutputDim("RpnRoiProbs", {-1, 1}); @@ -247,7 +254,6 @@ static inline Tensor VectorToTensor(const std::vector &selected_indices, template static inline Tensor NMS(const platform::DeviceContext &ctx, Tensor *bbox, Tensor *scores, T nms_threshold, float eta) { - PADDLE_ENFORCE_NOT_NULL(bbox); int64_t num_boxes = bbox->dims()[0]; // 4: [xmin ymin xmax ymax] int64_t box_size = bbox->dims()[1]; diff --git a/paddle/fluid/operators/detection/generate_proposals_op.cu b/paddle/fluid/operators/detection/generate_proposals_op.cu index 1144bff68da61b440a1b333bce5afe39c47ea4da..31f68297447c5807c981c9e00ca9736d1aac9e1b 100644 --- a/paddle/fluid/operators/detection/generate_proposals_op.cu +++ b/paddle/fluid/operators/detection/generate_proposals_op.cu @@ -379,7 +379,11 @@ class CUDAGenerateProposalsKernel : public framework::OpKernel { float nms_thresh = context.Attr("nms_thresh"); float min_size = context.Attr("min_size"); float eta = context.Attr("eta"); - PADDLE_ENFORCE_GE(eta, 1., "Not support adaptive NMS."); + PADDLE_ENFORCE_GE(eta, 1., + platform::errors::InvalidArgument( + "Not support adaptive NMS. The attribute 'eta' " + "should not less than 1. 
But received eta=[%d]", + eta)); auto &dev_ctx = context.template device_context(); diff --git a/paddle/fluid/operators/detection/rpn_target_assign_op.cc b/paddle/fluid/operators/detection/rpn_target_assign_op.cc index 64db18092bd30b25fc3f90da9a38291d4c56379d..2a16e20c2a7235758ad79cf279c927c7e57a108a 100644 --- a/paddle/fluid/operators/detection/rpn_target_assign_op.cc +++ b/paddle/fluid/operators/detection/rpn_target_assign_op.cc @@ -31,40 +31,44 @@ class RpnTargetAssignOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Anchor"), - "Input(Anchor) of RpnTargetAssignOp should not be null"); - PADDLE_ENFORCE(ctx->HasInput("GtBoxes"), - "Input(GtBoxes) of RpnTargetAssignOp should not be null"); - PADDLE_ENFORCE(ctx->HasInput("IsCrowd"), - "Input(Anchor) of RpnTargetAssignOp should not be null"); - PADDLE_ENFORCE(ctx->HasInput("ImInfo"), - "Input(ImInfo) of RpnTargetAssignOp should not be null"); - - PADDLE_ENFORCE( - ctx->HasOutput("LocationIndex"), - "Output(LocationIndex) of RpnTargetAssignOp should not be null"); - PADDLE_ENFORCE( - ctx->HasOutput("ScoreIndex"), - "Output(ScoreIndex) of RpnTargetAssignOp should not be null"); - PADDLE_ENFORCE( - ctx->HasOutput("TargetLabel"), - "Output(TargetLabel) of RpnTargetAssignOp should not be null"); - PADDLE_ENFORCE( - ctx->HasOutput("TargetBBox"), - "Output(TargetBBox) of RpnTargetAssignOp should not be null"); - PADDLE_ENFORCE( - ctx->HasOutput("BBoxInsideWeight"), - "Output(BBoxInsideWeight) of RpnTargetAssignOp should not be null"); + OP_INOUT_CHECK(ctx->HasInput("Anchor"), "Input", "Anchor", + "rpn_target_assign"); + OP_INOUT_CHECK(ctx->HasInput("GtBoxes"), "Input", "GtBoxes", + "rpn_target_assign"); + OP_INOUT_CHECK(ctx->HasInput("IsCrowd"), "Input", "IsCrowd", + "rpn_target_assign"); + OP_INOUT_CHECK(ctx->HasInput("ImInfo"), "Input", "ImInfo", + "rpn_target_assign"); + + OP_INOUT_CHECK(ctx->HasOutput("LocationIndex"), "Output", "LocationIndex", + "rpn_target_assign"); + OP_INOUT_CHECK(ctx->HasOutput("ScoreIndex"), "Output", "ScoreIndex", + "rpn_target_assign"); + OP_INOUT_CHECK(ctx->HasOutput("TargetLabel"), "Output", "TargetLabel", + "rpn_target_assign"); + OP_INOUT_CHECK(ctx->HasOutput("TargetBBox"), "Output", "TargetBBox", + "rpn_target_assign"); + OP_INOUT_CHECK(ctx->HasOutput("BBoxInsideWeight"), "Output", + "BBoxInsideWeight", "rpn_target_assign"); auto anchor_dims = ctx->GetInputDim("Anchor"); auto gt_boxes_dims = ctx->GetInputDim("GtBoxes"); auto im_info_dims = ctx->GetInputDim("ImInfo"); PADDLE_ENFORCE_EQ(anchor_dims.size(), 2, - "The rank of Input(Anchor) must be 2."); + platform::errors::InvalidArgument( + "The dimensions size of Input(Anchor) must be 2. But " + "received dimensions size=[%d], dimensions=[%s].", + anchor_dims.size(), anchor_dims)); PADDLE_ENFORCE_EQ(gt_boxes_dims.size(), 2, - "The rank of Input(GtBoxes) must be 2."); + platform::errors::InvalidArgument( + "The dimensions size of Input(GtBoxes) must be 2. " + "But received dimensions size=[%d], dimensions=[%s].", + gt_boxes_dims.size(), gt_boxes_dims)); PADDLE_ENFORCE_EQ(im_info_dims.size(), 2, - "The rank of Input(ImInfo) must be 2."); + platform::errors::InvalidArgument( + "The dimensions size of Input(ImInfo) must be 2. 
But " + "received dimensions size=[%d], dimensions=[%s].", + im_info_dims.size(), im_info_dims)); ctx->SetOutputDim("LocationIndex", {-1}); ctx->SetOutputDim("ScoreIndex", {-1}); @@ -357,9 +361,15 @@ class RpnTargetAssignKernel : public framework::OpKernel { auto* bbox_inside_weight = context.Output("BBoxInsideWeight"); PADDLE_ENFORCE_EQ(gt_boxes->lod().size(), 1UL, - "RpnTargetAssignOp gt_boxes needs 1 level of LoD"); + platform::errors::InvalidArgument( + "RpnTargetAssignOp gt_boxes needs 1 level of LoD. " + "But received level of LoD is [%d], LoD is [%s].", + gt_boxes->lod().size(), gt_boxes->lod())); PADDLE_ENFORCE_EQ(is_crowd->lod().size(), 1UL, - "RpnTargetAssignOp is_crowd needs 1 level of LoD"); + platform::errors::InvalidArgument( + "RpnTargetAssignOp is_crowd needs 1 level of LoD. " + "But received level of LoD is [%d], LoD is [%s].", + is_crowd->lod().size(), is_crowd->lod())); int64_t anchor_num = static_cast(anchor->dims()[0]); int64_t batch_num = static_cast(gt_boxes->lod().back().size() - 1); @@ -479,8 +489,20 @@ class RpnTargetAssignKernel : public framework::OpKernel { lod0_score.emplace_back(total_score_num); } - PADDLE_ENFORCE_LE(total_loc_num, max_num); - PADDLE_ENFORCE_LE(total_score_num, max_num); + PADDLE_ENFORCE_LE( + total_loc_num, max_num, + platform::errors::InvalidArgument( + "The number of sampled bboxes should not be greater than the " + "number of all anchor boxes(%d), but the number of sampled " + "bboxes is :%d.", + max_num, total_loc_num)); + PADDLE_ENFORCE_LE( + total_score_num, max_num, + platform::errors::InvalidArgument( + "The number of sampled scores should not be greater than the " + "number of all anchor boxes(%d), but the number of sampled " + "scores is :%d.", + max_num, total_score_num)); lod_loc.emplace_back(lod0_loc); loc_score.emplace_back(lod0_score); diff --git a/paddle/fluid/operators/im2sequence_op.cc b/paddle/fluid/operators/im2sequence_op.cc index 03bb12f1c8307caf4a1d651d7509a9fe42915f15..bb5cbfccfa2f1ac7c1ee05ebebfecc0b5349d710 100644 --- a/paddle/fluid/operators/im2sequence_op.cc +++ b/paddle/fluid/operators/im2sequence_op.cc @@ -26,14 +26,20 @@ class Im2SequenceOp : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), - "Input(X) of Im2SequenceOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of Im2SequenceOp op should not be null."); + PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true, + platform::errors::NotFound( + "The input 'X' of Im2SequenceOp is not found.")); + PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true, + platform::errors::NotFound( + "The output 'Out' of Im2SequenceOp is not found.")); auto in_dim = ctx->GetInputDim("X"); - PADDLE_ENFORCE_EQ(in_dim.size(), 4, - "Input(X) format must be 4D tensor, eg., NCHW."); + PADDLE_ENFORCE_EQ( + in_dim.size(), 4, + platform::errors::InvalidArgument( + "The dimesions size of input 'X' in Im2SequenceOp should be 4. 
But " + "received dimesions size=[%d], dimesions=[%s].", + in_dim.size(), in_dim)); auto img_channels = in_dim[1]; auto kernels = ctx->Attrs().Get>("kernels"); @@ -146,9 +152,13 @@ class Im2SequenceGradOp : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), - "Input(Out@GRAD) shouldn't be null."); + PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true, + platform::errors::NotFound( + "The input 'X' of Im2SequenceGradOp is not found.")); + PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true, + platform::errors::NotFound( + "The input %s of Im2SequenceGradOp is not found.", + framework::GradVarName("Out"))); ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); } }; diff --git a/paddle/fluid/operators/label_smooth_op.cc b/paddle/fluid/operators/label_smooth_op.cc index c3ae25bf47a6959aadd0dfd8f61ab945af90ee44..ac0405b9a6e64a3ea53203533ae4e6787a24c2fe 100644 --- a/paddle/fluid/operators/label_smooth_op.cc +++ b/paddle/fluid/operators/label_smooth_op.cc @@ -28,18 +28,24 @@ class LabelSmoothOp : public framework::OperatorWithKernel { : OperatorWithKernel(type, inputs, outputs, attrs) {} void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), - "Input(X) of LabelSmoothOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of LabelSmoothOp should not be null."); + PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true, + platform::errors::NotFound( + "The input 'X' of LabelSmoothOp is not found.")); + PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true, + platform::errors::NotFound( + "The output 'Out' of LabelSmoothOp is not found.")); auto in_dims = ctx->GetInputDim("X"); if (ctx->HasInput("PriorDist")) { auto noise_dims = ctx->GetInputDim("PriorDist"); auto noise_numel = paddle::framework::product(noise_dims); - PADDLE_ENFORCE( - in_dims[in_dims.size() - 1] == noise_numel, - "The number of elements in Input(PriorDist) must be equal to the " - "dimension of each label."); + PADDLE_ENFORCE_EQ( + in_dims[in_dims.size() - 1], noise_numel, + platform::errors::InvalidArgument( + "The number of elements in input 'PriorDist' must be equal to " + "the " + "dimension of each label. 
+              "But received each label's dimension=[%d], number of elements "
+              "in input 'PriorDist' is [%d].",
+              in_dims[in_dims.size() - 1], noise_numel));
     }
     ctx->ShareLoD("X", /*->*/ "Out");
     ctx->SetOutputDim("Out", in_dims);
diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py
index 857b872ae47e294be98ca5336572f04fe24b6f7e..7fb26993b952063f95c08b600ce98d94742baf92 100644
--- a/python/paddle/fluid/layers/detection.py
+++ b/python/paddle/fluid/layers/detection.py
@@ -406,6 +406,22 @@ def rpn_target_assign(bbox_pred,
     """
     helper = LayerHelper('rpn_target_assign', **locals())
+
+    check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
+                             'rpn_target_assign')
+    check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
+                             'rpn_target_assign')
+    check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
+                             'rpn_target_assign')
+    check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
+                             'rpn_target_assign')
+    check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
+                             'rpn_target_assign')
+    check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
+                             'rpn_target_assign')
+    check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
+                             'rpn_target_assign')
+
     # Assign target label to anchors
     loc_index = helper.create_variable_for_type_inference(dtype='int32')
     score_index = helper.create_variable_for_type_inference(dtype='int32')
@@ -2541,6 +2557,13 @@ def generate_proposal_labels(rpn_rois,
 
     helper = LayerHelper('generate_proposal_labels', **locals())
 
+    check_variable_and_dtype(rpn_rois, 'rpn_rois', ['float32', 'float64'],
+                             'generate_proposal_labels')
+    check_variable_and_dtype(gt_classes, 'gt_classes', ['int32'],
+                             'generate_proposal_labels')
+    check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
+                             'generate_proposal_labels')
+
     rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
     labels_int32 = helper.create_variable_for_type_inference(
         dtype=gt_classes.dtype)
@@ -2775,7 +2798,7 @@ def generate_proposals(scores,
         im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
            image information for N batch. Height and width are the input sizes
            and scale is the ratio of network input size and original size.
-           The data type must be int32.
+           The data type can be float32 or float64.
         anchors(Variable): A 4-D Tensor represents the anchors with a layout
            of [H, W, A, 4]. H and W are height and width of the feature map,
            num_anchors is the box count of each position. Each anchor is
@@ -2818,6 +2841,17 @@ def generate_proposals(scores,
     """
     helper = LayerHelper('generate_proposals', **locals())
+
+    check_variable_and_dtype(scores, 'scores', ['float32'],
+                             'generate_proposals')
+    check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'],
+                             'generate_proposals')
+    check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
+                             'generate_proposals')
+    check_variable_and_dtype(anchors, 'anchors', ['float32'],
+                             'generate_proposals')
+    check_variable_and_dtype(variances, 'variances', ['float32'],
+                             'generate_proposals')
+
     rpn_rois = helper.create_variable_for_type_inference(
         dtype=bbox_deltas.dtype)
     rpn_roi_probs = helper.create_variable_for_type_inference(
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index f519ab9bb7ccad360d8ae6fc128764d0053b387a..029dc1609905b40479b5f0e33ca35be077b19918 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -6878,6 +6878,8 @@ def im2sequence(input,
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
 
+    check_variable_and_dtype(input, 'input', ['float32'], 'im2sequence')
+
     if isinstance(filter_size, int):
         filter_size = [filter_size, filter_size]
     if isinstance(stride, int):
@@ -8032,7 +8034,7 @@ def label_smooth(label,
         label(Variable): The input variable containing the label data. The
                         label data should use one-hot representation. It's
                         a multidimensional tensor with a shape of
-                        :math:`[N_1, ..., Depth]`, where Depth is class number.
+                        :math:`[N_1, ..., Depth]`, where Depth is class number. The dtype can be "float32" or "float64".
         prior_dist(Variable, optional): The prior distribution to be used to
                         smooth labels. If not provided, an uniform distribution
                         is used. It's a multidimensional tensor with a shape of
@@ -8055,7 +8057,7 @@ def label_smooth(label,
            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
 
-           label = layers.data(name="label", shape=[1], dtype="float32")
+           label = layers.data(name="label", shape=[1], dtype="int32")
            one_hot_label = layers.one_hot(input=label, depth=10)
            smooth_label = layers.label_smooth(
                label=one_hot_label, epsilon=0.1, dtype="float32")
@@ -8067,6 +8069,9 @@ def label_smooth(label,
         return core.ops.label_smooth(label, prior_dist, 'epsilon',
                                      float(epsilon))
 
+    check_variable_and_dtype(label, 'label', ['float32', 'float64'],
+                             'label_smooth')
+
     helper = LayerHelper("label_smooth", **locals())
     label.stop_gradient = True
     smooth_label = helper.create_variable_for_type_inference(dtype)
@@ -10182,6 +10187,9 @@ def affine_grid(theta, out_shape, name=None):
     """
     helper = LayerHelper('affine_grid')
 
+    check_variable_and_dtype(theta, 'theta', ['float32', 'float64'],
+                             'affine_grid')
+
     if not (isinstance(out_shape, list) or isinstance(out_shape, tuple) or \
             isinstance(out_shape, Variable)):
         raise ValueError("The out_shape should be a list, tuple or Variable.")
@@ -10194,6 +10202,8 @@ def affine_grid(theta, out_shape, name=None):
     attrs = {}
     if isinstance(out_shape, Variable):
         ipts['OutputShape'] = out_shape
+        check_variable_and_dtype(out_shape, 'out_shape', ['int32'],
+                                 'affine_grid')
     else:
         attrs['output_shape'] = out_shape
diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index ff95dd7cf832d3e7df4f7f398326a2861eb137fc..d69b0831d6855237fad97068055d520ca57ffacf 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -3000,8 +3000,7 @@ class TestBook(LayerTest):
             out, ids = layers.argsort(input=data, axis=1)
 
             theta = layers.data(name="theta", shape=[2, 3], dtype="float32")
-            out_shape = layers.data(
-                name="out_shape", shape=[-1], dtype="float32")
+            out_shape = layers.data(name="out_shape", shape=[-1], dtype="int32")
             data_0 = layers.affine_grid(theta, out_shape)
             data_1 = layers.affine_grid(theta, [5, 3, 28, 28])
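
Review note (not part of the patch): the `check_variable_and_dtype` guards added on the Python side move dtype validation to graph-construction time, so an unsupported dtype now fails with a `TypeError` before the C++ `InferShape` checks run. Below is a minimal, hypothetical sketch of how that surfaces for `fluid.layers.affine_grid`, assuming an environment with this branch installed and the fluid 1.x `layers.data` API used elsewhere in this diff; variable names are illustrative only.

```python
# Hypothetical usage sketch: exercises the new dtype guard in affine_grid.
import paddle.fluid as fluid

main_prog, startup_prog = fluid.Program(), fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    # Supported dtype: the op is appended to the program as before.
    theta = fluid.layers.data(name="theta", shape=[2, 3], dtype="float32")
    grid = fluid.layers.affine_grid(theta, [5, 3, 28, 28])

    # Unsupported dtype: rejected eagerly by check_variable_and_dtype,
    # instead of surfacing later as a C++ enforce failure at run time.
    theta_fp16 = fluid.layers.data(name="theta16", shape=[2, 3], dtype="float16")
    try:
        fluid.layers.affine_grid(theta_fp16, [5, 3, 28, 28])
    except TypeError as e:
        print("affine_grid rejected float16 theta:", e)
```

The same pattern applies to the other layers touched here (`rpn_target_assign`, `generate_proposal_labels`, `generate_proposals`, `im2sequence`, `label_smooth`): each now validates its tensor inputs against the dtype lists shown in the diff before creating its output variables.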