diff --git a/paddle/fluid/operators/metrics/precision_recall_op.cc b/paddle/fluid/operators/metrics/precision_recall_op.cc
index 054f8c70cc2130431bfc4e903da67b94155863a6..ad7a485dbd0b3287e129dbaeadae63893057a483 100644
--- a/paddle/fluid/operators/metrics/precision_recall_op.cc
+++ b/paddle/fluid/operators/metrics/precision_recall_op.cc
@@ -22,18 +22,24 @@ class PrecisionRecallOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("MaxProbs"),
-                   "Input(MaxProbs) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Indices"),
-                   "Input(Indices) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Labels"),
-                   "Input(Labels) should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("BatchMetrics"),
-                   "Output(BatchMetrics) should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("AccumMetrics"),
-                   "Output(AccumMetrics) should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("AccumStatesInfo"),
-                   "Output(AccumStatesInfo) should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("MaxProbs"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(MaxProbs) should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasInput("Indices"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(Indices) should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("Labels"), true,
+        platform::errors::InvalidArgument("Input(Labels) should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("BatchMetrics"), true,
+                      platform::errors::InvalidArgument(
+                          "Output(BatchMetrics) should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("AccumMetrics"), true,
+                      platform::errors::InvalidArgument(
+                          "Output(AccumMetrics) should not be null."));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("AccumStatesInfo"), true,
+                      platform::errors::InvalidArgument(
+                          "Output(AccumStatesInfo) should not be null."));
 
     int64_t cls_num =
         static_cast(ctx->Attrs().Get("class_number"));
@@ -41,38 +47,48 @@ class PrecisionRecallOp : public framework::OperatorWithKernel {
     auto labels_dims = ctx->GetInputDim("Labels");
 
     if (ctx->IsRuntime()) {
-      PADDLE_ENFORCE_EQ(max_probs_dims[1], 1,
-                        "Each instance contains one max probability, so the "
-                        "shape of Input(MaxProbs) should be [batch_size, 1].");
       PADDLE_ENFORCE_EQ(
-          ctx->GetInputDim("Indices"), max_probs_dims,
-          "The shape of Input(Indices) should bes same with max_probs_dims");
+          max_probs_dims[1], 1,
+          platform::errors::InvalidArgument(
+              "Each instance contains one max probability, so the shape of "
+              "Input(MaxProbs) should be [batch_size, 1]. But received (%d)",
+              max_probs_dims[1]));
+      PADDLE_ENFORCE_EQ(ctx->GetInputDim("Indices"), max_probs_dims,
+                        platform::errors::InvalidArgument(
+                            "The shape of Input(Indices) should be the same "
+                            "with max_probs_dims. But received (%d) != (%d)",
+                            ctx->GetInputDim("Indices"), max_probs_dims));
+      PADDLE_ENFORCE_EQ(max_probs_dims[0], labels_dims[0],
+                        platform::errors::InvalidArgument(
+                            "The 1st dimension of Input(MaxProbs) and "
+                            "Input(Labels) both are batch_size and the shape "
+                            "should be the same. But received (%d) != (%d)",
+                            max_probs_dims[0], labels_dims[0]));
       PADDLE_ENFORCE_EQ(
-          max_probs_dims[0], labels_dims[0],
-          "The 1st dimension of Input(MaxProbs) and "
-          "Input(Labels) both are batch_size and the shape should "
-          "be the same.");
-      PADDLE_ENFORCE_EQ(labels_dims[1], 1,
-                        "The 2nd dimension of Input(Labels) contains instance "
-                        "label and the shape should be equal to 1.");
+          labels_dims[1], 1,
+          platform::errors::InvalidArgument(
+              "The 2nd dimension of Input(Labels) contains instance label and "
+              "the shape should be equal to 1. But received (%d)",
+              labels_dims[1]));
     }
 
     if (ctx->HasInput("Weights")) {
       auto weights_dims = ctx->GetInputDim("Weights");
 
       if (ctx->IsRuntime()) {
-        PADDLE_ENFORCE_EQ(weights_dims,
-                          framework::make_ddim({max_probs_dims[0], 1}),
-                          "The shape of Input(Weights) should be "
-                          "[batch_size, 1].");
+        PADDLE_ENFORCE_EQ(
+            weights_dims, framework::make_ddim({max_probs_dims[0], 1}),
+            platform::errors::InvalidArgument(
+                "The shape of Input(Weights) should be [batch_size, 1]."));
       }
     }
 
     if (ctx->HasInput("StatesInfo")) {
       auto states_dims = ctx->GetInputDim("StatesInfo");
 
       if (ctx->IsRuntime()) {
-        PADDLE_ENFORCE_EQ(states_dims, framework::make_ddim({cls_num, 4}),
-                          "The shape of Input(StatesInfo) should be "
-                          "[class_number, 4].");
+        PADDLE_ENFORCE_EQ(
+            states_dims, framework::make_ddim({cls_num, 4}),
+            platform::errors::InvalidArgument(
+                "The shape of Input(StatesInfo) should be [class_number, 4]."));
       }
     }
diff --git a/paddle/fluid/operators/metrics/precision_recall_op.h b/paddle/fluid/operators/metrics/precision_recall_op.h
index d6d4a5adc3ed0760f2a1356c70eda275c3195969..4417c0866bdecec6143bad8cbe48d70bf2edbfcd 100644
--- a/paddle/fluid/operators/metrics/precision_recall_op.h
+++ b/paddle/fluid/operators/metrics/precision_recall_op.h
@@ -58,11 +58,25 @@ class PrecisionRecallKernel : public framework::OpKernel {
       size_t idx = ids_data[i];
       size_t label = labels_data[i];
 
-      PADDLE_ENFORCE(idx >= 0 && idx < cls_num,
-                     "Class index of each instance should be in "
-                     "[0, class_number).");
-      PADDLE_ENFORCE(label >= 0 && label < cls_num,
-                     "Label of each instance should be in [0, class_number).");
+      PADDLE_ENFORCE_GE(idx, 0, platform::errors::InvalidArgument(
+                                    "Class index of each instance should be "
+                                    "greater than or equal to 0. But received (%d)",
+                                    idx));
+      PADDLE_ENFORCE_LT(idx, cls_num,
+                        platform::errors::InvalidArgument(
+                            "Class index of each instance should be less than "
+                            "cls_num (%d). But received (%d)",
+                            cls_num, idx));
+
+      PADDLE_ENFORCE_GE(label, 0, platform::errors::InvalidArgument(
+                                      "Label of each instance should be greater "
+                                      "than or equal to 0. But received (%d)",
+                                      label));
+      PADDLE_ENFORCE_LT(label, cls_num,
+                        platform::errors::InvalidArgument(
+                            "Label of each instance should be less than "
+                            "cls_num (%d). But received (%d)",
+                            cls_num, label));
       T w = weights_data ? weights_data[i] : 1.0;
       if (idx == label) {
diff --git a/paddle/fluid/operators/sequence_ops/sequence_concat_op.cc b/paddle/fluid/operators/sequence_ops/sequence_concat_op.cc
index f0fd1fc65cb81ee63caa3804224f1a87321a192f..348e06396c7aa430c7a1b85c95a954142e508dd7 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_concat_op.cc
+++ b/paddle/fluid/operators/sequence_ops/sequence_concat_op.cc
@@ -41,13 +41,20 @@ class SequenceConcatOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(framework::InferShapeContext *context) const override {
-    PADDLE_ENFORCE(context->HasInputs("X"),
-                   "Input(X) of Sequence Concat Op should not be null.");
-    PADDLE_ENFORCE(context->HasOutput("Out"),
-                   "Output(Out) of Sequence Concat Op should not be null.");
+    PADDLE_ENFORCE_EQ(
+        context->HasInputs("X"), true,
+        platform::errors::InvalidArgument(
+            "Input(X) of Sequence Concat Op should not be null."));
+    PADDLE_ENFORCE_EQ(
+        context->HasOutput("Out"), true,
+        platform::errors::InvalidArgument(
+            "Output(Out) of Sequence Concat Op should not be null."));
     PADDLE_ENFORCE_GT(context->Inputs("X").size(), 1,
-                      "The number of input sequences is at least two.");
+                      platform::errors::InvalidArgument(
+                          "The number of input sequences is at least two. But "
+                          "the number of input sequences is (%d)",
+                          context->Inputs("X").size()));
     auto x_dims = context->GetInputsDim("X");
     int64_t batch_size = 0;
     int64_t feature_size = 0;
@@ -62,7 +69,10 @@ class SequenceConcatOp : public framework::OperatorWithKernel {
       } else {
         PADDLE_ENFORCE_EQ(
             feature_size, framework::product(x_dim) / x_dim[0],
-            "Inputs of sequence concat must have same feature size");
+            platform::errors::InvalidArgument(
+                "Inputs of sequence concat must have same feature size. But "
+                "received (%d) != (%d)",
+                feature_size, framework::product(x_dim) / x_dim[0]));
       }
     }
     if (batch_size < 0) {
diff --git a/paddle/fluid/operators/sequence_ops/sequence_concat_op.h b/paddle/fluid/operators/sequence_ops/sequence_concat_op.h
index 9c5cc5c80316980caa29668a834fd09e1c70bcbd..d07d647ed507400fe1d510fd1d881b70433b22a8 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_concat_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_concat_op.h
@@ -73,16 +73,23 @@ class SeqConcatKernel : public framework::OpKernel {
     for (auto &x : xs) {
       if (lod_size == 0) {
         PADDLE_ENFORCE_EQ(x.get().lod().empty(), false,
-                          "Input(X) Tensor of SequenceConcatOp does not "
-                          "contain LoD information.");
+                          platform::errors::InvalidArgument(
+                              "Input(X) Tensor of SequenceConcatOp does not "
+                              "contain LoD information."));
         lod_size = x.get().lod()[0].size();
       } else {
-        PADDLE_ENFORCE_EQ(
-            lod_size, x.get().lod()[0].size(),
-            "The number of sequence must be same between each input");
+        PADDLE_ENFORCE_EQ(lod_size, x.get().lod()[0].size(),
+                          platform::errors::InvalidArgument(
+                              "The number of sequence must be the same between "
+                              "each input. But received (%d) != (%d)",
+                              lod_size, x.get().lod()[0].size()));
       }
     }
-    PADDLE_ENFORCE_NE(lod_size, 0, "Each input must have sequence information");
+    PADDLE_ENFORCE_NE(lod_size, 0,
+                      platform::errors::InvalidArgument(
+                          "Each input must have sequence information. But "
+                          "received input lod size is (%d)",
+                          lod_size));
 
     std::vector x_in_order;
     out.set_lod(detail::ConcatLoD(xs, &x_in_order));
@@ -100,7 +107,11 @@ class SeqConcatGradKernel : public framework::OpKernel {
     auto xs = context.MultiInput("X");
     auto dxs =
         context.MultiOutput(framework::GradVarName("X"));
-    PADDLE_ENFORCE_EQ(xs.size(), dxs.size());
+    PADDLE_ENFORCE_EQ(xs.size(), dxs.size(),
+                      platform::errors::InvalidArgument(
+                          "The number of Input X and Output Grad X must be "
+                          "the same. But received (%d) != (%d)",
+                          xs.size(), dxs.size()));
     for (size_t i = 0; i < dxs.size(); ++i) {
       if (dxs[i] != nullptr) {
         dxs[i]->set_lod(xs[i]->lod());
diff --git a/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc b/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc
index e92fe3934cf2990f35b849e58a2a2e4bad01dabd..8e352348850e93ae5557de9b521e855f93f11d17 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc
+++ b/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc
@@ -26,24 +26,34 @@ class SequencePadOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of SequencePadOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("PadValue"), true,
-                      "Input(PadValue) of SequencePadOp should not be null.");
+                      platform::errors::InvalidArgument(
+                          "Input(X) of SequencePadOp should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("PadValue"), true,
+        platform::errors::InvalidArgument(
+            "Input(PadValue) of SequencePadOp should not be null."));
     PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) of SequencePadOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Length"), true,
-                      "Output(Length) of SequencePadOp should not be null.");
+                      platform::errors::InvalidArgument(
+                          "Output(Out) of SequencePadOp should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("Length"), true,
+        platform::errors::InvalidArgument(
+            "Output(Length) of SequencePadOp should not be null."));
 
     auto x_dims = ctx->GetInputDim("X");
     PADDLE_ENFORCE_GE(x_dims.size(), 2,
-                      "The rank of Input(X) can't be less than 2.");
+                      platform::errors::InvalidArgument(
+                          "The rank of SequencePadOp Input(X) can't be less "
+                          "than 2. But received (%d)",
+                          x_dims.size()));
     auto time_step_dims = framework::slice_ddim(x_dims, 1, x_dims.size());
     auto pad_value_dims = ctx->GetInputDim("PadValue");
     PADDLE_ENFORCE_EQ(pad_value_dims == framework::make_ddim({1}) ||
                           pad_value_dims == time_step_dims,
                       true,
-                      "The Input(PadValue) must be a scalar or a tensor whose "
-                      "shape equals to time steps in sequences");
+                      platform::errors::InvalidArgument(
+                          "The Input(PadValue) must be a scalar or a tensor "
+                          "whose shape equals to time steps in sequences"));
 
     int out_dim_0 = -1;
 
@@ -54,31 +64,43 @@ class SequencePadOp : public framework::OperatorWithKernel {
           boost::get(ctx->GetInputVarPtrs("X")[0]);
       const auto& x_lod = x_var->Get().lod();
       PADDLE_ENFORCE_EQ(x_lod.empty(), false,
-                        "The Input(X) must hold lod info.");
+                        platform::errors::InvalidArgument(
+                            "The Input(X) must hold lod info."));
       const auto& x_lod_0 = x_lod[0];
       PADDLE_ENFORCE_GE(x_lod_0.size(), 2,
-                        "The Input(X)'s lod info is corrupted.");
+                        platform::errors::InvalidArgument(
+                            "The Input(X)'s lod info is corrupted."));
       PADDLE_ENFORCE_EQ(
           x_dims[0], static_cast(x_lod_0.back()),
-          "The Input(X)'s lod info mismatches the actual tensor shape.");
+          platform::errors::InvalidArgument(
+              "The Input(X)'s lod info mismatches the actual tensor shape. The "
+              "Input(X)'s lod info is (%d), the actual tensor shape is (%d)",
+              x_dims[0], static_cast(x_lod_0.back())));
 
       int seq_num = x_lod_0.size() - 1;
       int max_seq_len = math::MaximumSequenceLength(x_lod_0);
       if (padded_length == -1) {
         padded_length = max_seq_len;
       }
-      PADDLE_ENFORCE_GE(padded_length, max_seq_len,
-                        "The Attr(padded_length) must be -1 or an int greater "
-                        "than the length of the longest original sequence.");
+      PADDLE_ENFORCE_GE(
+          padded_length, max_seq_len,
+          platform::errors::InvalidArgument(
+              "The Attr(padded_length) must be -1 or an int greater than the "
+              "length of the longest original sequence. But the padded_length "
+              "received is (%d), the length of the longest original sequence "
+              "is (%d)",
+              padded_length, max_seq_len));
       out_dim_0 = seq_num;
     } else {
       // compile time
       if (padded_length == -1) {
         padded_length = 1;
       }
-      PADDLE_ENFORCE_GT(
-          ctx->GetLoDLevel("X"), 0,
-          "The LoD level Input(X) of sequence_pad should be larger than 0.");
+      PADDLE_ENFORCE_GT(ctx->GetLoDLevel("X"), 0,
+                        platform::errors::InvalidArgument(
+                            "The LoD level Input(X) of sequence_pad should be "
+                            "larger than 0. But received (%d)",
+                            ctx->GetLoDLevel("X")));
     }
 
     std::vector out_dims_vec{out_dim_0, padded_length};
@@ -185,10 +207,12 @@ class SequencePadGradOp : public framework::OperatorWithKernel {
 
   void InferShape(framework::InferShapeContext* ctx) const override {
     PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of SequencePadGradOp should not be null.");
+                      platform::errors::InvalidArgument(
+                          "Input(X) of SequencePadGradOp should not be null."));
     PADDLE_ENFORCE_EQ(
         ctx->HasInput(framework::GradVarName("Out")), true,
-        "Input(Out@GRAD) of SequencePadGradOp should not be null.");
+        platform::errors::InvalidArgument(
+            "Input(Out@GRAD) of SequencePadGradOp should not be null."));
 
     if (ctx->HasOutput(framework::GradVarName("X"))) {
       ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
diff --git a/paddle/fluid/operators/sequence_ops/sequence_pad_op.h b/paddle/fluid/operators/sequence_ops/sequence_pad_op.h
index 701cdc496fab8645fd286700d3c4f312c7bc2d14..0e1616bac16689ecac38f43f1647e6a849f863d2 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_pad_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_pad_op.h
@@ -37,7 +37,8 @@ class SequencePadOpKernel : public framework::OpKernel {
 
     PADDLE_ENFORCE_EQ(
         x->lod().empty(), false,
-        "Input(X) Tensor of SequencePadOp does not contain LoD information.");
+        platform::errors::InvalidArgument("Input(X) Tensor of SequencePadOp "
+                                          "does not contain LoD information."));
 
     const auto* pad_value = ctx.Input("PadValue");
diff --git a/paddle/fluid/operators/sequence_ops/sequence_reshape_op.cc b/paddle/fluid/operators/sequence_ops/sequence_reshape_op.cc
index cc8f8488a06e45f194187bad8e67949a6810926d..7871843f4836cb8895a310a672a0fd227e958b84 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_reshape_op.cc
+++ b/paddle/fluid/operators/sequence_ops/sequence_reshape_op.cc
@@ -23,13 +23,19 @@ class SequenceReshapeOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceReshapeOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SequenceReshapeOp should not be null.");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(X) of SequenceReshapeOp should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("Out"), true,
+        platform::errors::InvalidArgument(
+            "Output(Out) of SequenceReshapeOp should not be null."));
     auto x_dims = ctx->GetInputDim("X");
     auto x_numel = product(x_dims);
-    PADDLE_ENFORCE_EQ(x_dims.size(), 2U, "Rank of Input(X) should be 2.");
+    PADDLE_ENFORCE_EQ(
+        x_dims.size(), 2U,
+        platform::errors::InvalidArgument(
+            "Rank of Input(X) should be 2. But received (%d)", x_dims.size()));
     int new_dim = ctx->Attrs().Get("new_dim");
     if (ctx->IsRuntime()) {
       ctx->SetOutputDim("Out",
@@ -90,11 +96,14 @@ class SequenceReshapeGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(
-        ctx->HasInput(framework::GradVarName("Out")),
-        "Input(Out@GRAD) of SequenceReshapeGradOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceReshapeGradOp should not be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput(framework::GradVarName("Out")), true,
+        platform::errors::InvalidArgument(
+            "Input(Out@GRAD) of SequenceReshapeGradOp should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("X"), true,
+        platform::errors::InvalidArgument(
+            "Input(X) of SequenceReshapeGradOp should not be null."));
 
     ctx->ShareDim("X", /*->*/ framework::GradVarName("X"));
     ctx->ShareLoD("X", /*->*/ framework::GradVarName("X"));
diff --git a/paddle/fluid/operators/sequence_ops/sequence_reshape_op.h b/paddle/fluid/operators/sequence_ops/sequence_reshape_op.h
index 7512a0ac2410910d26e8aec24b456844a02acd15..39adf278a97df08a77234f34f9b8811a8a1927cd 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_reshape_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_reshape_op.h
@@ -33,13 +33,19 @@ class SequenceReshapeKernel : public framework::OpKernel {
     auto& in_lod = in->lod();
 
     PADDLE_ENFORCE_EQ(in_lod.empty(), false,
-                      "Input(X) Tensor of SequenceReshapeOp does not contain "
-                      "LoD information.");
+                      platform::errors::InvalidArgument(
+                          "Input(X) Tensor of SequenceReshapeOp does not "
+                          "contain LoD information."));
     PADDLE_ENFORCE_EQ(in_lod.size(), 1UL,
-                      "Only support one level sequence now.");
-    PADDLE_ENFORCE_EQ(
-        (uint64_t)in_dims[0], in_lod[0].back(),
-        "Inconsistent size between X.shape[0] and X.lod()[0].back().");
+                      platform::errors::InvalidArgument(
+                          "Only support one level sequence now. But lod size "
+                          "of Input(X) is (%d)",
+                          in_lod.size()));
+    PADDLE_ENFORCE_EQ((uint64_t)in_dims[0], in_lod[0].back(),
+                      platform::errors::InvalidArgument(
+                          "The size of X.shape[0] and X.lod()[0].back() should "
+                          "be the same. But received (%d) != (%d)",
+                          (uint64_t)in_dims[0], in_lod[0].back()));
 
     auto in_lod_l0 = in_lod[0];
     int seq_num = in_lod_l0.size() - 1;
@@ -56,10 +62,11 @@ class SequenceReshapeKernel : public framework::OpKernel {
         size_t offset = 0;
         offset = (seq_len * in_width) / out_width;
         PADDLE_ENFORCE_EQ(offset * out_width, seq_len * in_width,
-                          "Please make sure (sequence_length * dimension) can "
-                          "be divided by new_dim with no remainder for each "
-                          "sequence. The %dth sequence is invalid.",
-                          i + 1);
+                          platform::errors::InvalidArgument(
+                              "Please make sure (sequence_length * dimension) "
+                              "can be divided by new_dim with no remainder for "
+                              "each sequence. The %dth sequence is invalid.",
+                              i + 1));
         out_lod[0][i + 1] = out_lod[0][i] + offset;
       }
     }
diff --git a/paddle/fluid/operators/sequence_ops/sequence_reverse_op.h b/paddle/fluid/operators/sequence_ops/sequence_reverse_op.h
index c9415e52b1eaa4e0709a1062ec6d087d96c9cc05..4efebc6bb5e1bf689080a5c7845cfb9a0fcba5b2 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_reverse_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_reverse_op.h
@@ -27,12 +27,19 @@ class SequenceReverseOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must exist");
-    PADDLE_ENFORCE(ctx->HasOutput("Y"), "Output(Y) must exist");
+    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
+                      platform::errors::InvalidArgument(
+                          "Input(X) of SequenceReverse must exist"));
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Y"), true,
+                      platform::errors::InvalidArgument(
+                          "Output(Y) of SequenceReverse must exist"));
 
     auto x_dim = ctx->GetInputDim("X");
     PADDLE_ENFORCE_GE(x_dim.size(), 2,
-                      "Rank of Input(X) must be not less than 2.");
+                      platform::errors::InvalidArgument(
+                          "Rank of Input(X) of SequenceReverse must be not "
+                          "less than 2. The Input(X) tensor's rank is (%d)",
+                          x_dim.size()));
 
     ctx->SetOutputDim("Y", x_dim);
     ctx->ShareLoD("X", "Y");
@@ -108,10 +115,15 @@ class SequenceReverseOpKernel : public framework::OpKernel {
     auto *y = ctx.Output("Y");
 
     PADDLE_ENFORCE_EQ(x.lod().empty(), false,
-                      "Input(X) Tensor of SequenceReverseOp does not contain "
-                      "LoD information.");
-    PADDLE_ENFORCE_EQ(x.lod().size(), 1,
-                      "SequenceReverse Op only support one level lod.");
+                      platform::errors::InvalidArgument(
+                          "Input(X) Tensor of SequenceReverseOp does not "
+                          "contain LoD information."));
+
+    PADDLE_ENFORCE_EQ(
+        x.lod().size(), 1,
+        platform::errors::InvalidArgument("SequenceReverse Op only support one "
+                                          "level lod. Input(X) lod is (%d)",
+                                          x.lod().size()));
 
     const size_t *lod;
     size_t lod_count = x.lod()[0].size();
@@ -131,8 +143,10 @@ class SequenceReverseOpKernel : public framework::OpKernel {
     auto *x_data = x.data();
     auto *y_data = y->mutable_data(ctx.GetPlace());
 
-    PADDLE_ENFORCE_NE(x_data, y_data,
-                      "SequenceReverse Op does not support in-place operation");
+    PADDLE_ENFORCE_NE(
+        x_data, y_data,
+        platform::errors::InvalidArgument(
+            "SequenceReverse Op does not support in-place operation"));
 
     if (platform::is_cpu_place(ctx.GetPlace())) {
       for (size_t idx = 0; idx < lod_count - 1; idx++) {
diff --git a/paddle/fluid/operators/sequence_ops/sequence_unpad_op.cc b/paddle/fluid/operators/sequence_ops/sequence_unpad_op.cc
index e93ce7203f8a0fb47c2ad73f8c658c6737357bb4..2331e0da0dbb7d3ec73c26839f0d66c789484356 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_unpad_op.cc
+++ b/paddle/fluid/operators/sequence_ops/sequence_unpad_op.cc
@@ -26,22 +26,36 @@ class SequenceUnpadOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of SequenceUnpadOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Length"), true,
-                      "Input(Length) of SequenceUnpadOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) of SequenceUnpadOp should not be null.");
+                      platform::errors::InvalidArgument(
+                          "Input(X) of SequenceUnpadOp should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("Length"), true,
+        platform::errors::InvalidArgument(
+            "Input(Length) of SequenceUnpadOp should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("Out"), true,
+        platform::errors::InvalidArgument(
+            "Output(Out) of SequenceUnpadOp should not be null."));
 
     auto x_dims = ctx->GetInputDim("X");
-    PADDLE_ENFORCE_GE(x_dims.size(), 2,
-                      "The rank of Input(X) can't be less than 2.");
+    PADDLE_ENFORCE_GE(
+        x_dims.size(), 2,
+        platform::errors::InvalidArgument(
+            "The rank of Input(X) can't be less than 2. But received (%d)",
+            x_dims.size()));
 
     auto len_dims = ctx->GetInputDim("Length");
-    PADDLE_ENFORCE_EQ(len_dims.size(), 1,
-                      "The shape of Input(Length) should be [batch_size].");
     PADDLE_ENFORCE_EQ(
-        len_dims[0], x_dims[0],
-        "Input(X) and Input(Length) should have the same first dimension.");
+        len_dims.size(), 1,
+        platform::errors::InvalidArgument("The shape of Input(Length) should "
+                                          "be [batch_size]. But received (%d)",
+                                          len_dims.size()));
+    PADDLE_ENFORCE_EQ(len_dims[0], x_dims[0],
+                      platform::errors::InvalidArgument(
+                          "Input(X) and Input(Length) should have the same "
+                          "first dimension. But the first dimension of "
+                          "Input(X) and Input(Length) is (%d) != (%d)",
+                          len_dims[0], x_dims[0]));
 
     int64_t out_dim_0 = -1;
     if (ctx->IsRuntime()) {
@@ -115,11 +129,14 @@ class SequenceUnpadGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of SequenceUnpadGradOp should not be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("X"), true,
+        platform::errors::InvalidArgument(
+            "Input(X) of SequenceUnpadGradOp should not be null."));
     PADDLE_ENFORCE_EQ(
         ctx->HasInput(framework::GradVarName("Out")), true,
-        "Input(Out@GRAD) of SequenceUnpadGradOp should not be null.");
+        platform::errors::InvalidArgument(
+            "Input(Out@GRAD) of SequenceUnpadGradOp should not be null."));
 
     if (ctx->HasOutput(framework::GradVarName("X"))) {
       ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
diff --git a/paddle/fluid/operators/truncated_gaussian_random_op.cc b/paddle/fluid/operators/truncated_gaussian_random_op.cc
index 1e8708f2648d7dd3c10319bd0a4be193d2458d53..4e4f22a66f43534a47df06d9de034bf85d467408 100644
--- a/paddle/fluid/operators/truncated_gaussian_random_op.cc
+++ b/paddle/fluid/operators/truncated_gaussian_random_op.cc
@@ -182,17 +182,21 @@ class TruncatedGaussianRandomOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(
-        ctx->HasOutput("Out"),
-        "Output(Out) of TruncatedGaussianRandomOp should not be null.");
+    PADDLE_ENFORCE_EQ(
+        ctx->HasOutput("Out"), true,
+        platform::errors::InvalidArgument(
+            "Output(Out) of TruncatedGaussianRandomOp should not be null."));
     auto shape = ctx->Attrs().Get>("shape");
     std::vector out_dim;
     out_dim.reserve(shape.size());
     for (auto dim : shape) {
       out_dim.push_back(static_cast(dim));
     }
-    PADDLE_ENFORCE(shape.size() > 0UL,
-                   "shape can be one int or array. shape must be set.");
+    PADDLE_ENFORCE_GT(shape.size(), 0UL,
+                      platform::errors::InvalidArgument(
+                          "shape can be one int or array. shape must be set. "
+                          "But received (%d)",
+                          shape.size()));
     ctx->SetOutputDim("Out", framework::make_ddim(out_dim));
   }
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 63e103cdfd8611f043d987518d12e0ebd72a411d..99694b97267e2c4ce0c2a0530cc3565ea23853cd 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -11450,6 +11450,9 @@ def gaussian_random(shape, mean=0.0, std=1.0, seed=0, dtype='float32'):
     """
 
     helper = LayerHelper('gaussian_random', **locals())
+    check_type(shape, 'shape', (list, tuple), 'fluid.layers.gaussian_random')
+    check_dtype(dtype, 'dtype', ['float32', 'float64'],
+                'fluid.layers.gaussian_random')
     out = helper.create_variable_for_type_inference(dtype)
     c_dtype = convert_np_dtype_to_dtype_(dtype)
     helper.append_op(
@@ -11543,6 +11546,12 @@ def gaussian_random_batch_size_like(input,
     """
 
     helper = LayerHelper('gaussian_random_batch_size_like', **locals())
+    check_type(input, 'input', (Variable),
+               'fluid.layers.gaussian_random_batch_size_like')
+    check_type(shape, 'shape', (list, tuple),
+               'fluid.layers.gaussian_random_batch_size_like')
+    check_dtype(dtype, 'dtype', ['float16', 'float32', 'int'],
+                'fluid.layers.gaussian_random_batch_size_like')
     out = helper.create_variable_for_type_inference(dtype)
     c_dtype = convert_np_dtype_to_dtype_(dtype)
     helper.append_op(
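The `check_type`/`check_dtype` calls added to `gaussian_random` above fail fast at graph-construction time. As a rough illustration only (this snippet is not part of the patch and assumes a build that includes it), both calls below would be expected to raise `TypeError`:

```python
import unittest

import paddle.fluid as fluid


class TestGaussianRandomTypeChecks(unittest.TestCase):
    """Hypothetical checks mirroring the new check_type/check_dtype calls."""

    def test_shape_type(self):
        # shape must be a list or tuple, so a bare int should be rejected.
        with self.assertRaises(TypeError):
            fluid.layers.gaussian_random(shape=3)

    def test_dtype(self):
        # dtype is restricted to float32/float64 by the new check_dtype call.
        with self.assertRaises(TypeError):
            fluid.layers.gaussian_random(shape=[2, 3], dtype='int32')


if __name__ == '__main__':
    unittest.main()
```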
diff --git a/python/paddle/fluid/layers/sequence_lod.py b/python/paddle/fluid/layers/sequence_lod.py
index 304571111db767bccf51757a248797ffe3015f9c..d52600268cc92ef2c48987e2dc7935184732266b 100644
--- a/python/paddle/fluid/layers/sequence_lod.py
+++ b/python/paddle/fluid/layers/sequence_lod.py
@@ -17,6 +17,7 @@ from __future__ import print_function
 from .layer_function_generator import templatedoc
 from ..framework import Variable, in_dygraph_mode
 from ..layer_helper import LayerHelper
+from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
 
 __all__ = [
     'sequence_conv',
@@ -405,6 +406,16 @@ def sequence_concat(input, name=None):
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_concat', **locals())
+
+    check_type(input, 'input', list, 'fluid.layers.sequence_concat')
+    if isinstance(input, list):
+        for i, input_x in enumerate(input):
+            check_type(input_x, 'input[' + str(i) + ']', Variable,
+                       'fluid.layers.sequence_concat')
+            check_dtype(input_x.dtype, 'input[' + str(i) + ']',
+                        ['int64', 'float32', 'float64'],
+                        'fluid.layers.sequence_concat')
+
     out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     helper.append_op(
         type='sequence_concat', inputs={'X': input}, outputs={'Out': [out]})
@@ -926,6 +937,11 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_pad', input=x, **locals())
+    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
+                             'fluid.layers.sequence_pad')
+    check_variable_and_dtype(pad_value, 'pad_value',
+                             ['float32', 'float64', 'int32', 'int64'],
+                             'fluid.layers.sequence_pad')
     dtype = helper.input_dtype()
     out = helper.create_variable_for_type_inference(dtype)
     length = helper.create_variable_for_type_inference(dtype)
@@ -1001,6 +1017,10 @@ def sequence_unpad(x, length, name=None):
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_unpad', input=x, **locals())
+    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
+                             'fluid.layers.sequence_unpad')
+    check_variable_and_dtype(length, 'length', ['int64'],
+                             'fluid.layers.sequence_unpad')
     dtype = helper.input_dtype()
     out = helper.create_variable_for_type_inference(dtype)
 
@@ -1062,6 +1082,9 @@ def sequence_reshape(input, new_dim):
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper('sequence_reshape', **locals())
+    check_variable_and_dtype(input, 'input',
+                             ['float32', 'float64', 'int32', 'int64'],
+                             'fluid.layers.sequence_reshape')
     out = helper.create_variable_for_type_inference(helper.input_dtype())
     helper.append_op(
         type='sequence_reshape',
@@ -1334,6 +1357,9 @@ def sequence_reverse(x, name=None):
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
     helper = LayerHelper("sequence_reverse", **locals())
+    check_variable_and_dtype(x, 'x',
+                             ['float32', 'float64', 'int8', 'int32', 'int64'],
+                             'fluid.layers.sequence_reverse')
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
 
     helper.append_op(
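The sequence layers above now route their argument validation through the helpers imported from `..data_feeder`. A minimal sketch of how those helpers behave when called directly (not part of the patch; it assumes the fluid 1.x static-graph API, and `example_op` is just a placeholder name used in the error text):

```python
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.data_feeder import check_variable_and_dtype

# A LoD-level-1 float32 Variable, the kind of input sequence_pad expects.
x = fluid.layers.data(name='x', shape=[4], dtype='float32', lod_level=1)

# Passes silently: x is a Variable and float32 is in the allowed list.
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'example_op')

# Raises TypeError: a numpy array is not a Variable.
try:
    check_variable_and_dtype(
        np.ones([4], dtype='float32'), 'x', ['float32'], 'example_op')
except TypeError as e:
    print(e)
```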
diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_concat.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_concat.py
index 5b3d55c84936ba5ceab89b776b5474653578ab7a..737c085dde6acf5e3645b2127f42b1d8b5a7aa1d 100644
--- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_concat.py
+++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_concat.py
@@ -20,6 +20,8 @@ import sys
 sys.path.append("../")
 from op_test import OpTest
 
+from paddle import fluid
+
 
 class TestSequenceConcat(OpTest):
     def setLoD(self):
@@ -76,5 +78,41 @@ class TestSequenceConcatCase5(TestSequenceConcat):
         self.out_lod = [20, 10]
 
 
+class TestSequenceConcatOpError(unittest.TestCase):
+    def test_errors(self):
+        def test_input_list():
+            # the input type must be list
+            x_data = fluid.layers.data(name='x', shape=[4], dtype='float32')
+            fluid.layers.sequence_concat(input=x_data)
+
+        self.assertRaises(TypeError, test_input_list)
+
+        def test_variable1():
+            # the input element type must be Variable
+            x1_data = np.array([[3, 5]]).astype('float32')
+            y1_data = fluid.layers.data(name='y1', shape=[4], dtype='float32')
+            fluid.layers.sequence_concat(input=[x1_data, y1_data])
+
+        def test_variable2():
+            x2_data = np.array([[3, 5]]).astype('float32')
+            y2_data = fluid.layers.data(name='y2', shape=[4], dtype='float32')
+            fluid.layers.sequence_concat(input=[y2_data, x2_data])
+
+        for i in range(2):
+            if i == 0:
+                self.assertRaises(TypeError, test_variable1)
+            else:
+                self.assertRaises(TypeError, test_variable2)
+
+        def test_dtype():
+            # dtype must be 'float32', 'float64', 'int64'
+            x3_data = fluid.layers.data(name="x3", shape=[3, 5], dtype='int32')
+            y3_data = fluid.layers.data(name="y3", shape=[3, 5], dtype='int16')
+            input_list = [x3_data, y3_data]
+            fluid.layers.sequence_concat(input=input_list)
+
+        self.assertRaises(TypeError, test_dtype)
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_pad_op.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_pad_op.py
index d04f10139b48b3d8c573245efee653ff009aa1b6..b9d53452adead5a796ee646acf5dce725ca0a745 100644
--- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_pad_op.py
+++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_pad_op.py
@@ -18,6 +18,8 @@ import sys
 sys.path.append("../")
 from op_test import OpTest
 
+import paddle.fluid as fluid
+
 
 class TestSequencePadOp(OpTest):
     def set_attr(self):
@@ -143,5 +145,34 @@ class TestSequencePadOp8(TestSequencePadOp):
         self.dtype = 'float64'
 
 
+class TestSequencePadOpError(unittest.TestCase):
+    def test_error(self):
+        def test_x_variable():
+            # the input x type must be Variable
+            x = np.random.random((2, 4)).astype("float32")
+            pad_value = fluid.layers.assign(input=np.array(
+                [0.0], dtype=np.float32))
+            fluid.layers.sequence_pad(x=x, pad_value=pad_value)
+
+        self.assertRaises(TypeError, test_x_variable)
+
+        def test_pad_value_variable():
+            x1 = fluid.layers.data(
+                name='x1', shape=[10, 5], dtype='float32', lod_level=1)
+            pad_value1 = np.array([0.0], dtype=np.float32)
+            fluid.layers.sequence_pad(x=x1, pad_value=pad_value1)
+
+        self.assertRaises(TypeError, test_pad_value_variable)
+
+        def test_dtype():
+            x2 = fluid.layers.data(
+                name='x2', shape=[10, 5], dtype='int16', lod_level=1)
+            pad_value2 = fluid.layers.assign(input=np.array(
+                [0.0], dtype=np.int32))
+            fluid.layers.sequence_pad(x=x2, pad_value=pad_value2)
+
+        self.assertRaises(TypeError, test_dtype)
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_reshape.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_reshape.py
index 4c65cd382ff9a5c41b43c745df62e03bdf44b5e1..6540c6a09444859d0d696b825e6fee494e2891b0 100644
--- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_reshape.py
+++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_reshape.py
@@ -21,6 +21,8 @@ import sys
 sys.path.append("../")
 from op_test import OpTest
 
+import paddle.fluid as fluid
+
 
 class TestSequenceReshape(OpTest):
     def init_data(self):
@@ -83,5 +85,25 @@ class TestSequenceReshape_reduce_seq_len0_case1(TestSequenceReshape):
         self.x = np.random.uniform(0.1, 1, [12, 12]).astype('float64')
 
 
+class TestSequenceReshapeOpError(unittest.TestCase):
+    def test_error(self):
+        def test_variable():
+            x = np.random.random((2, 4)).astype("float32")
+            fluid.layers.sequence_reshape(input=x, new_dim=4)
+
+        self.assertRaises(TypeError, test_variable)
+
+        def test_dtype():
+            x1 = fluid.layers.data(
+                name='x1',
+                shape=[2, 6],
+                append_batch_size=False,
+                dtype='float16',
+                lod_level=1)
+            fluid.layers.sequence_reshape(input=x1, new_dim=4)
+
+        self.assertRaises(TypeError, test_dtype)
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_reverse.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_reverse.py
index 8c7099849153846548b248a0bf9f99c6dd9208ad..d0031dcc5f27d1a78f51b25e110cf9e5f0815910 100644
--- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_reverse.py
+++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_reverse.py
@@ -79,5 +79,22 @@ class TestSequenceReverse3(TestSequenceReverseBase):
         self.lod = [0, 2, 10, 0]
 
 
+class TestSequenceReverseOpError(unittest.TestCase):
+    def test_error(self):
+        def test_variable():
+            # the input type must be Variable
+            x_data = np.random.random((2, 4)).astype("float32")
+            fluid.layers.sequence_reverse(x=x_data)
+
+        self.assertRaises(TypeError, test_variable)
+
+        def test_dtype():
+            # dtype must be 'float32', 'float64', 'int8', 'int32', 'int64'
+            x2_data = fluid.layers.data(name='x2', shape=[4], dtype='float16')
+            fluid.layers.sequence_reverse(x=x2_data)
+
+        self.assertRaises(TypeError, test_dtype)
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/sequence/test_sequence_unpad_op.py b/python/paddle/fluid/tests/unittests/sequence/test_sequence_unpad_op.py
index 458088b4f5bf44431d2271b20dd1c94d5a16c109..aec3033316ba30614035bf42ded4f638728c4b13 100644
--- a/python/paddle/fluid/tests/unittests/sequence/test_sequence_unpad_op.py
+++ b/python/paddle/fluid/tests/unittests/sequence/test_sequence_unpad_op.py
@@ -19,6 +19,8 @@ import sys
 sys.path.append("../")
 from op_test import OpTest
 
+import paddle.fluid as fluid
+
 
 class TestSequenceUnpadOp(OpTest):
     def init(self):
@@ -84,5 +86,36 @@ class TestSequenceUnpadOp4(TestSequenceUnpadOp):
         self.dtype = "float64"
 
 
+class TestSequenceUnpadOpError(unittest.TestCase):
+    def test_error(self):
+        def test_x_variable():
+            x = np.random.random((10, 5)).astype("float64")
+            len = fluid.data(name='length2', shape=[10], dtype='int64')
+            fluid.layers.sequence_unpad(x=x, length=len)
+
+        self.assertRaises(TypeError, test_x_variable)
+
+        def test_length_variable():
+            x1 = fluid.data(name='x1', shape=[10, 5], dtype='float32')
+            len1 = np.random.random((10)).astype("int64")
+            fluid.layers.sequence_unpad(x=x1, length=len1)
+
+        self.assertRaises(TypeError, test_length_variable)
+
+        def test_x_dtype():
+            x2 = fluid.data(name='x2', shape=[10, 5], dtype='float16')
+            len2 = fluid.data(name='length2', shape=[10], dtype='int64')
+            fluid.layers.sequence_unpad(x=x2, length=len2)
+
+        self.assertRaises(TypeError, test_x_dtype)
+
+        def test_length_dtype():
+            x3 = fluid.data(name='x3', shape=[10, 5], dtype='float64')
+            len3 = fluid.data(name='length3', shape=[10], dtype='int32')
+            fluid.layers.sequence_unpad(x=x3, length=len3)
+
+        self.assertRaises(TypeError, test_length_dtype)
+
+
 if __name__ == '__main__':
     unittest.main()
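Finally, a small positive-path sketch (not part of the patch; it assumes the fluid 1.x static-graph API that the tests above use) showing inputs that satisfy the new `sequence_pad`/`sequence_unpad` checks end to end:

```python
import numpy as np
import paddle.fluid as fluid

# Two sequences of lengths 2 and 3; each time step carries 4 float32 features.
x = fluid.layers.data(name='x', shape=[4], dtype='float32', lod_level=1)
pad_value = fluid.layers.assign(input=np.array([0.0], dtype=np.float32))

# float32 x and the int64 Length output satisfy the dtype checks added above.
padded, length = fluid.layers.sequence_pad(x=x, pad_value=pad_value)
unpadded = fluid.layers.sequence_unpad(x=padded, length=length)

place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())

x_np = np.random.random((5, 4)).astype('float32')
x_lod = fluid.create_lod_tensor(x_np, [[2, 3]], place)
outs = exe.run(fluid.default_main_program(),
               feed={'x': x_lod},
               fetch_list=[padded, length, unpadded],
               return_numpy=False)

print(np.array(outs[0]).shape)  # expected (2, 3, 4): batch x padded_len x features
print(np.array(outs[1]))        # the original sequence lengths (2 and 3)
```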