From ebb369747df537298a3a03ca50aa0f9583d9d15a Mon Sep 17 00:00:00 2001
From: Chen Weihang
Date: Wed, 13 May 2020 23:04:48 +0800
Subject: [PATCH] [Cherry-pick] Some SL Api/Op error msg polish (#24495)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* API/OP (Some SL API) error message enhancement (#24441)

* polish some sl api error message, test=develop

* polish python input check of stride slice, test=develop

* fix unittest bugs, test=develop

* fix error info for transpose sequence_conv_pool max_sequence_len sequ… (#24437)

* fix error info for transpose sequence_conv_pool max_sequence_len sequence_erase, test=develop

* fix error info for transpose sequence_conv_pool max_sequence_len sequence_erase, test=develop

* update modify, test=develop

* update modify, test=develop

* fixed some modifications, test=develop

Co-authored-by: Xing Wu
---
 paddle/fluid/operators/max_sequence_len_op.cc |  3 +-
 .../sequence_ops/sequence_erase_op.cc         | 13 ++--
 .../sequence_ops/sequence_erase_op.cu         |  6 +-
 .../sequence_ops/sequence_erase_op.h          |  8 ++-
 .../sequence_ops/sequence_scatter_op.cc       | 45 ++++++++-----
 .../sequence_ops/sequence_scatter_op.h        | 49 ++++++++++----
 .../sequence_ops/sequence_slice_op.cc         | 32 +++++----
 .../sequence_ops/sequence_slice_op.h          | 44 ++++++++++---
 .../sequence_ops/sequence_softmax_op.cc       | 35 +++++-----
 .../sequence_ops/sequence_softmax_op.h        | 31 +++++----
 paddle/fluid/operators/strided_slice_op.cc    | 66 +++++++++++++------
 paddle/fluid/operators/strided_slice_op.h     | 16 +++--
 paddle/fluid/operators/transpose_op.cc        | 50 +++++++-------
 paddle/fluid/operators/transpose_op.h         |  5 +-
 python/paddle/fluid/layers/nn.py              | 33 +++++++---
 python/paddle/fluid/layers/sequence_lod.py    | 29 ++++++--
 python/paddle/fluid/nets.py                   |  2 +
 .../tests/unittests/test_strided_slice_op.py  |  6 +-
 18 files changed, 310 insertions(+), 163 deletions(-)

diff --git a/paddle/fluid/operators/max_sequence_len_op.cc b/paddle/fluid/operators/max_sequence_len_op.cc
index 3ac9c584727..b47ec8bc70a 100644
--- a/paddle/fluid/operators/max_sequence_len_op.cc
+++ b/paddle/fluid/operators/max_sequence_len_op.cc
@@ -57,7 +57,8 @@ class MaxSeqenceLenOpProtoMaker : public framework::OpProtoAndCheckerMaker {
 class MaxSeqenceLenInferShape : public framework::InferShapeBase {
  public:
   void operator()(framework::InferShapeContext *context) const override {
-    PADDLE_ENFORCE(context->HasInput("RankTable"));
+    OP_INOUT_CHECK(context->HasInput("RankTable"), "Input", "RankTable",
+                   "MaxSeqenceLen");
     context->SetOutputDim("Out", {1});
   }
 };
diff --git a/paddle/fluid/operators/sequence_ops/sequence_erase_op.cc b/paddle/fluid/operators/sequence_ops/sequence_erase_op.cc
index 4343a76b484..79503d9714f 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_erase_op.cc
+++ b/paddle/fluid/operators/sequence_ops/sequence_erase_op.cc
@@ -23,14 +23,15 @@ class SequenceEraseOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceErase operator should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SequenceErase operator should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceErase");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceErase");
     auto x_dims = ctx->GetInputDim("X");
     PADDLE_ENFORCE(x_dims.size() == 2 && x_dims[1] == 1,
-                   "Input(X) of SequenceEraseOp should be a 2-D LoDTensor "
-                   "with the 2nd dimension equal to 1.");
+                   platform::errors::InvalidArgument(
+                       "Input(X) of SequenceEraseOp should be a 2-D LoDTensor "
+                       "with the 2nd dimension equal to 1, "
+                       "but received size %d with the 2nd dimension %d.",
+                       x_dims.size(), x_dims[1]));
     ctx->SetOutputDim("Out", x_dims);
     // The output LoDTensor's lod_level should be input X's lod_level.
     // For compile-time, we call SetLoDLevel to set output's lod_level.
diff --git a/paddle/fluid/operators/sequence_ops/sequence_erase_op.cu b/paddle/fluid/operators/sequence_ops/sequence_erase_op.cu
index 0401c22c92e..bacaaeadbf5 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_erase_op.cu
+++ b/paddle/fluid/operators/sequence_ops/sequence_erase_op.cu
@@ -64,8 +64,10 @@ class SequenceEraseOpCUDAKernel : public framework::OpKernel<T> {
     auto* out = ctx.Output<LoDTensor>("Out");
     auto lod = in->lod();
-    PADDLE_ENFORCE_EQ(lod[lod.size() - 1].back(), (size_t)in->numel(),
-                      "The actual size mismatches with the LoD information.");
+    PADDLE_ENFORCE_EQ(
+        lod[lod.size() - 1].back(), (size_t)in->numel(),
+        platform::errors::InvalidArgument(
+            "The actual size mismatches with the LoD information."));
     auto tokens = ctx.Attr<std::vector<int>>("tokens");
     auto in_len = in->numel();
     auto in_dat = in->data<T>();
diff --git a/paddle/fluid/operators/sequence_ops/sequence_erase_op.h b/paddle/fluid/operators/sequence_ops/sequence_erase_op.h
index 0c2d2894174..ed98b694b27 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_erase_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_erase_op.h
@@ -30,9 +30,13 @@ class SequenceEraseKernel : public framework::OpKernel<T> {
     auto lod = in->lod();
     PADDLE_ENFORCE_EQ(
         lod.empty(), false,
-        "Input(X) Tensor of SequenceEraseOp does not contain LoD information.");
+        platform::errors::InvalidArgument("Input(X) Tensor of SequenceEraseOp "
+                                          "does not contain LoD information."));
     PADDLE_ENFORCE_EQ(lod[lod.size() - 1].back(), (size_t)in->numel(),
-                      "The actual size mismatches with the LoD information.");
+                      platform::errors::InvalidArgument(
+                          "The actual input size %d mismatches with the LoD "
+                          "information size %d.",
+                          lod[lod.size() - 1].back(), (size_t)in->numel()));
     auto tokens = ctx.Attr<std::vector<int>>("tokens");
     auto in_len = in->numel();
     auto in_dat = in->data<T>();
diff --git a/paddle/fluid/operators/sequence_ops/sequence_scatter_op.cc b/paddle/fluid/operators/sequence_ops/sequence_scatter_op.cc
index 361afa286cf..c89a2f2b66c 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_scatter_op.cc
+++ b/paddle/fluid/operators/sequence_ops/sequence_scatter_op.cc
@@ -74,23 +74,25 @@ class SequenceScatterOp : public framework::OperatorWithKernel {

   void InferShape(framework::InferShapeContext* ctx) const override {
     // Enforce has inputs and outputs
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceScatterOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Ids"),
-                   "Input(Ids) of SequenceScatterOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Updates"),
-                   "Input(Updates) of SequenceScatterOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SequenceScatterOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceScatter");
+    OP_INOUT_CHECK(ctx->HasInput("Ids"), "Input", "Ids", "SequenceScatter");
+    OP_INOUT_CHECK(ctx->HasInput("Updates"), "Input", "Updates",
+                   "SequenceScatter");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceScatter");

     // Set output dim the same as input
     auto ref_dims = ctx->GetInputDim("X");
     ctx->SetOutputDim("Out", ref_dims);
     // Enforce the Updates and Ids are the same shape
-    PADDLE_ENFORCE_EQ(ctx->GetInputDim("Updates")[0],
-                      ctx->GetInputDim("Ids")[0],
-                      "Updates and Ids should have same shape.");
+    auto updates_dim = ctx->GetInputDim("Updates");
+    auto ids_dim = ctx->GetInputDim("Ids");
+    PADDLE_ENFORCE_EQ(
+        updates_dim[0], ids_dim[0],
+        platform::errors::InvalidArgument(
+            "The shapes of SequenceScatter operator's input Updates and Ids "
+            "do not match, received Updates's shape is [%s], Ids's shape is "
+            "[%s].",
+            updates_dim, ids_dim));

     // Enforce LoD of ids and updates be the same
     if (ctx->IsRuntime()) {
@@ -101,12 +103,21 @@ class SequenceScatterOp : public framework::OperatorWithKernel {
       auto& ids_lod = ids_var->Get<LoDTensor>().lod();
       auto& updates_lod = updates_var->Get<LoDTensor>().lod();
-      PADDLE_ENFORCE_EQ(ids_lod.size(), 1,
-                        "Currently only level 1 LoD could be"
-                        " processed by sequence scatter op.");
-      PADDLE_ENFORCE_EQ(updates_lod.size(), 1,
-                        "Currently only level 1 LoD "
-                        "could be processed by sequence scatter op.");
+      PADDLE_ENFORCE_EQ(
+          ids_lod.size(), 1,
+          platform::errors::InvalidArgument(
+              "The SequenceScatter operator's Input Ids holds wrong LoD "
+              "information. Currently SequenceScatter operator can only deal "
+              "with one level LoD for input Ids, but received LoD level is %d.",
+              ids_lod.size()));
+      PADDLE_ENFORCE_EQ(
+          updates_lod.size(), 1,
+          platform::errors::InvalidArgument(
+              "The SequenceScatter operator's Input Updates holds wrong LoD "
+              "information. Currently SequenceScatter operator can only deal "
+              "with one level LoD for input Updates, but received LoD level is "
+              "%d.",
+              updates_lod.size()));
     }
   }
diff --git a/paddle/fluid/operators/sequence_ops/sequence_scatter_op.h b/paddle/fluid/operators/sequence_ops/sequence_scatter_op.h
index 917a3ed49c0..365381abc46 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_scatter_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_scatter_op.h
@@ -35,8 +35,9 @@ class SequenceScatterOpKernel : public framework::OpKernel<T> {

     auto& ids_lod = ids->lod();
     PADDLE_ENFORCE_EQ(ids_lod.empty(), false,
-                      "Input(Ids) Tensor of SequenceScatterOp does not contain "
-                      "LoD information.");
+                      platform::errors::InvalidArgument(
+                          "Input(Ids) Tensor of SequenceScatter operator does "
+                          "not contain LoD information."));

     // Initialize out as same as x
     out->mutable_data<T>(ctx.GetPlace());
@@ -46,9 +47,12 @@ class SequenceScatterOpKernel : public framework::OpKernel<T> {
     auto out_dims = out->dims();

     for (int i = 0; i < x_dims.size(); ++i)
-      PADDLE_ENFORCE(x_dims[i] == out_dims[i],
-                     "Input and output shape of "
-                     "sequence scatter op must exactly be the same.");
+      PADDLE_ENFORCE_EQ(x_dims[i], out_dims[i],
+                        platform::errors::InvalidArgument(
+                            "Input(X) and output(Out) shape of SequenceScatter "
+                            "operator do not match. Received input(X)'s shape "
+                            "is [%s], output(Out)'s shape is [%s].",
+                            x_dims, out_dims));

     size_t slice_size = 1;
     for (int i = 1; i < x_dims.size(); ++i) slice_size *= x_dims[i];
@@ -56,8 +60,13 @@ class SequenceScatterOpKernel : public framework::OpKernel<T> {
     auto lod_vec = ids_lod[0];
     unsigned int seg = 0;
     for (int i = 0; i < ids->dims()[0]; ++i) {
-      PADDLE_ENFORCE_LT(seg, lod_vec.size() - 1,
-                        "Segment num must not exceed batch size.\n");
+      PADDLE_ENFORCE_LT(
+          seg, lod_vec.size() - 1,
+          platform::errors::OutOfRange("The segment index is out of bound in "
+                                       "SequenceScatter operator, it must be "
+                                       "less than batch size. The segment "
+                                       "index is %d, the batch size is %d.",
+                                       seg, lod_vec.size()));
       int lower_bound = lod_vec[seg];
       int upper_bound = lod_vec[seg + 1];
       if (i >= lower_bound && i < upper_bound) {
@@ -77,8 +86,11 @@ template <typename T>
 class SequenceScatterGradientOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
-                   "This kernel only runs on CPU.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_cpu_place(ctx.GetPlace()), true,
+        platform::errors::Unimplemented("Device does not match. The "
+                                        "SequenceScatterGradientOpKernel can "
+                                        "only run on CPU device."));
     auto* dX = ctx.Output<LoDTensor>(framework::GradVarName("X"));
     auto* dUpdates = ctx.Output<LoDTensor>(framework::GradVarName("Updates"));
     auto* ids = ctx.Input<LoDTensor>("Ids");
@@ -94,9 +106,13 @@ class SequenceScatterGradientOpKernel : public framework::OpKernel<T> {
     auto dout_dims = dOut->dims();

     for (int i = 0; i < dx_dims.size(); ++i)
-      PADDLE_ENFORCE(dx_dims[i] == dout_dims[i],
-                     "Input and output shape of "
-                     "sequence scatter grad op must exactly be the same.");
+      PADDLE_ENFORCE_EQ(dx_dims[i], dout_dims[i],
+                        platform::errors::InvalidArgument(
+                            "Input(Out@GRAD) and output(X@GRAD) shape of "
+                            "SequenceScatterGradient operator do not match. "
+                            "Received input(Out@GRAD)'s shape is [%s], "
+                            "output(X@GRAD)'s shape is [%s].",
+                            dout_dims, dx_dims));

     size_t slice_size = 1;
     for (int i = 1; i < dx_dims.size(); ++i) slice_size *= dx_dims[i];
@@ -105,8 +121,13 @@ class SequenceScatterGradientOpKernel : public framework::OpKernel<T> {
     unsigned int seg = 0;

     for (int i = 0; i < ids->dims()[0]; ++i) {
-      PADDLE_ENFORCE_LT(seg, lod_vec.size() - 1,
-                        "Segment num must not exceed batch size.\n");
+      PADDLE_ENFORCE_LT(
+          seg, lod_vec.size() - 1,
+          platform::errors::OutOfRange(
+              "The segment index is out of bound in SequenceScatterGradient "
+              "operator, it must be less than batch size. The segment index is "
+              "%d, the batch size is %d.",
+              seg, lod_vec.size()));
       int lower_bound = lod_vec[seg];
       int upper_bound = lod_vec[seg + 1];
       if (i >= lower_bound && i < upper_bound) {
diff --git a/paddle/fluid/operators/sequence_ops/sequence_slice_op.cc b/paddle/fluid/operators/sequence_ops/sequence_slice_op.cc
index 0ba5108edd5..7787a5c2b89 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_slice_op.cc
+++ b/paddle/fluid/operators/sequence_ops/sequence_slice_op.cc
@@ -23,14 +23,10 @@ class SequenceSliceOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceSliceOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Offset"),
-                   "Input(Offset) of SequenceSliceOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Length"),
-                   "Input(Length) of SequenceSliceOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SequenceSliceOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceSlice");
+    OP_INOUT_CHECK(ctx->HasInput("Offset"), "Input", "Offset", "SequenceSlice");
+    OP_INOUT_CHECK(ctx->HasInput("Length"), "Input", "Length", "SequenceSlice");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceSlice");
     auto input_dims = ctx->GetInputDim("X");

     auto offset_dim = ctx->GetInputDim("Offset");
@@ -38,10 +34,18 @@ class SequenceSliceOp : public framework::OperatorWithKernel {
     auto length_dim = ctx->GetInputDim("Length");

     PADDLE_ENFORCE_EQ(
         offset_dim.size(), 2UL,
-        "Only support one level sequence now, The rank of offset must be 2.");
+        platform::errors::InvalidArgument(
+            "Input Offset dimension error. SequenceSlice operator only "
+            "supports one level sequence now, the dimension of input Offset "
+            "must be 2, but received dimension is %d.",
+            offset_dim.size()));
     PADDLE_ENFORCE_EQ(
         length_dim.size(), 2UL,
-        "Only support one level sequence now, The rank of Length must be 2.");
+        platform::errors::InvalidArgument(
+            "Input Length dimension error. SequenceSlice operator only "
+            "supports one level sequence now, the dimension of input Length "
+            "must be 2, but received dimension is %d.",
+            length_dim.size()));

     // Initialize the output's dims to maximum,
     // and re-set to real dims by the value of Offset and Length at kernel
@@ -62,10 +66,10 @@ class SequenceSliceGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "The gradient of Out should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutputs(framework::GradVarName("X")),
-                   "The gradient of X should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "SequenceSliceGrad");
+    OP_INOUT_CHECK(ctx->HasOutputs(framework::GradVarName("X")), "Output",
+                   framework::GradVarName("X"), "SequenceSliceGrad");
     ctx->SetOutputsDim(framework::GradVarName("X"), ctx->GetInputsDim("X"));
   }
diff --git a/paddle/fluid/operators/sequence_ops/sequence_slice_op.h b/paddle/fluid/operators/sequence_ops/sequence_slice_op.h
index e2ddffa54a7..65e021b507a 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_slice_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_slice_op.h
@@ -49,18 +49,32 @@ class SequenceSliceOpKernel : public framework::OpKernel<T> {
     auto* out = ctx.Output<LoDTensor>("Out");

     auto lod = in->lod();
-    PADDLE_ENFORCE_EQ(
-        lod.empty(), false,
-        "Input(X) Tensor of SequenceSliceOp does not contain LoD information.");
+    PADDLE_ENFORCE_EQ(lod.empty(), false,
+                      platform::errors::InvalidArgument(
+                          "Input(X) Tensor of SequenceSlice operator does not "
+                          "contain LoD information."));
+    PADDLE_ENFORCE_EQ(
+        lod.size(), 1UL,
+        platform::errors::InvalidArgument(
+            "LoD information error. SequenceSlice operator only supports one "
+            "level sequence now, but received LoD level is %d.",
+            lod.size()));
     auto n = lod[0].size() - 1;
-    PADDLE_ENFORCE_EQ(lod.size(), 1UL, "Only support one level sequence now.");
     PADDLE_ENFORCE_EQ(
         n, static_cast<size_t>(length->dims()[0]),
-        "The size of input-sequence and length-array should be the same");
+        platform::errors::InvalidArgument(
+            "Input length shape error. The length of input LoD sequence and "
+            "input length-array's first dimension should be equal, but the LoD "
+            "sequence length is %d, the length-array's first dimension is %d.",
+            n, static_cast<size_t>(length->dims()[0])));
     PADDLE_ENFORCE_EQ(
         n, static_cast<size_t>(offset->dims()[0]),
-        "The size of input-sequence and offset-array should be the same");
+        platform::errors::InvalidArgument(
+            "Input offset shape error. The length of input LoD sequence and "
+            "input offset-array's first dimension should be equal, but the LoD "
+            "sequence length is %d, the offset-array's first dimension is %d.",
+            n, static_cast<size_t>(offset->dims()[0])));

     const int64_t* offset_data = offset->data<int64_t>();
     const int64_t* length_data = length->data<int64_t>();
@@ -79,11 +93,21 @@ class SequenceSliceOpKernel : public framework::OpKernel<T> {

     for (size_t i = 0; i < n; ++i) {
       PADDLE_ENFORCE_LE(0, offset_data[i],
-                        "The offset[%d] must be nonnegative.", i);
+                        platform::errors::InvalidArgument(
+                            "The input offset[%d]'s value is negative, its "
+                            "value is %d, expect it to be non-negative.",
+                            i, offset_data[i]));
       PADDLE_ENFORCE_LE(0, length_data[i],
-                        "The length[%d] must be nonnegative.", i);
-      PADDLE_ENFORCE_LE(lod[0][i] + offset_data[i] + length_data[i],
-                        lod[0][i + 1], "The target tensor's length overflow.");
+                        platform::errors::InvalidArgument(
+                            "The input length[%d]'s value is negative, its "
+                            "value is %d, expect it to be non-negative.",
+                            i, length_data[i]));
+      PADDLE_ENFORCE_LE(
+          lod[0][i] + offset_data[i] + length_data[i], lod[0][i + 1],
+          platform::errors::OutOfRange(
+              "The slice end index of target tensor is out of range. Expect it "
+              "less than or equal to %d, but the actual slice end index is %d.",
+              lod[0][i + 1], lod[0][i] + offset_data[i] + length_data[i]));
     }

     out->mutable_data<T>(ctx.GetPlace());
diff --git a/paddle/fluid/operators/sequence_ops/sequence_softmax_op.cc b/paddle/fluid/operators/sequence_ops/sequence_softmax_op.cc
index ec4565c3b20..992a0b458b1 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_softmax_op.cc
+++ b/paddle/fluid/operators/sequence_ops/sequence_softmax_op.cc
@@ -23,10 +23,8 @@ class SequenceSoftmaxOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceSoftmaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SequenceSoftmaxOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceSoftmax");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceSoftmax");

     ctx->ShareDim("X", /*->*/ "Out");
     ctx->ShareLoD("X", /*->*/ "Out");
@@ -108,21 +106,22 @@ class SequenceSoftmaxGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Out"),
-                   "Input(Out) of SequenceSoftmaxGradOp should not be null.");
-    PADDLE_ENFORCE(
-        ctx->HasInput(framework::GradVarName("Out")),
-        "Input(Out@GRAD) of SequenceSoftmaxGradOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceSoftmaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
-                   "Output(X@GRAD) of SequenceSoftmaxOp should not be null.");
-
+    OP_INOUT_CHECK(ctx->HasInput("Out"), "Input", "Out", "SequenceSoftmaxGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   "Out@GRAD", "SequenceSoftmaxGrad");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceSoftmaxGrad");
+    OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output",
+                   "X@GRAD", "SequenceSoftmaxGrad");
+
+    auto out_dim = ctx->GetInputDim("Out");
+    auto out_grad_dim = ctx->GetInputDim(framework::GradVarName("Out"));
     PADDLE_ENFORCE_EQ(
-        ctx->GetInputDim("Out"),
-        ctx->GetInputDim(framework::GradVarName("Out")),
-        "Input(Out) and Input(Out@GRAD) of SequenceSoftmaxGradOp should be of "
-        "the same shape.");
+        out_dim, out_grad_dim,
+        platform::errors::InvalidArgument(
+            "The shapes of Input(Out) and Input(Out@GRAD) of "
+            "SequenceSoftmaxGrad operator do not match. The Input(Out)'s shape "
+            "is [%s], the Input(Out@GRAD)'s shape is [%s].",
+            out_dim, out_grad_dim));

     ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
   }
diff --git a/paddle/fluid/operators/sequence_ops/sequence_softmax_op.h b/paddle/fluid/operators/sequence_ops/sequence_softmax_op.h
index 4d8f1af456c..e0aa255a68b 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_softmax_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_softmax_op.h
@@ -95,20 +95,27 @@ class SequenceSoftmaxKernel : public framework::OpKernel<T> {

     auto lod = x->lod();
     auto dims = x->dims();
-    PADDLE_ENFORCE_EQ(lod.empty(), false,
-                      "Input(X) Tensor of SequenceSoftmaxOp does not contain "
-                      "LoD information.");
+    PADDLE_ENFORCE_EQ(
+        lod.empty(), false,
+        platform::errors::InvalidArgument(
+            "Input(X) Tensor of SequenceSoftmax operator does not contain "
+            "LoD information."));

     const size_t level = lod.size() - 1;
-    PADDLE_ENFORCE_GT(
-        lod.size(), 0U,
-        "The LoD level of Input X should be larger than 0 (lod.size() > 0).");
-    PADDLE_ENFORCE_EQ(dims[0], static_cast<int64_t>(lod[level].back()),
-                      "The first dimension of Input(X) should be equal to the "
-                      "sum of all sequences' lengths.");
-    PADDLE_ENFORCE_EQ(dims[0], x->numel(),
-                      "The width of each timestep in Input(X) of "
-                      "SequenceSoftmaxOp should be 1.");
+    PADDLE_ENFORCE_EQ(
+        dims[0], static_cast<int64_t>(lod[level].back()),
+        platform::errors::InvalidArgument(
+            "The first dimension of Input(X) should be equal to the sum of all "
+            "sequences' lengths. But the first dimension of Input(X) is %d, "
+            "the sum of all sequences' lengths is %d.",
+            dims[0], static_cast<int64_t>(lod[level].back())));
+    PADDLE_ENFORCE_EQ(
+        dims[0], x->numel(),
+        platform::errors::InvalidArgument(
+            "The width of each timestep in Input(X) of SequenceSoftmax "
+            "operator should be 1. But the first dimension of Input(X) is %d, "
+            "the number of elements is %d.",
+            dims[0], x->numel()));

     out->mutable_data<T>(ctx.GetPlace());
diff --git a/paddle/fluid/operators/strided_slice_op.cc b/paddle/fluid/operators/strided_slice_op.cc
index a0d6072af1b..190fec1114f 100644
--- a/paddle/fluid/operators/strided_slice_op.cc
+++ b/paddle/fluid/operators/strided_slice_op.cc
@@ -29,14 +29,16 @@ class StridedSliceOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Input"), true,
-                      "Input (Input) of slice op should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output (Out) of slice op should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "StridedSlice");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "StridedSlice");

     auto in_dims = ctx->GetInputDim("Input");
-    PADDLE_ENFORCE_LT(in_dims.size(), 7,
-                      "The rank of input should be less than 7.");
+    PADDLE_ENFORCE_LT(
+        in_dims.size(), 7,
+        platform::errors::InvalidArgument(
+            "The dimension of StridedSlice operator's input should be less "
+            "than 7, but received dimension is %d.",
+            in_dims.size()));
     auto starts = ctx->Attrs().Get<std::vector<int>>("starts");
     auto ends = ctx->Attrs().Get<std::vector<int>>("ends");
     auto strides = ctx->Attrs().Get<std::vector<int>>("strides");
@@ -50,20 +52,26 @@ class StridedSliceOp : public framework::OperatorWithKernel {

     if (ctx->HasInputs("StartsTensorList")) {
       auto StartsTensorList = ctx->Inputs("StartsTensorList");
-      PADDLE_ENFORCE_GT(StartsTensorList.size(), 0,
-                        "StartsTensorList size can't be zero");
+      PADDLE_ENFORCE_GT(
+          StartsTensorList.size(), 0,
+          platform::errors::InvalidArgument(
+              "StridedSlice operator's StartsTensorList is empty."));
       starts_size = StartsTensorList.size();
     }
     if (ctx->HasInputs("EndsTensorList")) {
       auto EndsTensorList = ctx->Inputs("EndsTensorList");
-      PADDLE_ENFORCE_GT(EndsTensorList.size(), 0,
-                        "EndsTensorList size can't be zero");
+      PADDLE_ENFORCE_GT(
+          EndsTensorList.size(), 0,
+          platform::errors::InvalidArgument(
+              "StridedSlice operator's EndsTensorList is empty."));
       ends_size = EndsTensorList.size();
     }
     if (ctx->HasInputs("StridesTensorList")) {
       auto StridesTensorList = ctx->Inputs("StridesTensorList");
-      PADDLE_ENFORCE_GT(StridesTensorList.size(), 0,
-                        "StridesTensorList size can't be zero");
+      PADDLE_ENFORCE_GT(
+          StridesTensorList.size(), 0,
+          platform::errors::InvalidArgument(
+              "StridedSlice operator's StridesTensorList is empty."));
       strides_size = StridesTensorList.size();
     }

@@ -73,18 +81,31 @@ class StridedSliceOp : public framework::OperatorWithKernel {
       tensor_input = true;
     }
     if (!ctx->HasInput("EndsTensor")) {
-      PADDLE_ENFORCE_EQ(ends_size, axes.size(),
-                        "The size of ends must be equal to the size of axes.");
+      PADDLE_ENFORCE_EQ(
+          ends_size, axes.size(),
+          platform::errors::InvalidArgument(
+              "The size of ends attribute in StridedSlice operator is not "
+              "equal to the size of axes attribute. The ends attribute's size "
+              "is %d, axes attribute's size is %d.",
+              ends_size, axes.size()));
     }
     if (!ctx->HasInput("StartsTensor")) {
       PADDLE_ENFORCE_EQ(
           starts_size, axes.size(),
-          "The size of starts must be equal to the size of axes.");
+          platform::errors::InvalidArgument(
+              "The size of starts attribute in StridedSlice operator is not "
+              "equal to the size of axes attribute. The starts attribute's "
+              "size is %d, axes attribute's size is %d.",
+              starts_size, axes.size()));
     }
     if (!ctx->HasInput("StridesTensor")) {
       PADDLE_ENFORCE_EQ(
           strides_size, axes.size(),
-          "The size of strides must be equal to the size of axes.");
+          platform::errors::InvalidArgument(
+              "The size of strides attribute in StridedSlice operator is not "
+              "equal to the size of axes attribute. The strides attribute's "
+              "size is %d, axes attribute's size is %d.",
+              strides_size, axes.size()));
     }
     // we need to analysis strided slice op is valid for
     // the parameter that we get from python front
@@ -101,7 +122,10 @@ class StridedSliceOp : public framework::OperatorWithKernel {
     for (size_t i = 0; i < decrease_axis.size(); ++i) {
       if (ctx->IsRuntime() && infer_flags[i] != -1) {
         PADDLE_ENFORCE_EQ(out_dims[decrease_axis[i]], 1,
-                          "decrease dim should be 1");
+                          platform::errors::InvalidArgument(
+                              "the size of decrease dimension should be 1, "
+                              "but received %d.",
+                              out_dims[decrease_axis[i]]));
       }
       out_dims[decrease_axis[i]] = 0;
     }
@@ -219,9 +243,11 @@ class StridedSliceOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Input"), true, "Input should not be null");
-    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
-                      "Input(Out@GRAD) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input",
+                   "StridedSliceGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   "Out@GRAD", "StridedSliceGrad");
+
     auto x_dims = ctx->GetInputDim("Input");
     auto x_grad_name = framework::GradVarName("Input");
     if (ctx->HasOutput(x_grad_name)) {
diff --git a/paddle/fluid/operators/strided_slice_op.h b/paddle/fluid/operators/strided_slice_op.h
index 3bb696a8008..647fb8b17f6 100644
--- a/paddle/fluid/operators/strided_slice_op.h
+++ b/paddle/fluid/operators/strided_slice_op.h
@@ -54,7 +54,9 @@ static void StridedSliceOutDims(
       continue;
     }

-    PADDLE_ENFORCE_NE(stride_index, 0, "stride must not to be zero");
+    PADDLE_ENFORCE_NE(stride_index, 0,
+                      platform::errors::InvalidArgument(
+                          "stride index in StridedSlice operator is 0."));
     int axis_size = in_dims[axes_index];
     if (axis_size < 0) {
       continue;
@@ -78,8 +80,9 @@ static void StridedSliceOutDims(
         ((stride_index < 0 && (start_index <= end_index)) ||
          (stride_index > 0 && (start_index >= end_index)));
     PADDLE_ENFORCE_EQ(zero_dim_condition, false,
-                      "starts and end must meet requirement in different "
-                      "stride conditiont");
+                      platform::errors::InvalidArgument(
+                          "The start index and end index are invalid for their "
+                          "corresponding stride."));
     int left = std::max(0, std::min(start_index, end_index));
     int right = std::min(axis_size, std::max(start_index, end_index));
     int step = std::abs(stride_index);
@@ -249,8 +252,11 @@ class StridedSliceKernel : public framework::OpKernel<T> {
     if (decrease_axis.size() > 0) {
       std::vector<int64_t> new_out_shape;
       for (size_t i = 0; i < decrease_axis.size(); ++i) {
-        PADDLE_ENFORCE_EQ(out_dims[decrease_axis[i]], 1,
-                          "decrease dim should be 1");
+        PADDLE_ENFORCE_EQ(
+            out_dims[decrease_axis[i]], 1,
+            platform::errors::InvalidArgument(
+                "the size of decrease dimension should be 1, but received %d.",
+                out_dims[decrease_axis[i]]));
         out_dims_origin[decrease_axis[i]] = 0;
       }
diff --git a/paddle/fluid/operators/transpose_op.cc b/paddle/fluid/operators/transpose_op.cc
index d32ecd37817..6849bd73950 100644
--- a/paddle/fluid/operators/transpose_op.cc
+++ b/paddle/fluid/operators/transpose_op.cc
@@ -31,31 +31,33 @@ class TransposeOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Transpose");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Transpose");
     auto x_dims = ctx->GetInputDim("X");
     std::vector<int> axis = ctx->Attrs().Get<std::vector<int>>("axis");
     size_t x_rank = x_dims.size();
     size_t axis_size = axis.size();

     PADDLE_ENFORCE_EQ(x_rank, axis_size,
-                      "ShapeError: The input tensor's dimension "
-                      "should be equal to the axis's size. "
-                      "But received input tensor's dimension is %d, "
-                      "axis's size is %d",
-                      x_rank, axis_size);
+                      platform::errors::InvalidArgument(
+                          "The input tensor's dimension "
+                          "should be equal to the axis's size. "
+                          "But received input tensor's dimension is %d, "
+                          "axis's size is %d",
+                          x_rank, axis_size));

     std::vector<int> count(axis_size, 0);
     for (size_t i = 0; i < axis_size; i++) {
-      PADDLE_ENFORCE(
-          axis[i] < static_cast<int>(axis_size) && ++count[axis[i]] == 1,
-          "ValueError: Each element of Attribute axis should "
-          "be a unique value range from 0 to (dims - 1), "
-          "where the dims is the axis's size, "
-          "unique value means this axis value can appear only once. "
-          "But received axis[%d] is %d, axis_size is %d, "
-          "count[axis[%d]] is %d",
-          i, axis[i], axis_size, i, count[axis[i]]);
+      PADDLE_ENFORCE_EQ(
+          axis[i] < static_cast<int>(axis_size) && ++count[axis[i]] == 1, true,
+          platform::errors::InvalidArgument(
+              "Each element of Attribute axis should "
+              "be a unique value range from 0 to (dims - 1), "
+              "where the dims is the axis's size, "
+              "unique value means this axis value can appear only once. "
+              "But received axis[%d] is %d, axis_size is %d, "
+              "count[axis[%d]] is %d",
+              i, axis[i], axis_size, i, count[axis[i]]));
     }

     framework::DDim out_dims(x_dims);
@@ -149,9 +151,9 @@ class TransposeOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "TransposeOpGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "TransposeOpGrad");
     auto x_dims = ctx->GetInputDim("X");
     ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
     if (ctx->HasOutput(framework::GradVarName("X"))) {
@@ -193,8 +195,7 @@ class Transpose2Op : public TransposeOp {

   void InferShape(framework::InferShapeContext *ctx) const override {
     TransposeOp::InferShape(ctx);
-    PADDLE_ENFORCE(ctx->HasOutput("XShape"),
-                   "Output(XShape) should not be null");
+    OP_INOUT_CHECK(ctx->HasOutput("XShape"), "Output", "XShape", "Transpose2");
     const auto &in_dims = ctx->GetInputDim("X");
     std::vector<int64_t> x_shape_dim(in_dims.size() + 1);
     x_shape_dim[0] = 0;
@@ -259,9 +260,10 @@ class Transpose2OpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("XShape"), "Input(XShape) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput("XShape"), "Input", "XShape",
+                   "Transpose2OpGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "Transpose2OpGrad");
     if (ctx->HasOutput(framework::GradVarName("X"))) {
       auto xshape_dim = ctx->GetInputDim("XShape");
       auto x_shape_dim =
diff --git a/paddle/fluid/operators/transpose_op.h b/paddle/fluid/operators/transpose_op.h
index 557cb408ee8..f2951e90ebe 100644
--- a/paddle/fluid/operators/transpose_op.h
+++ b/paddle/fluid/operators/transpose_op.h
@@ -53,7 +53,10 @@ inline void TransCompute(const int dim, const DeviceContext& dev_ctx,
       trans6(dev_ctx, in, out, axis);
       break;
     default:
-      PADDLE_THROW("Tensors with rank at most 6 are supported");
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "Tensors with rank at most 6 are supported, "
+          "but received input tensor's rank is %d.",
+          dim));
   }
 }
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 1a22c084eb2..a38389d7869 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -11929,18 +11929,31 @@ def strided_slice(input, axes, starts, ends, strides):
             sliced_2 = fluid.layers.strided_slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends, strides=strides_2)
             # sliced_2 is input[:, 0:3:1, 0:2:1, 2:4:2].
""" - if not isinstance(starts, (list, tuple, Variable)): - raise ValueError( - "Input starts must be an Variable, python list or tuple.") - if not isinstance(ends, (list, tuple, Variable)): - raise ValueError( - "Input ends must be an Variable, python list or tuple.") - if not isinstance(strides, (list, tuple, Variable)): - raise ValueError( - "Input strides must be an Variable, python list or tuple.") - helper = LayerHelper('strided_slice', **locals()) + check_variable_and_dtype(input, 'input', + ['float32', 'float64', 'int32', 'int64'], + 'strided_slice') + check_type(axes, 'axes', (list, tuple), 'strided_slice') + check_type(starts, 'starts', (list, tuple, Variable), 'strided_slice') + check_type(ends, 'ends', (list, tuple, Variable), 'strided_slice') + check_type(strides, 'strides', (list, tuple, Variable), 'strided_slice') + + def check_list_elements_dtype(list_input, input_name): + if isinstance(list_input, Variable): + check_dtype(list_input.dtype, input_name, ['int32'], + 'strided_slice') + else: + for i, var in enumerate(list_input): + var_name = input_name + '[' + str(i) + ']' + if isinstance(var, Variable): + check_dtype(var.dtype, var_name, ['int32'], 'strided_slice') + + check_list_elements_dtype(axes, 'axes') + check_list_elements_dtype(starts, 'starts') + check_list_elements_dtype(ends, 'ends') + check_list_elements_dtype(strides, 'strides') + def get_new_list_tensor(old_list): new_list_tensor = [] for dim in old_list: diff --git a/python/paddle/fluid/layers/sequence_lod.py b/python/paddle/fluid/layers/sequence_lod.py index d52600268cc..6b2686f7a1e 100644 --- a/python/paddle/fluid/layers/sequence_lod.py +++ b/python/paddle/fluid/layers/sequence_lod.py @@ -239,6 +239,8 @@ def sequence_softmax(input, use_cudnn=False, name=None): assert not in_dygraph_mode(), ( "sequence layer is not supported in dygraph mode yet.") helper = LayerHelper('sequence_softmax', **locals()) + check_variable_and_dtype(input, 'input', ['float32', 'float64'], + 'sequence_softmax') dtype = helper.input_dtype() softmax_out = helper.create_variable_for_type_inference(dtype) helper.append_op( @@ -560,10 +562,10 @@ def sequence_slice(input, offset, length, name=None): Args: input(Variable): LoDTensor, The input Variable which consists of the complete - sequences.The data type is float32 or float64. - offset(Variable): LoDTensor, The offset to slice each sequence.The data + sequences.The data type can be float32, float64, int32 or int64 + offset(Variable): LoDTensor, The offset to slice each sequence. The data type is int32 or int64. - length(Variable): LoDTensor, The length of each subsequence.The data + length(Variable): LoDTensor, The length of each subsequence. The data type is int32 or int64. name(str|None): The default value is None. Normally there is no need for user to set this property. 
For more information, @@ -588,6 +590,15 @@ def sequence_slice(input, offset, length, name=None): assert not in_dygraph_mode(), ( "sequence layer is not supported in dygraph mode yet.") helper = LayerHelper("sequence_slice", **locals()) + + check_variable_and_dtype(input, 'input', + ['float32', 'float64', 'int32', 'int64'], + 'sequence_slice') + check_variable_and_dtype(offset, 'offset', ['int32', 'int64'], + 'sequence_slice') + check_variable_and_dtype(length, 'length', ['int32', 'int64'], + 'sequence_slice') + dtype = helper.input_dtype() out = helper.create_variable_for_type_inference(dtype) @@ -1137,7 +1148,7 @@ def sequence_scatter(input, index, updates, name=None): Args: input (Variable): A Tensor with shape of :math:`[N, k_1... k_n]`. Supported data types: float32, float64, int32, int64. - index (Variable): A LoDTensor contains index information. Its LoD level must be 1 and its data type must be int64. + index (Variable): A LoDTensor contains index information. Its LoD level must be 1 and its data type can be int32 or int64. updates (Variable): A LodTensor contains updates information. It has the same LoD level with the index and has the same data type with the input. Supported data types: float32, float64, int32, int64. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, @@ -1161,6 +1172,16 @@ def sequence_scatter(input, index, updates, name=None): assert not in_dygraph_mode(), ( "sequence layer is not supported in dygraph mode yet.") helper = LayerHelper('sequence_scatter', **locals()) + + check_variable_and_dtype(input, 'input', + ['float32', 'float64', 'int32', 'int64'], + 'sequence_scatter') + check_variable_and_dtype(index, 'index', ['int32', 'int64'], + 'sequence_scatter') + check_variable_and_dtype(updates, 'updates', + ['float32', 'float64', 'int32', 'int64'], + 'sequence_scatter') + dtype = helper.input_dtype() out = helper.create_variable_for_type_inference(dtype) helper.append_op( diff --git a/python/paddle/fluid/nets.py b/python/paddle/fluid/nets.py index 35ca2972a25..50a954d3e07 100644 --- a/python/paddle/fluid/nets.py +++ b/python/paddle/fluid/nets.py @@ -305,6 +305,8 @@ def sequence_conv_pool(input, act="tanh", pool_type="sqrt") """ + + check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'input') conv_out = layers.sequence_conv( input=input, num_filters=num_filters, diff --git a/python/paddle/fluid/tests/unittests/test_strided_slice_op.py b/python/paddle/fluid/tests/unittests/test_strided_slice_op.py index 2f52b6c399e..eb3db350fc5 100644 --- a/python/paddle/fluid/tests/unittests/test_strided_slice_op.py +++ b/python/paddle/fluid/tests/unittests/test_strided_slice_op.py @@ -444,11 +444,11 @@ class TestStridedSliceAPI(unittest.TestCase): minus_1 = fluid.layers.fill_constant([1], "int32", -1) minus_3 = fluid.layers.fill_constant([1], "int32", -3) starts = fluid.layers.data( - name='starts', shape=[3], append_batch_size=False) + name='starts', shape=[3], dtype='int32', append_batch_size=False) ends = fluid.layers.data( - name='ends', shape=[3], append_batch_size=False) + name='ends', shape=[3], dtype='int32', append_batch_size=False) strides = fluid.layers.data( - name='strides', shape=[3], append_batch_size=False) + name='strides', shape=[3], dtype='int32', append_batch_size=False) x = fluid.layers.data( name="x", -- GitLab
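
Editor's note (not part of the patch): every hunk above applies the same
error-reporting pattern, condensed in the hedged C++ sketch below. "MyOp" and
its inputs are hypothetical, and the header paths are assumptions that may
differ across Paddle versions; this illustrates the pattern, it is not code
from the patch.

    // Pattern used throughout this patch:
    //  1. OP_INOUT_CHECK replaces hand-written "should not be null" checks.
    //  2. Each PADDLE_ENFORCE_* carries a typed platform::errors::* payload
    //     that names the operator, states the expectation, and echoes the
    //     value actually received.
    #include "paddle/fluid/framework/op_registry.h"  // assumed header path
    #include "paddle/fluid/platform/enforce.h"       // assumed header path

    void InferShapeSketch(paddle::framework::InferShapeContext *ctx) {
      // Null checks: (condition, "Input"/"Output", variable name, op type).
      OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "MyOp");
      OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "MyOp");

      auto x_dims = ctx->GetInputDim("X");
      // Value checks: expected vs. actual, with a typed error message.
      PADDLE_ENFORCE_EQ(
          x_dims.size(), 2,
          paddle::platform::errors::InvalidArgument(
              "Input(X) of MyOp should be 2-D, but received %d-D.",
              x_dims.size()));
    }

The error-class choice follows the patch's own convention: InvalidArgument
for bad shapes or attributes, OutOfRange for index overruns, and
Unimplemented for unsupported devices.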