From a9520db24ef8b101514b0bcaff39dd359f72dcb7 Mon Sep 17 00:00:00 2001
From: Yibing Liu
Date: Thu, 14 May 2020 11:02:04 +0800
Subject: [PATCH] Format error message for ops (#24482)

* Format error message for ops, test=develop

* Fix check in sequence_expand, test=develop
---
 paddle/fluid/operators/crf_decoding_op.cc     |  83 +++++++----
 paddle/fluid/operators/crf_decoding_op.h      |  16 ++-
 paddle/fluid/operators/edit_distance_op.cc    |  60 +++++---
 paddle/fluid/operators/expand_as_op.cc        |  28 ++--
 paddle/fluid/operators/linear_chain_crf_op.cc | 129 ++++++++++++------
 .../sequence_ops/sequence_conv_op.cc          |  63 ++++++---
 .../operators/sequence_ops/sequence_conv_op.h |  19 ++-
 .../sequence_ops/sequence_enumerate_op.cc     |   8 +-
 .../sequence_ops/sequence_expand_op.cc        |  75 ++++++----
 .../sequence_ops/sequence_pool_op.cc          |  41 +++---
 python/paddle/fluid/layers/loss.py            |   2 +
 python/paddle/fluid/layers/nn.py              |  11 +-
 python/paddle/fluid/layers/sequence_lod.py    |  11 +-
 13 files changed, 361 insertions(+), 185 deletions(-)

diff --git a/paddle/fluid/operators/crf_decoding_op.cc b/paddle/fluid/operators/crf_decoding_op.cc
index 746f96dcac0..6d3e6e34c3b 100644
--- a/paddle/fluid/operators/crf_decoding_op.cc
+++ b/paddle/fluid/operators/crf_decoding_op.cc
@@ -89,41 +89,57 @@ class CRFDecodingOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Emission"), true,
-                      "Input(Emission) should be not null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Transition"), true,
-                      "Input(Transition) should be not null.");
-
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("ViterbiPath"), true,
-                      "Output(ViterbiPath) should be not null.");
+    OP_INOUT_CHECK(ctx->HasInput("Emission"), "Input", "Emission",
+                   "CRFDecoding");
+    OP_INOUT_CHECK(ctx->HasInput("Transition"), "Input", "Transition",
+                   "CRFDecoding");
+    OP_INOUT_CHECK(ctx->HasOutput("ViterbiPath"), "Output", "ViterbiPath",
+                   "CRFDecoding");
 
     auto emission_dims = ctx->GetInputDim("Emission");
     bool has_length = ctx->HasInput("Length");
 
     if (has_length) {
       PADDLE_ENFORCE_EQ(emission_dims.size(), 3,
-                        "The Input(Emission) should be a 3-D tensor.");
+                        platform::errors::InvalidArgument(
+                            "The Input(Emission) should be a 3-D tensor. But "
+                            "received: input rank %u, input shape [%s]. ",
+                            emission_dims.size(), emission_dims));
     } else {
       PADDLE_ENFORCE_EQ(emission_dims.size(), 2,
-                        "The Input(Emission) should be a 2-D tensor.");
+                        platform::errors::InvalidArgument(
+                            "The Input(Emission) should be a 2-D tensor. But "
+                            "received: input rank %u, input shape [%s].",
+                            emission_dims.size(), emission_dims));
     }
-    PADDLE_ENFORCE_NE(emission_dims[0], 0,
-                      "An empty mini-batch is not allowed.");
 
     auto transition_dims = ctx->GetInputDim("Transition");
     PADDLE_ENFORCE_EQ(transition_dims.size(), 2UL,
-                      "The Input(Transition) should be a 2-D tensor.");
+                      platform::errors::InvalidArgument(
+                          "The Input(Transition) should be a 2-D tensor. But "
+                          "received: input rank %u, input shape [%s].",
+                          transition_dims.size(), transition_dims));
     PADDLE_ENFORCE_EQ(
         transition_dims[0] - 2, transition_dims[1],
-        "An invalid dimension for the Input(Transition), which should "
-        "be a 2-D tensor with shape [(D + 2) x D].");
+        platform::errors::InvalidArgument(
+            "An invalid dimension for the Input(Transition), which should "
+            "be a 2-D tensor with shape [(D + 2) x D]. But received: input "
+            "rank %u, "
+            "input shape [%s].",
+            transition_dims.size(), transition_dims));
     if (ctx->IsRuntime() || (emission_dims[emission_dims.size() - 1] > 0 &&
                              transition_dims[transition_dims.size() - 1] > 0)) {
-      PADDLE_ENFORCE_EQ(
-          emission_dims[emission_dims.size() - 1],
-          transition_dims[transition_dims.size() - 1],
-          "The last dimension of the Input(Emission) and the Input(Transition) "
-          "should be equal to the tag number.");
+      PADDLE_ENFORCE_EQ(emission_dims[emission_dims.size() - 1],
+                        transition_dims[transition_dims.size() - 1],
+                        platform::errors::InvalidArgument(
+                            "The last dimension of the Input(Emission) and the "
+                            "Input(Transition) "
+                            "should be equal to the tag number. But received "
+                            "Input(Emission): rank "
+                            "%u, shape [%s]; received Input(Transition): rank "
+                            "%u, shape [%s].",
+                            emission_dims.size(), emission_dims,
+                            transition_dims.size(), transition_dims));
     }
     if (ctx->HasInput("Label")) {
       auto label_dims = ctx->GetInputDim("Label");
@@ -132,20 +148,31 @@ class CRFDecodingOp : public framework::OperatorWithKernel {
           (label_dims.size() == 3UL && label_dims[2] == 1) ||
               label_dims.size() == 2UL,
           true,
-          "The Input(Label) should be a 3-D tensor with last dimension "
-          "fixed to 1 or a 2-D tensor in padding mode.");
+          platform::errors::InvalidArgument(
+              "The Input(Label) should be a 3-D tensor with last dimension "
+              "fixed to 1 or a 2-D tensor in padding mode. But received: "
+              "input "
+              "rank %u, input shape [%s].",
+              label_dims.size(), label_dims));
     } else {
-      PADDLE_ENFORCE_EQ((label_dims.size() == 2UL && label_dims[1] == 1) ||
-                            label_dims.size() == 1UL,
-                        true,
-                        "The Input(Label) should be a 2-D tensor with last "
-                        "dimension fixed to 1 or a 1-D tensor.");
+      PADDLE_ENFORCE_EQ(
+          (label_dims.size() == 2UL && label_dims[1] == 1) ||
+              label_dims.size() == 1UL,
+          true, platform::errors::InvalidArgument(
+                    "The Input(Label) should be a 2-D tensor with last "
+                    "dimension fixed to 1 or a 1-D tensor. But received: "
+                    "input rank %u, input shape [%s].",
+                    label_dims.size(), label_dims));
     }
     if (ctx->IsRuntime() || (emission_dims[0] > 0 && label_dims[0] > 0)) {
       PADDLE_ENFORCE_EQ(
           emission_dims[0], label_dims[0],
-          "The first dimension of Input(Emission) and Input(Label) "
-          "should be the same.");
+          platform::errors::InvalidArgument(
+              "The first dimension of Input(Emission) and Input(Label) "
+              "should be the same. But received Input(Emission): rank %u, "
+              "shape [%s]; received Input(Label): rank %u, shape [%s].",
+              emission_dims.size(), emission_dims, label_dims.size(),
+              label_dims));
     }
   }
 
diff --git a/paddle/fluid/operators/crf_decoding_op.h b/paddle/fluid/operators/crf_decoding_op.h
index eb868602ff4..33108251b3b 100644
--- a/paddle/fluid/operators/crf_decoding_op.h
+++ b/paddle/fluid/operators/crf_decoding_op.h
@@ -76,9 +76,16 @@ class CRFDecodingOpKernel : public framework::OpKernel<T> {
     }
   } else {
     PADDLE_ENFORCE_EQ(emission_weights->NumLevels(), 1UL,
-                      "The Input(Emission) should be a sequence.");
+                      platform::errors::InvalidArgument(
+                          "The Input(Emission) should be a sequence with lod "
+                          "level 1. But received: lod level %u.",
+                          emission_weights->NumLevels()));
     auto lod = emission_weights->lod();
-    PADDLE_ENFORCE_GT(lod.size(), 0, "Input(Emission) must be a sequence.");
+    PADDLE_ENFORCE_GT(
+        lod.size(), 0,
+        platform::errors::InvalidArgument(
+            "Input(Emission) must be a sequence. But received: lod level %u.",
+            lod.size()));
     const size_t level = 0;
     const size_t seq_num = lod[level].size() - 1;
 
@@ -92,7 +99,10 @@
     }
     if (label) {
       PADDLE_ENFORCE_EQ(label->NumLevels(), 1UL,
-                        "The Input(Label) should be a sequence.");
+                        platform::errors::InvalidArgument(
+                            "The Input(Label) should be a sequence with lod "
+                            "level 1. But received: lod level %u.",
+                            label->NumLevels()));
       const int64_t* label_value = label->data<int64_t>();
       size_t numel = label->numel();
       for (size_t i = 0; i < numel; ++i) {
diff --git a/paddle/fluid/operators/edit_distance_op.cc b/paddle/fluid/operators/edit_distance_op.cc
index 38756ecd9d1..db8a107290e 100644
--- a/paddle/fluid/operators/edit_distance_op.cc
+++ b/paddle/fluid/operators/edit_distance_op.cc
@@ -22,11 +22,11 @@ class EditDistanceOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Hyps"), "Input(Hyps) shouldn't be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Refs"), "Input(Refs) shouldn't be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) shouldn't be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("SequenceNum"),
-                   "Output(SequenceNum) shouldn't be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Hyps"), "Input", "Hyps", "EditDistance");
+    OP_INOUT_CHECK(ctx->HasInput("Refs"), "Input", "Refs", "EditDistance");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "EditDistance");
+    OP_INOUT_CHECK(ctx->HasOutput("SequenceNum"), "Output", "SequenceNum",
+                   "EditDistance");
     auto hyp_dims = ctx->GetInputDim("Hyps");
     auto ref_dims = ctx->GetInputDim("Refs");
@@ -34,23 +34,41 @@
     auto hyp_length_dims = ctx->GetInputDim("HypsLength");
     auto ref_length_dims = ctx->GetInputDim("RefsLength");
 
-    PADDLE_ENFORCE(hyp_dims.size() == 2 && ref_dims.size() == 2 &&
-                       hyp_dims[0] == ref_dims[0],
-                   "Input(Hyps) and Input(Refs) must be 2-D Tensors with "
-                   "identical first dimension");
-    PADDLE_ENFORCE(hyp_length_dims[0] == ref_length_dims[0] &&
-                       hyp_length_dims[0] == hyp_dims[0],
-                   "Input(HypsLength), Input(RefsLength) and Input(Hyps) "
-                   "should have identical first dimension");
+    PADDLE_ENFORCE_EQ(
+        hyp_dims.size() == 2 && ref_dims.size() == 2 &&
+            hyp_dims[0] == ref_dims[0],
+        true, platform::errors::InvalidArgument(
+                  "Input(Hyps) and Input(Refs) must be 2-D Tensors with "
+                  "identical first dimension. But received Input(Hyps): "
+                  "input rank %u, input shape [%s]; received Input(Refs): "
+                  "input rank %u, input shape [%s].",
+                  hyp_dims.size(), hyp_dims, ref_dims.size(), ref_dims));
+    PADDLE_ENFORCE_EQ(
+        hyp_length_dims[0] == ref_length_dims[0] &&
+            hyp_length_dims[0] == hyp_dims[0],
+        true,
+        platform::errors::InvalidArgument(
+            "Input(HypsLength), Input(RefsLength) and Input(Hyps) "
+            "should have identical first dimension. But received "
+            "Input(HypsLength): input rank %u, input shape [%s]; "
+            "received Input(RefsLength): input rank %u, input shape "
+            "[%s]; received Input(Hyps): input rank %u, input shape "
+            "[%s].",
+            hyp_length_dims.size(), hyp_length_dims, ref_length_dims.size(),
+            ref_length_dims, hyp_dims.size(), hyp_dims));
   } else {
-    PADDLE_ENFORCE(
-        hyp_dims.size() == 2 && hyp_dims[1] == 1,
-        "Input(Hyps) must be a 2-D LoDTensor with the 2nd dimension "
-        "equal to 1.");
-    PADDLE_ENFORCE(
-        ref_dims.size() == 2 && ref_dims[1] == 1,
-        "Input(Refs) must be a 2-D LoDTensor with the 2nd dimension "
-        "equal to 1.");
+    PADDLE_ENFORCE_EQ(
+        hyp_dims.size() == 2 && hyp_dims[1] == 1, true,
+        platform::errors::InvalidArgument(
+            "Input(Hyps) must be a 2-D LoDTensor with the 2nd dimension "
+            "equal to 1. But received: input rank %u, input shape [%s].",
+            hyp_dims.size(), hyp_dims));
+    PADDLE_ENFORCE_EQ(
+        ref_dims.size() == 2 && ref_dims[1] == 1, true,
+        platform::errors::InvalidArgument(
+            "Input(Refs) must be a 2-D LoDTensor with the 2nd dimension "
+            "equal to 1. But received: input rank %u, input shape [%s].",
+            ref_dims.size(), ref_dims));
   }
 
   ctx->SetOutputDim("Out", ctx->GetInputDim("Refs"));
diff --git a/paddle/fluid/operators/expand_as_op.cc b/paddle/fluid/operators/expand_as_op.cc
index ad5bcaf9549..870464efed2 100644
--- a/paddle/fluid/operators/expand_as_op.cc
+++ b/paddle/fluid/operators/expand_as_op.cc
@@ -24,17 +24,27 @@ class ExpandAsOp : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true);
-    PADDLE_ENFORCE_EQ(ctx->HasInput("target_tensor"), true);
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true);
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ExpandAs");
+    OP_INOUT_CHECK(ctx->HasInput("target_tensor"), "Input", "target_tensor",
+                   "ExpandAs");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "ExpandAs");
     auto x_dims = ctx->GetInputDim("X");
     auto target_tensor_dims = ctx->GetInputDim("target_tensor");
-    PADDLE_ENFORCE_EQ(static_cast<int>(x_dims.size()),
-                      target_tensor_dims.size(),
-                      "The rank of input(target_tensor) must be equal "
-                      "to the rank of Input(X).");
-    PADDLE_ENFORCE_LE(x_dims.size(), 6,
-                      "The rank of Input(X) must not be greater than 6.");
+    PADDLE_ENFORCE_EQ(
+        static_cast<int>(x_dims.size()), target_tensor_dims.size(),
+        platform::errors::InvalidArgument(
+            "The rank of Input(target_tensor) must be equal "
+            "to the rank of Input(X). But received Input(X): input "
+            "rank %u, input shape [%s]; received Input(target_tensor): "
+            "input rank %u, input shape [%s].",
+            x_dims.size(), x_dims, target_tensor_dims.size(),
+            target_tensor_dims));
+    PADDLE_ENFORCE_LE(
+        x_dims.size(), 6,
+        platform::errors::InvalidArgument(
+            "The rank of Input(X) must not be greater than 6. But "
+            "received: input rank %u, input shape [%s].",
+            x_dims.size(), x_dims));
     std::vector<int64_t> out_shape(x_dims.size());
     ctx->SetOutputDim("Out", framework::make_ddim(out_shape));
   }
diff --git a/paddle/fluid/operators/linear_chain_crf_op.cc b/paddle/fluid/operators/linear_chain_crf_op.cc
index 6aa6a0ddbed..72eb7fb21d1 100644
--- a/paddle/fluid/operators/linear_chain_crf_op.cc
+++ b/paddle/fluid/operators/linear_chain_crf_op.cc
@@ -142,24 +142,27 @@ class LinearChainCRFOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Emission"),
-                   "Input(Emission) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput("Transition"),
-                   "Input(Transition) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null.");
-
-    PADDLE_ENFORCE(ctx->HasOutput("Alpha"),
-                   "Output(Alpha) should be not null.");
-    PADDLE_ENFORCE(ctx->HasOutput("EmissionExps"),
-                   "Output(EmissionExps) should be not null.");
-    PADDLE_ENFORCE(ctx->HasOutput("TransitionExps"),
-                   "Output(TransitionExps) should be not null.");
-    PADDLE_ENFORCE(ctx->HasOutput("LogLikelihood"),
-                   "Output(LogLikelihood) should be not null.");
+    OP_INOUT_CHECK(ctx->HasInput("Emission"), "Input", "Emission",
+                   "LinearChainCRF");
+    OP_INOUT_CHECK(ctx->HasInput("Transition"), "Input", "Transition",
+                   "LinearChainCRF");
+    OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "LinearChainCRF");
+
+    OP_INOUT_CHECK(ctx->HasOutput("Alpha"), "Output", "Alpha",
+                   "LinearChainCRF");
+    OP_INOUT_CHECK(ctx->HasOutput("EmissionExps"), "Output", "EmissionExps",
+                   "LinearChainCRF");
+    OP_INOUT_CHECK(ctx->HasOutput("TransitionExps"), "Output", "TransitionExps",
+                   "LinearChainCRF");
+    OP_INOUT_CHECK(ctx->HasOutput("LogLikelihood"), "Output", "LogLikelihood",
+                   "LinearChainCRF");
 
     auto transition_dims = ctx->GetInputDim("Transition");
-    PADDLE_ENFORCE_EQ(transition_dims.size(), 2,
-                      "The Input(Transition) should be a 2-D tensor.");
+    PADDLE_ENFORCE_EQ(transition_dims.size(), 2UL,
+                      platform::errors::InvalidArgument(
+                          "The Input(Transition) should be a 2-D tensor. But "
+                          "received: input rank %u, input shape [%s].",
+                          transition_dims.size(), transition_dims));
     bool check = true;
     if ((!ctx->IsRuntime()) &&
         (transition_dims[0] <= 0 || transition_dims[1] <= 0)) {
@@ -168,49 +171,88 @@ class LinearChainCRFOp : public framework::OperatorWithKernel {
     if (check) {
       PADDLE_ENFORCE_EQ(
           transition_dims[0] - 2, transition_dims[1],
-          "An invalid dimension for the Input(Transition), which should "
-          "be a 2-D tensor with shape [(D + 2) x D].");
+          platform::errors::InvalidArgument(
+              "An invalid dimension for the Input(Transition), which should "
+              "be a 2-D tensor with shape [(D + 2) x D]. But received: input "
+              "rank %u, "
+              "input shape [%s].",
+              transition_dims.size(), transition_dims));
     }
     auto emission_dims = ctx->GetInputDim("Emission");
-    PADDLE_ENFORCE_NE(emission_dims[0], 0,
-                      "An empty mini-batch is not allowed.");
     if (ctx->HasInput("Length")) {
       PADDLE_ENFORCE_EQ(emission_dims.size(), 3,
-                        "The Input(Emission) should be a 3-D tensor.");
+                        platform::errors::InvalidArgument(
+                            "The Input(Emission) should be a 3-D tensor. But "
+                            "received: input rank %u, input shape [%s].",
+                            emission_dims.size(), emission_dims));
       auto label_dims = ctx->GetInputDim("Label");
       PADDLE_ENFORCE_EQ(
           (label_dims.size() == 3UL && label_dims[2] == 1) ||
               (label_dims.size() == 2UL),
           true,
-          "The Input(Label) should be a 3-D tensor with last "
-          "dimension fixed to 1 or a 2-D tensor in padding mode.");
+          platform::errors::InvalidArgument(
+              "The Input(Label) should be a 3-D tensor with last dimension "
+              "fixed to 1 or a 2-D tensor in padding mode. But received: input "
+              "rank %u, input shape [%s].",
+              label_dims.size(), label_dims));
       if (ctx->IsRuntime()) {
         PADDLE_ENFORCE_EQ(emission_dims[0], label_dims[0],
-                          "The batch size of Input(Emission) and Input(Label) "
-                          "should be the same.");
+                          platform::errors::InvalidArgument(
+                              "The batch size of Input(Emission) "
+                              "and Input(Label) should be the same. But "
+                              "received Input(Emission): "
+                              "rank %u, shape [%s]; received Input(Label): "
+                              "rank %u, shape [%s].",
+                              emission_dims.size(), emission_dims,
+                              label_dims.size(), label_dims));
         PADDLE_ENFORCE_EQ(emission_dims[1], label_dims[1],
-                          "The max length of Input(Emission) and Input(Label) "
-                          "should be the same.");
+                          platform::errors::InvalidArgument(
+                              "The max length of Input(Emission) "
+                              "and Input(Label) should be the same. But "
+                              "received Input(Emission): "
+                              "rank %u, shape [%s]; received Input(Label): "
+                              "rank %u, shape [%s].",
+                              emission_dims.size(), emission_dims,
+                              label_dims.size(), label_dims));
       }
     } else {
-      PADDLE_ENFORCE_EQ(emission_dims.size(), 2,
-                        "The Input(Emission) should be a 2-D tensor.");
+      PADDLE_ENFORCE_EQ(
+          emission_dims.size(), 2,
+          platform::errors::InvalidArgument(
+              "The Input(Emission) should be a 2-D tensor. But received: "
+              "input rank %u, input shape [%s].",
+              emission_dims.size(), emission_dims));
      if (ctx->IsRuntime()) {
         PADDLE_ENFORCE_EQ(emission_dims[1], transition_dims[1],
-                          "The 2nd dimension of the Input(Emission) and the "
-                          "Input(Transition) "
-                          "should be equal to the tag number.");
+                          platform::errors::InvalidArgument(
+                              "The 2nd dimension of the Input(Emission) and "
+                              "the Input(Transition) "
+                              "should be equal to the tag number. But received "
+                              "Input(Emission): rank "
+                              "%u, shape [%s]; received Input(Transition): "
+                              "rank %u, shape [%s].",
+                              emission_dims.size(), emission_dims,
+                              transition_dims.size(), transition_dims));
       }
 
       auto label_dims = ctx->GetInputDim("Label");
-      PADDLE_ENFORCE_EQ(label_dims.size(), 2,
-                        "The Input(Label) should be a 2-D tensor with the 2nd "
-                        "dimensions fixed to 1.");
+      PADDLE_ENFORCE_EQ(
+          label_dims.size(), 2,
+          platform::errors::InvalidArgument(
+              "The Input(Label) should be a 2-D tensor with the 2nd "
+              "dimensions fixed to 1. But received: input rank %u, "
+              "input shape [%s].",
+              label_dims.size(), label_dims));
       if (ctx->IsRuntime()) {
         PADDLE_ENFORCE_EQ(
             emission_dims[0], label_dims[0],
-            "The height of Input(Emission) and the height of Input(Label) "
-            "should be the same.");
+            platform::errors::InvalidArgument(
+                "The first dimension of Input(Emission) and Input(Label) "
+                "should be the same. But received Input(Emission): rank %u, "
+                "shape "
+                "[%s]; received Input(Label): rank %u, shape [%s].",
+                emission_dims.size(), emission_dims, label_dims.size(),
+                label_dims));
       }
     }
     ctx->SetOutputDim("Alpha", emission_dims);
@@ -239,12 +281,13 @@ class LinearChainCRFGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("EmissionExps"),
-                   "Input(EmissionExps) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput("TransitionExps"),
-                   "Input(TransitionExps) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("LogLikelihood")),
-                   "Input(LogLikelihood@GRAD) shoudl be not null.");
+    OP_INOUT_CHECK(ctx->HasInput("EmissionExps"), "Input", "EmissionExps",
+                   "LinearChainCRFGrad");
+    OP_INOUT_CHECK(ctx->HasInput("TransitionExps"), "Input", "TransitionExps",
+                   "LinearChainCRFGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("LogLikelihood")),
+                   "Input", framework::GradVarName("LogLikelihood"),
+                   "LinearChainCRFGrad");
 
     auto transition_exps_dims = ctx->GetInputDim("TransitionExps");
     auto emission_exps_dims = ctx->GetInputDim("EmissionExps");
diff --git a/paddle/fluid/operators/sequence_ops/sequence_conv_op.cc b/paddle/fluid/operators/sequence_ops/sequence_conv_op.cc
index d3b36c798f0..99e8064d244 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_conv_op.cc
+++ b/paddle/fluid/operators/sequence_ops/sequence_conv_op.cc
@@ -28,25 +28,35 @@ class SequenceConvOp : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceConvOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Filter"),
-                   "Input(Filter) of SequenceConvOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SequenceConvOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceConv");
+    OP_INOUT_CHECK(ctx->HasInput("Filter"), "Input", "Filter", "SequenceConv");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceConv");
 
     int context_length = ctx->Attrs().Get<int>("contextLength");
     int context_start = ctx->Attrs().Get<int>("contextStart");
 
     auto in_dims = ctx->GetInputDim("X");
     auto filter_dims = ctx->GetInputDim("Filter");
-    PADDLE_ENFORCE(ctx->Attrs().Get<int>("contextStride") == 1,
-                   "Currently, SequenceConvOp only supports contextStride=1.");
-    PADDLE_ENFORCE(in_dims.size() == 2 && filter_dims.size() == 2,
-                   "Input(X, Filter) should be 2-D tensor.");
-    PADDLE_ENFORCE(filter_dims[0] == context_length * in_dims[1],
-                   "Filter's height should be context_length * "
-                   "input_hidden_size .");
+    PADDLE_ENFORCE_EQ(
+        ctx->Attrs().Get<int>("contextStride"), 1,
+        platform::errors::InvalidArgument(
+            "Currently, SequenceConvOp only supports contextStride=1. But "
+            "received contextStride = %u.",
+            ctx->Attrs().Get<int>("contextStride")));
+    PADDLE_ENFORCE_EQ(
+        in_dims.size() == 2 && filter_dims.size() == 2, true,
+        platform::errors::InvalidArgument(
+            "Input(X, Filter) should be 2-D tensor. But received Input(X): "
+            "input rank %u, input shape [%s]; received Input(Filter): "
+            "input rank %u, input shape [%s].",
+            in_dims.size(), in_dims, filter_dims.size(), filter_dims));
+    PADDLE_ENFORCE_EQ(
+        filter_dims[0], context_length * in_dims[1],
+        platform::errors::InvalidArgument(
+            "Filter's height should be context_length * "
+            "input_hidden_size. But received: filter's height = %d, "
+            "context_length * input_hidden_size = %d.",
+            filter_dims[0], context_length * in_dims[1]));
 
     if (ctx->Attrs().Get<bool>("paddingTrainable")) {
       PADDLE_ENFORCE(
@@ -63,12 +73,21 @@ class SequenceConvOp : public framework::OperatorWithKernel {
         "If context_start is 0 and context_length is 1, paddingTrainable "
         "should be false.");
     }
-    PADDLE_ENFORCE(padding_dim.size() == 2,
-                   "Input(PaddingData) should be 2-D tensor.");
-    PADDLE_ENFORCE(
-        padding_dim[0] == total_pad && padding_dim[1] == input_width,
-        "Input(PaddingData)'s shape is not consistent with 'context_start' "
-        "and 'context_length'.");
+    PADDLE_ENFORCE_EQ(
+        padding_dim.size(), 2,
+        platform::errors::InvalidArgument(
+            "Input(PaddingData) should be 2-D tensor. But received: "
+            "input rank %u, input shape [%s].",
+            padding_dim.size(), padding_dim));
+    PADDLE_ENFORCE_EQ(
+        padding_dim[0] == total_pad && padding_dim[1] == input_width, true,
+        platform::errors::InvalidArgument("Input(PaddingData)'s shape is not "
+                                          "consistent with 'context_start' "
+                                          "and 'context_length'. Received "
+                                          "Input(PaddingData): input rank "
+                                          "%u, "
+                                          "input shape [%s].",
+                                          padding_dim.size(), padding_dim));
   }
 
   in_dims[1] = filter_dims[1];
@@ -83,9 +102,9 @@ class SequenceConvGradOp : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Gradient of output(Out) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("X"), "The input(X) should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "SequenceConvGrad");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceConvGrad");
 
     if (ctx->Attrs().Get<bool>("paddingTrainable") &&
         ctx->HasOutput(framework::GradVarName("PaddingData"))) {
diff --git a/paddle/fluid/operators/sequence_ops/sequence_conv_op.h b/paddle/fluid/operators/sequence_ops/sequence_conv_op.h
index e35412e31d8..f73b1804199 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_conv_op.h
+++ b/paddle/fluid/operators/sequence_ops/sequence_conv_op.h
@@ -41,9 +41,14 @@ class SequenceConvKernel : public framework::OpKernel<T> {
 
     PADDLE_ENFORCE_EQ(
         in->lod().empty(), false,
-        "Input(X) Tensor of SequenceConvOp does not contain LoD information.");
-    PADDLE_ENFORCE_EQ(in->lod().size(), 1UL,
-                      "Only support one level sequence now.");
+        platform::errors::InvalidArgument("Input(X) Tensor of SequenceConvOp "
+                                          "does not contain LoD information."));
+    PADDLE_ENFORCE_EQ(
+        in->lod().size(), 1UL,
+        platform::errors::InvalidArgument(
+            "Only support input sequence with lod level equal to 1 at "
+            "present. But received: lod level %u.",
+            in->lod().size()));
 
     const Tensor* padding_data = nullptr;
     if (padding_trainable) {
@@ -90,8 +95,12 @@ class SequenceConvGradKernel : public framework::OpKernel<T> {
     int context_stride = context.Attr<int>("contextStride");
     bool padding_trainable = context.Attr<bool>("paddingTrainable");
 
-    PADDLE_ENFORCE_EQ(in->lod().size(), 1UL,
-                      "Only support one level sequence now.");
+    PADDLE_ENFORCE_EQ(
+        in->lod().size(), 1UL,
+        platform::errors::InvalidArgument(
+            "Only support input sequence with lod level equal to 1 at "
+            "present. But received: lod level %u.",
+            in->lod().size()));
 
     auto lod_g_level_0 = in->lod()[0];
 
     int up_pad = std::max(0, -context_start);
diff --git a/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc b/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc
index cc4eedbf4de..1dbddfa709d 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc
+++ b/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc
@@ -22,12 +22,8 @@ class SequenceEnumerateOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(
-        ctx->HasInput("X"),
-        "Input(X) of SequecceEnumerate operator should not be null.");
-    PADDLE_ENFORCE(
-        ctx->HasOutput("Out"),
-        "Output(X) of SequenceEnumerate operator should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceEnumerate");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceEnumerate");
     const auto x_dims = ctx->GetInputDim("X");
     const auto win_size = ctx->Attrs().Get<int>("win_size");
diff --git a/paddle/fluid/operators/sequence_ops/sequence_expand_op.cc b/paddle/fluid/operators/sequence_ops/sequence_expand_op.cc
index 7f4cca21965..20251d9533a 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_expand_op.cc
+++ b/paddle/fluid/operators/sequence_ops/sequence_expand_op.cc
@@ -26,19 +26,20 @@ class SequenceExpandOp : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of SequenceExpandOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Y"),
-                   "Input(Y) of SequenceExpandOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of SequenceExpandOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceExpand");
+    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "SequenceExpand");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceExpand");
 
     auto x_dims = ctx->GetInputDim("X");
     auto out_dims = x_dims;
     int ref_level = ctx->Attrs().Get<int>("ref_level");
 
-    PADDLE_ENFORCE_GE(x_dims.size(), 2,
-                      "Dimension number of Input(X) should be at least 2.");
+    PADDLE_ENFORCE_GE(
+        x_dims.size(), 2,
+        platform::errors::InvalidArgument(
+            "Dimension number of Input(X) should be at least 2. But "
+            "received: input rank %u, input shape [%s].",
+            x_dims.size(), x_dims));
 
     if (ctx->IsRuntime()) {
       framework::Variable* x_var =
       auto& y_lod = y_var->Get<LoDTensor>().lod();
 
       PADDLE_ENFORCE_LE(x_lod.size(), 1UL,
-                        "Level number of Input(X)'s lod should not be "
-                        "greater than 1.");
-      PADDLE_ENFORCE_GT(y_lod.size(), 0UL,
-                        "Level number of Input(Y)'s lod should be "
-                        "greater than 0.");
-      PADDLE_ENFORCE(
+                        platform::errors::InvalidArgument(
+                            "Level of Input(X)'s lod should not be "
+                            "greater than 1. But received: lod level %u.",
+                            x_lod.size()));
+      PADDLE_ENFORCE_GT(
+          y_lod.size(), 0UL,
+          platform::errors::InvalidArgument(
+              "Level of Input(Y)'s lod should be greater than 0. But "
+              "received: lod level %u.",
+              y_lod.size()));
+      PADDLE_ENFORCE_EQ(
           ref_level == -1 ||
               (ref_level >= 0 && ref_level < static_cast<int>(y_lod.size())),
-          "Invlid `ref_level`, which should be either equal to -1 "
-          "or in [0, %d)",
-          y_lod.size());
+          true, platform::errors::InvalidArgument(
+                    "Invalid `ref_level`, which should be either equal to -1 "
+                    "or in [0, %d), but received `ref_level` = %u.",
+                    y_lod.size(), ref_level));
 
       if (ref_level == -1) ref_level = y_lod.size() - 1;
 
       if (x_lod.size() > 0) {
-        PADDLE_ENFORCE(x_lod[0].size() == y_lod[ref_level].size(),
-                       "Level number of Input(X)'s lod could be 0. Otherwise "
-                       "size of Input(X)'s first level lod should be equal to "
-                       "size of Input(Y)'s referred level lod.");
+        PADDLE_ENFORCE_EQ(
+            x_lod[0].size(), y_lod[ref_level].size(),
+            platform::errors::InvalidArgument(
+                "Level number of Input(X)'s lod could be 0. Otherwise "
+                "size of Input(X)'s first level lod should be equal to "
+                "size of Input(Y)'s referred level lod. But received: "
+                "Input(X).lod[0].size() = %u, Input(Y).lod[%d].size() = "
+                "%u.",
+                x_lod[0].size(), ref_level, y_lod[ref_level].size()));
       } else {
-        PADDLE_ENFORCE_EQ(x_dims[0],
-                          static_cast<int64_t>(y_lod[ref_level].size()) - 1,
-                          "When Input(X)'s lod is null, the dims[0] of "
-                          "Input(X) should match the "
-                          "size of Input(Y)'s referred level lod.");
+        PADDLE_ENFORCE_EQ(
+            x_dims[0], static_cast<int64_t>(y_lod[ref_level].size()) - 1,
+            platform::errors::InvalidArgument(
+                "When Input(X)'s lod is null, the dims[0] of "
+                "Input(X) should match the "
+                "size of Input(Y)'s referred level lod. But received "
+                "Input(X): input rank %u, input shape [%s]; received "
+                "Input(Y).lod[%d].size() - 1 = %d.",
+                x_dims.size(), x_dims, ref_level,
+                static_cast<int64_t>(y_lod[ref_level].size()) - 1));
       }
 
       int64_t out_first_dim = 0;
@@ -194,9 +211,9 @@ class SequenceExpandOpGrad : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequenceExpandOpGrad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "SequenceExpandOpGrad");
 
     auto x_dims = ctx->GetInputDim("X");
     auto x_grad_name = framework::GradVarName("X");
diff --git a/paddle/fluid/operators/sequence_ops/sequence_pool_op.cc b/paddle/fluid/operators/sequence_ops/sequence_pool_op.cc
index 884f65c42bc..8c272c4a15f 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_pool_op.cc
+++ b/paddle/fluid/operators/sequence_ops/sequence_pool_op.cc
@@ -24,25 +24,24 @@ class SequencePoolOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "Input(X) of SequencePoolOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      "Output(Out) of SequencePoolOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequencePool");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequencePool");
 
     if (!ctx->IsRuntime()) {
       // Check the lod_level for compile-time.
       auto in_lod_level = ctx->GetLoDLevel("X");
-      PADDLE_ENFORCE_GT(
-          in_lod_level, 0,
-          "The LoD level Input(X) of sequence_pool should be larger than 0.");
+      PADDLE_ENFORCE_GT(in_lod_level, 0, platform::errors::InvalidArgument(
+                                              "The LoD level of Input(X) should "
+                                              "be larger than 0, but received: "
+                                              "lod level %u.",
+                                              in_lod_level));
       ctx->SetLoDLevel("Out", in_lod_level - 1);
     }
 
     ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
     if (ctx->Attrs().Get<std::string>("pooltype") == "MAX") {
-      PADDLE_ENFORCE_EQ(
-          ctx->HasOutput("MaxIndex"), true,
-          "Output(MaxIndex) of SequencePoolOp should not be null.");
+      OP_INOUT_CHECK(ctx->HasOutput("MaxIndex"), "Output", "MaxIndex",
+                     "SequencePool");
       ctx->SetOutputDim("MaxIndex", ctx->GetInputDim("X"));
     }
   }
@@ -113,16 +112,26 @@ class SequencePoolGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
-                      "Gradient of Out should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      "The input X should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "SequencePoolGrad");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "SequencePoolGrad");
+
     auto og_dims = ctx->GetInputDim(framework::GradVarName("Out"));
     auto x_dims = ctx->GetInputDim("X");
     PADDLE_ENFORCE_EQ(og_dims.size(), x_dims.size(),
-                      "The rank of output grad must equal to Input(X).");
+                      platform::errors::InvalidArgument(
+                          "The rank of output grad must be equal to the rank "
+                          "of Input(X). But received: input rank %u, input "
+                          "shape [%s].",
+                          og_dims.size(), og_dims));
     for (int64_t i = 1; i < og_dims.size(); ++i) {
-      PADDLE_ENFORCE_EQ(og_dims[i], x_dims[i], "The dimension mismatch.");
+      PADDLE_ENFORCE_EQ(
+          og_dims[i], x_dims[i],
+          platform::errors::InvalidArgument(
+              "There is a dimension mismatch between Input(OUT@GRAD) and "
+              "Input(X). Received Input(OUT@GRAD): input rank %u, "
+              "input shape [%s]; received Input(X): input rank %u, "
+              "input shape [%s].",
+              og_dims.size(), og_dims, x_dims.size(), x_dims));
     }
 
     ctx->ShareDim("X", /*->*/ framework::GradVarName("X"));
diff --git a/python/paddle/fluid/layers/loss.py b/python/paddle/fluid/layers/loss.py
index 968ddb8fa9f..9b6f2235ef5 100644
--- a/python/paddle/fluid/layers/loss.py
+++ b/python/paddle/fluid/layers/loss.py
@@ -459,6 +459,8 @@ def edit_distance(input,
             # [4]
 
     """
+    check_variable_and_dtype(input, 'input', ['int64'], 'edit_distance')
+    check_variable_and_dtype(label, 'label', ['int64'], 'edit_distance')
     helper = LayerHelper("edit_distance", **locals())
 
     # remove some tokens from input and labels
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 213a2549e61..fb61ba0956b 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -779,6 +779,9 @@ def linear_chain_crf(input, label, param_attr=None, length=None):
             print(transition)
 
     """
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
+                             'linear_chain_crf')
+    check_variable_and_dtype(label, 'label', ['int64'], 'linear_chain_crf')
     helper = LayerHelper('linear_chain_crf', **locals())
     size = input.shape[2] if length else input.shape[1]
     transition = helper.create_parameter(
@@ -861,6 +864,8 @@ def crf_decoding(input, param_attr, label=None, length=None):
           crf_decode = fluid.layers.crf_decoding(input=emission, length=length,
                     param_attr=fluid.ParamAttr(name="crfw_pad"))
     """
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
+                             'crf_decoding')
     helper = LayerHelper('crf_decoding', **locals())
     transition = helper.get_parameter(param_attr.name)
     viterbi_path = helper.create_variable_for_type_inference(
@@ -10064,7 +10069,11 @@ def expand_as(x, target_tensor, name=None):
         #(3,20)
 
     """
-
+    check_variable_and_dtype(
+        x, 'x', ['float32', 'float64', 'int32', 'int64', 'bool'], 'expand_as')
+    check_variable_and_dtype(target_tensor, 'target_tensor',
+                             ['float32', 'float64', 'int32', 'int64', 'bool'],
+                             'expand_as')
     helper = LayerHelper('expand_as', input=x, **locals())
     dtype = helper.input_dtype(input_param_name='x')
     out = helper.create_variable_for_type_inference(dtype)
diff --git a/python/paddle/fluid/layers/sequence_lod.py b/python/paddle/fluid/layers/sequence_lod.py
index fd0654d8617..b3a278c4633 100644
--- a/python/paddle/fluid/layers/sequence_lod.py
+++ b/python/paddle/fluid/layers/sequence_lod.py
@@ -145,6 +145,8 @@ def sequence_conv(input,
 
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
+                             'sequence_conv')
     helper = LayerHelper('sequence_conv', **locals())
     dtype = helper.input_dtype()
     filter_shape = [filter_size * input.shape[1], num_filters]
@@ -338,6 +340,7 @@ def sequence_pool(input, pool_type, is_test=False, pad_value=0.0):
     """
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
+    check_variable_and_dtype(input, 'input', ['float32'], 'sequence_pool')
     helper = LayerHelper('sequence_pool', **locals())
     dtype = helper.input_dtype()
     pool_out = helper.create_variable_for_type_inference(dtype)
@@ -672,7 +675,7 @@ def sequence_expand(x, y, ref_level=-1, name=None):
     Args:
         x (Variable): The input variable which is a Tensor or LoDTensor, with the \
            dims ``[M, K]``. The lod level is at most 1. The data type should be \
-            float32, float64, int8, int32 or int64.
+            float32, float64, int32 or int64.
         y (Variable): The input variable which is a LoDTensor, the lod level is \
             at least 1.
         ref_level (int): Lod level of ``y`` to be referred by ``x``. If set to -1, \
             refer the last level of lod.
@@ -732,6 +735,8 @@ def sequence_expand(x, y, ref_level=-1, name=None):
     """
     assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
+    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
+                             'sequence_expand')
     helper = LayerHelper('sequence_expand', input=x, **locals())
     dtype = helper.input_dtype()
     tmp = helper.create_variable_for_type_inference(dtype)
@@ -1220,7 +1225,7 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None):
     Args:
         input (Variable): The input variable which is a index sequence, \
            which should be a LodTensor with shape ``[d_1, 1]`` and 1-level lod info. \
-            The data type should be float32, float64, int8, int32 or int64.
+            The data type should be int32 or int64.
         win_size (int): The window size for enumerating all sub-sequences.
         pad_value (int, optional): The padding value, default 0.
         name(str, optional): For detailed information, please refer \
@@ -1243,6 +1248,8 @@
     """
    assert not in_dygraph_mode(), (
         "sequence layer is not supported in dygraph mode yet.")
+    check_variable_and_dtype(input, 'input', ['int32', 'int64'],
+                             'sequence_enumerate')
     helper = LayerHelper('sequence_enumerate', **locals())
     out = helper.create_variable_for_type_inference(
         helper.input_dtype(), stop_gradient=True)
-- 
GitLab
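
A note for readers trying this convention outside the Paddle tree: every rewritten check above states the expectation first and then the offending value ("The Input(X) should be ... But received: input rank %u, input shape [%s]."), with platform::errors::InvalidArgument carrying a printf-style format string. The standalone C++ sketch below mimics that message shape without requiring a Paddle build; EnforceEq and FormatShape are hypothetical stand-ins for PADDLE_ENFORCE_EQ and DDim's "[%s]" formatting, not Paddle APIs.

    // Minimal, Paddle-free sketch of the "expected ... But received: rank/shape"
    // error-message convention adopted in this patch. EnforceEq and FormatShape
    // are illustrative stand-ins, not Paddle APIs.
    #include <cstdint>
    #include <cstdio>
    #include <sstream>
    #include <stdexcept>
    #include <string>
    #include <vector>

    // Renders a shape the way the "[%s]" placeholders above do, e.g. "[10, 5, 7]".
    std::string FormatShape(const std::vector<int64_t>& dims) {
      std::ostringstream os;
      os << "[";
      for (size_t i = 0; i < dims.size(); ++i) {
        if (i > 0) os << ", ";
        os << dims[i];
      }
      os << "]";
      return os.str();
    }

    // Stand-in for PADDLE_ENFORCE_EQ(a, b, platform::errors::InvalidArgument(...)).
    void EnforceEq(int64_t actual, int64_t expected, const std::string& msg) {
      if (actual != expected) throw std::invalid_argument(msg);
    }

    int main() {
      std::vector<int64_t> emission_dims = {10, 5, 7};  // a rank-3 input
      std::ostringstream msg;
      msg << "The Input(Emission) should be a 2-D tensor. But received: "
          << "input rank " << emission_dims.size() << ", input shape "
          << FormatShape(emission_dims) << ".";
      try {
        EnforceEq(static_cast<int64_t>(emission_dims.size()), 2, msg.str());
      } catch (const std::invalid_argument& e) {
        // Prints: The Input(Emission) should be a 2-D tensor. But received:
        // input rank 3, input shape [10, 5, 7].
        std::fprintf(stderr, "%s\n", e.what());
      }
      return 0;
    }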