From a35a2ee9573d2bb591a5531a40f3e6c163d2c83a Mon Sep 17 00:00:00 2001
From: smallv0221 <397551318@qq.com>
Date: Fri, 25 Sep 2020 09:12:41 +0000
Subject: [PATCH] lstm_unit error message enhancement. lstmp error message
 enhancement. sequence_conv error message enhancement. sequence_enumerate
 error message enhancement. sequence_mask error message enhancement.

---
 paddle/fluid/operators/lstm_unit_op.cc        | 28 +++++++++----------
 paddle/fluid/operators/lstm_unit_op.cu        | 10 ++++---
 paddle/fluid/operators/lstm_unit_op.h         | 10 ++++---
 paddle/fluid/operators/lstmp_op.h             |  6 ++--
 .../sequence_ops/sequence_conv_op.cc          |  7 +++--
 .../sequence_ops/sequence_enumerate_op.cc     |  3 +-
 .../sequence_ops/sequence_enumerate_op.cu     |  3 +-
 .../sequence_ops/sequence_enumerate_op.h      | 19 ++++++++-----
 .../sequence_ops/sequence_mask_op.cc          |  6 ++--
 9 files changed, 54 insertions(+), 38 deletions(-)

diff --git a/paddle/fluid/operators/lstm_unit_op.cc b/paddle/fluid/operators/lstm_unit_op.cc
index c325c0892ed..e6ffda201ba 100644
--- a/paddle/fluid/operators/lstm_unit_op.cc
+++ b/paddle/fluid/operators/lstm_unit_op.cc
@@ -23,23 +23,23 @@ class LstmUnitOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of LSTM should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("C_prev"),
-                   "Input(C_prev) of LSTM should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("C"),
-                   "Output(C) of LSTM should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("H"),
-                   "Output(H) of LSTM should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "lstm_unit");
+    OP_INOUT_CHECK(ctx->HasInput("C_prev"), "Input", "C_prev", "lstm_unit");
+    OP_INOUT_CHECK(ctx->HasOutput("C"), "Output", "C", "lstm_unit");
+    OP_INOUT_CHECK(ctx->HasOutput("H"), "Output", "H", "lstm_unit");
 
     auto x_dims = ctx->GetInputDim("X");
     auto c_prev_dims = ctx->GetInputDim("C_prev");
 
-    PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank must be 2.");
+    PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::InvalidArgument(
+                                            "Input(X)'s rank must be 2."));
     if (ctx->IsRuntime()) {
       PADDLE_ENFORCE_EQ(x_dims[0], c_prev_dims[0],
-                        "Batch size of inputs and states must be equal");
+                        platform::errors::InvalidArgument(
+                            "Batch size of inputs and states must be equal"));
       PADDLE_ENFORCE_EQ(x_dims[1], c_prev_dims[1] * 4,
-                        "Dimension of FC should equal to prev state * 4");
+                        platform::errors::InvalidArgument(
+                            "Dimension of FC should equal to prev state * 4"));
     }
 
     int b_size = c_prev_dims[0];  // batch size
@@ -85,10 +85,10 @@ class LstmUnitGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("C")),
-                   "Input(C@GRAD) should not be null");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("H")),
-                   "Input(H@GRAD) should not be null");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("C")), "Input",
+                   framework::GradVarName("C"), "lstm_unit");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("H")), "Input",
+                   framework::GradVarName("H"), "lstm_unit");
     ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
     ctx->SetOutputDim(framework::GradVarName("C_prev"),
                       ctx->GetInputDim("C_prev"));
diff --git a/paddle/fluid/operators/lstm_unit_op.cu b/paddle/fluid/operators/lstm_unit_op.cu
index 810b83cb535..3949a066e08 100644
--- a/paddle/fluid/operators/lstm_unit_op.cu
+++ b/paddle/fluid/operators/lstm_unit_op.cu
@@ -93,8 +93,9 @@ template <typename T>
 class LstmUnitOpCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
-                   "It must use CUDAPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(ctx.GetPlace()), true,
+        paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
 
     auto* x_tensor = ctx.Input<framework::Tensor>("X");
     auto* c_prev_tensor = ctx.Input<framework::Tensor>("C_prev");
@@ -124,8 +125,9 @@ template <typename T>
 class LstmUnitGradOpCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
-                   "It must use CUDAPlace.");
+    PADDLE_ENFORCE_EQ(
+        platform::is_gpu_place(ctx.GetPlace()), true,
+        paddle::platform::errors::PreconditionNotMet("It must use CUDAPlace."));
 
     auto x_tensor = ctx.Input<framework::Tensor>("X");
     auto c_prev_tensor = ctx.Input<framework::Tensor>("C_prev");
diff --git a/paddle/fluid/operators/lstm_unit_op.h b/paddle/fluid/operators/lstm_unit_op.h
index 3fe7bda39b6..9faed3ca2ce 100644
--- a/paddle/fluid/operators/lstm_unit_op.h
+++ b/paddle/fluid/operators/lstm_unit_op.h
@@ -39,8 +39,9 @@ template <typename DeviceContext, typename T>
 class LstmUnitKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
-                   "It must use CPUPlace.");
+    PADDLE_ENFORCE(
+        platform::is_cpu_place(ctx.GetPlace()),
+        paddle::platform::errors::PreconditionNotMet("It must use CPUPlace."));
 
     auto* x_tensor = ctx.Input<framework::Tensor>("X");
     auto* c_prev_tensor = ctx.Input<framework::Tensor>("C_prev");
@@ -82,8 +83,9 @@ template <typename DeviceContext, typename T>
 class LstmUnitGradKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
-                   "It must use CPUPlace.");
+    PADDLE_ENFORCE(
+        platform::is_cpu_place(ctx.GetPlace()),
+        paddle::platform::errors::PreconditionNotMet("It must use CPUPlace."));
 
     auto x_tensor = ctx.Input<framework::Tensor>("X");
     auto c_prev_tensor = ctx.Input<framework::Tensor>("C_prev");
diff --git a/paddle/fluid/operators/lstmp_op.h b/paddle/fluid/operators/lstmp_op.h
index f0a727f34fe..a2d1d5295be 100644
--- a/paddle/fluid/operators/lstmp_op.h
+++ b/paddle/fluid/operators/lstmp_op.h
@@ -91,7 +91,8 @@ class LSTMPKernel : public framework::OpKernel<T> {
     else if (act_type == math::detail::ActivationType::kReLU)
       ReluFunctor<T>()(d, x, y);
     else
-      PADDLE_THROW("unsupported activation type");
+      PADDLE_THROW(
+          platform::errors::InvalidArgument("unsupported activation type"));
   }
 
   void Compute(const framework::ExecutionContext& ctx) const override {
@@ -263,7 +264,8 @@ class LSTMPGradKernel : public framework::OpKernel<T> {
     else if (act_type == math::detail::ActivationType::kReLU)
       ReluGradFunctor<T>()(d, x, y, dy, dx);
     else
-      PADDLE_THROW("unsupported activation type");
+      PADDLE_THROW(
+          platform::errors::InvalidArgument("unsupported activation type"));
   }
 
   void Compute(const framework::ExecutionContext& ctx) const override {
diff --git a/paddle/fluid/operators/sequence_ops/sequence_conv_op.cc b/paddle/fluid/operators/sequence_ops/sequence_conv_op.cc
index 99e8064d244..b5ca370088b 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_conv_op.cc
+++ b/paddle/fluid/operators/sequence_ops/sequence_conv_op.cc
@@ -61,7 +61,8 @@ class SequenceConvOp : public framework::OperatorWithKernel {
     if (ctx->Attrs().Get<bool>("paddingTrainable")) {
       PADDLE_ENFORCE(
           ctx->HasInput("PaddingData"),
-          "Input(PaddingData) of SequenceConvOp should not be null.");
"Input(PaddingData) of SequenceConvOp should not be null."); + platform::errors::InvalidArgument( + "Input(PaddingData) of SequenceConvOp should not be null.")); framework::DDim padding_dim = ctx->GetInputDim("PaddingData"); int up_pad = std::max(0, -context_start); int down_pad = std::max(0, context_start + context_length - 1); @@ -69,9 +70,9 @@ class SequenceConvOp : public framework::OperatorWithKernel { int input_width = static_cast(in_dims[1]); if (context_start == 0 && context_length == 1) { - PADDLE_THROW( + PADDLE_THROW(platform::errors::InvalidArgument( "If context_start is 0 and context_length is 1, paddingTrainable " - "should be false."); + "should be false.")); } PADDLE_ENFORCE_EQ( padding_dim.size(), 2, diff --git a/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc b/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc index 1dbddfa709d..7a488de1de3 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cc @@ -44,7 +44,8 @@ class SequenceEnumerateOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr("win_size", "(int) The enumerate sequence window size.") .AddCustomChecker([](const int& win_size) { PADDLE_ENFORCE(win_size >= 2, - "The window size should be not less than 2."); + platform::errors::InvalidArgument( + "The window size should be not less than 2.")); }); AddAttr("pad_value", "(int) The enumerate sequence padding value.") .SetDefault(0); diff --git a/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cu b/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cu index d5deb7582c7..2790f30a9bd 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cu +++ b/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.cu @@ -58,7 +58,8 @@ class SequenceEnumerateOpCUDAKernel : public framework::OpKernel { PADDLE_ENFORCE_EQ( static_cast(in_dims[0]), in_lod[0].back(), - "The actual input data's size mismatched with LoD information."); + platform::errors::InvalidArgument( + "The actual input data's size mismatched with LoD information.")); /* Generate enumerate sequence set */ auto stream = context.cuda_device_context().stream(); diff --git a/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.h b/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.h index 4807521bc0d..19e0bc1cdeb 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.h +++ b/paddle/fluid/operators/sequence_ops/sequence_enumerate_op.h @@ -29,21 +29,26 @@ class SequenceEnumerateKernel : public framework::OpKernel { int win_size = context.Attr("win_size"); auto pad_value = static_cast(context.Attr("pad_value")); - PADDLE_ENFORCE_EQ(in->lod().empty(), false, - "Input(X) Tensor of SequenceEnumerateOp does not contain " - "LoD information."); + PADDLE_ENFORCE_EQ( + in->lod().empty(), false, + platform::errors::InvalidArgument( + "Input(X) Tensor of SequenceEnumerateOp does not contain " + "LoD information.")); auto in_dims = in->dims(); auto lod0 = in->lod()[0]; PADDLE_ENFORCE_EQ( static_cast(in_dims[0]), lod0.back(), - "The actual input data's size mismatched with LoD information."); + platform::errors::InvalidArgument( + "The actual input data's size mismatched with LoD information.")); PADDLE_ENFORCE_EQ( in_dims.size(), 2UL, - "Input(X) of SequenceEnumerate operator's rank should be 2."); + platform::errors::InvalidArgument( + "Input(X) of SequenceEnumerate operator's rank should be 2.")); PADDLE_ENFORCE_EQ(in_dims[1], 1, - "Input(X) of 
-                      "dimension should be 1.");
+                      platform::errors::InvalidArgument(
+                          "Input(X) of SequenceEnumerate operator's 2nd "
+                          "dimension should be 1."));
 
     // Generate enumerate sequence set
     auto in_data = in->data<T>();
diff --git a/paddle/fluid/operators/sequence_ops/sequence_mask_op.cc b/paddle/fluid/operators/sequence_ops/sequence_mask_op.cc
index b8912dd4c79..7cb6edb87ea 100644
--- a/paddle/fluid/operators/sequence_ops/sequence_mask_op.cc
+++ b/paddle/fluid/operators/sequence_ops/sequence_mask_op.cc
@@ -69,8 +69,10 @@ class SequenceMaskOpMaker : public framework::OpProtoAndCheckerMaker {
          "= max(Input(X)).")
         .SetDefault(-1)
         .AddCustomChecker([](const int& v) {
-          PADDLE_ENFORCE(v < 0 || v >= 1,
-                         "Attr(maxlen) must be less than 0 or larger than 1");
+          PADDLE_ENFORCE(
+              v < 0 || v >= 1,
+              platform::errors::InvalidArgument(
+                  "Attr(maxlen) must be less than 0 or larger than 1"));
         });
     AddAttr<int>("out_dtype", "Output data type");
     AddComment(R"DOC(
-- 
GitLab
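
For reference, the convention this patch applies throughout: input/output presence checks go through OP_INOUT_CHECK, and every other PADDLE_ENFORCE* call passes a typed error built from platform::errors (InvalidArgument, PreconditionNotMet, ...) instead of a bare message string. Below is a minimal sketch of that convention; the operator name "my_op", the class MyOp, and its input "X" are invented for illustration and are not part of this patch.

    // Minimal sketch of the error-message convention used above.
    // "my_op", MyOp, and input "X" are hypothetical, not patch content.
    #include "paddle/fluid/framework/op_registry.h"
    #include "paddle/fluid/platform/enforce.h"

    namespace paddle {
    namespace operators {

    class MyOp : public framework::OperatorWithKernel {
     public:
      using framework::OperatorWithKernel::OperatorWithKernel;

      void InferShape(framework::InferShapeContext* ctx) const override {
        // Presence check: OP_INOUT_CHECK names the variable, its role
        // (Input/Output), and the operator, replacing
        // PADDLE_ENFORCE(cond, "...should not be null.").
        OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "my_op");
        // Any other check: wrap the message in a typed error builder so the
        // failure carries an error class as well as a string.
        PADDLE_ENFORCE_EQ(ctx->GetInputDim("X").size(), 2,
                          platform::errors::InvalidArgument(
                              "Input(X)'s rank must be 2."));
      }
    };

    }  // namespace operators
    }  // namespace paddle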