From d003573f90537392a064e8284d7fe95d170796fd Mon Sep 17 00:00:00 2001
From: wawltor
Date: Wed, 16 Sep 2020 10:40:25 +0800
Subject: [PATCH] add the error message check for some operators

add the error message check for some operators
---
 paddle/fluid/operators/arg_min_max_op_base.h | 22 ++++++++++++--------
 paddle/fluid/operators/assign_op.h           |  5 ++++-
 paddle/fluid/operators/isfinite_op.cc        |  6 +++++-
 paddle/fluid/operators/isfinite_op.h         |  6 +++++-
 paddle/fluid/operators/linspace_op.cc        |  2 --
 paddle/fluid/operators/linspace_op.cu        |  5 ++++-
 paddle/fluid/operators/linspace_op.h         |  5 ++++-
 paddle/fluid/operators/scale_op.h            |  5 ++++-
 8 files changed, 39 insertions(+), 17 deletions(-)

diff --git a/paddle/fluid/operators/arg_min_max_op_base.h b/paddle/fluid/operators/arg_min_max_op_base.h
index c296ddcfbef..57e1c06f73c 100644
--- a/paddle/fluid/operators/arg_min_max_op_base.h
+++ b/paddle/fluid/operators/arg_min_max_op_base.h
@@ -110,10 +110,12 @@ struct VisitDataArgMinMaxFunctor {
         CALL_ARG_MINMAX_FUNCTOR(6);
         break;
       default:
-        PADDLE_THROW(
-            "%s operator doesn't supports tensors whose ranks are greater "
-            "than 6.",
-            (EnumArgMinMaxValue == kArgMin ? "argmin" : "argmax"));
+        PADDLE_ENFORCE_LE(
+            x_dims.size(), 6,
+            platform::errors::InvalidArgument(
+                "%s operator doesn't supports tensors whose ranks are greater "
+                "than 6.",
+                (EnumArgMinMaxValue == kArgMin ? "argmin" : "argmax")));
         break;
 #undef CALL_ARG_MINMAX_FUNCTOR
     }
@@ -164,7 +166,8 @@ class ArgMinMaxOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_LT(
         axis, x_dims.size(),
         platform::errors::InvalidArgument(
-            "'axis'(%d) must be less than Rank(X)(%d).", axis, x_dims.size()));
+            "'axis'(%d) must be less than Rank(X)(%d) of Input(X).", axis,
+            x_dims.size()));
 
     const int& dtype = ctx->Attrs().Get<int>("dtype");
     PADDLE_ENFORCE_EQ(
@@ -192,10 +195,11 @@ class ArgMinMaxOp : public framework::OperatorWithKernel {
       }
       PADDLE_ENFORCE_LE(
           all_element_num, INT_MAX,
-          "The element num of the argmin/argmax input at axis is "
-          "%d, is larger than int32 maximum value:%d, you must "
-          "set the dtype of argmin/argmax to 'int64'.",
-          all_element_num, INT_MAX);
+          platform::errors::InvalidArgument(
+              "The element num of the argmin/argmax input at axis is "
+              "%d, is larger than int32 maximum value:%d, you must "
+              "set the dtype of argmin/argmax to 'int64'.",
+              all_element_num, INT_MAX));
     }
   }
   std::vector<int64_t> vec;
diff --git a/paddle/fluid/operators/assign_op.h b/paddle/fluid/operators/assign_op.h
index 6ce04d19fc4..c2154f78bbe 100644
--- a/paddle/fluid/operators/assign_op.h
+++ b/paddle/fluid/operators/assign_op.h
@@ -52,7 +52,10 @@ class AssignFunctor {
 
   template <typename T>
   void operator()(const T &v) const {
-    PADDLE_THROW("Not support type for assign op %s", typeid(T).name());
+    PADDLE_ENFORCE_EQ(
+        true, false,
+        platform::errors::PermissionDenied(
+            "Not support type for assign op with type %s", typeid(T).name()));
   }
 
  private:
diff --git a/paddle/fluid/operators/isfinite_op.cc b/paddle/fluid/operators/isfinite_op.cc
index af737ec42f6..9b92ce3e538 100644
--- a/paddle/fluid/operators/isfinite_op.cc
+++ b/paddle/fluid/operators/isfinite_op.cc
@@ -43,7 +43,11 @@ class OverflowOp : public framework::OperatorWithKernel {
     } else if (x_var->IsType<framework::SelectedRows>()) {
       dtype = x_var->Get<framework::SelectedRows>().value().type();
     } else {
-      PADDLE_THROW("Cannot find the input data type by all input data");
+      PADDLE_ENFORCE_EQ(
+          true, false,
+          platform::errors::InvalidArgument(
+              "The input type mismatch, the type of Input(X) must be Tensor or "
+              "SelectedRows, please check your input."));
     }
     return framework::OpKernelType(framework::proto::VarType::Type(dtype),
                                    ctx.GetPlace());
diff --git a/paddle/fluid/operators/isfinite_op.h b/paddle/fluid/operators/isfinite_op.h
index 83b08085636..2fc0d58669b 100644
--- a/paddle/fluid/operators/isfinite_op.h
+++ b/paddle/fluid/operators/isfinite_op.h
@@ -57,7 +57,11 @@ class OverflowKernel : public framework::OpKernel<T> {
       auto& in = ctx.Input<framework::SelectedRows>("X")->value();
       functor(in, out);
     } else {
-      PADDLE_THROW("Unsupported input type.");
+      PADDLE_ENFORCE_EQ(
+          true, false,
+          platform::errors::InvalidArgument(
+              "The input type mismatch, the type of Input(X) must be Tensor or "
+              "SelectedRows, please check your input."));
     }
   }
 };
diff --git a/paddle/fluid/operators/linspace_op.cc b/paddle/fluid/operators/linspace_op.cc
index 2c3172d2a11..667c6e89295 100644
--- a/paddle/fluid/operators/linspace_op.cc
+++ b/paddle/fluid/operators/linspace_op.cc
@@ -22,8 +22,6 @@ class LinspaceOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Start"),
-                   "Input(Start) of LinspaceOp should not be null.");
     OP_INOUT_CHECK(ctx->HasInput("Start"), "Input", "Start", "linspace");
     OP_INOUT_CHECK(ctx->HasInput("Stop"), "Input", "Stop", "linspace");
     OP_INOUT_CHECK(ctx->HasInput("Num"), "Input", "Num", "linspace");
diff --git a/paddle/fluid/operators/linspace_op.cu b/paddle/fluid/operators/linspace_op.cu
index 793253b6b88..c51e8785263 100644
--- a/paddle/fluid/operators/linspace_op.cu
+++ b/paddle/fluid/operators/linspace_op.cu
@@ -63,7 +63,10 @@ class CUDALinspaceKernel : public framework::OpKernel<T> {
     framework::TensorCopy(*num_t, platform::CPUPlace(), &n);
     int32_t num = n.data<int32_t>()[0];
 
-    PADDLE_ENFORCE(num > 0, "The num of linspace op should be larger than 0.");
+    PADDLE_ENFORCE_GT(num, 0, platform::errors::InvalidArgument(
+                                  "The num of linspace op should be larger "
+                                  "than 0, but received num is %d",
+                                  num));
 
     out->Resize(framework::make_ddim({num}));
     T* out_data = out->mutable_data<T>(context.GetPlace());
diff --git a/paddle/fluid/operators/linspace_op.h b/paddle/fluid/operators/linspace_op.h
index 898f611f864..2c30a66ef8e 100644
--- a/paddle/fluid/operators/linspace_op.h
+++ b/paddle/fluid/operators/linspace_op.h
@@ -46,7 +46,10 @@ class CPULinspaceKernel : public framework::OpKernel<T> {
     T start = start_t.data<T>()[0];
     T stop = stop_t.data<T>()[0];
 
-    PADDLE_ENFORCE(num > 0, "The num of linspace op should be larger than 0.");
+    PADDLE_ENFORCE_GT(num, 0, platform::errors::InvalidArgument(
+                                  "The num of linspace op should be larger "
+                                  "than 0, but received num is %d",
+                                  num));
 
     out->Resize(framework::make_ddim({num}));
 
diff --git a/paddle/fluid/operators/scale_op.h b/paddle/fluid/operators/scale_op.h
index 64ee868fb6d..11c81d23b2e 100644
--- a/paddle/fluid/operators/scale_op.h
+++ b/paddle/fluid/operators/scale_op.h
@@ -60,7 +60,10 @@ class ScaleKernel : public framework::OpKernel<T> {
     out->mutable_data<T>(in->place());
 
     PADDLE_ENFORCE_EQ(in->dims(), out->dims(),
-                      "in and out should have the same dim");
+                      paddle::platform::errors::InvalidArgument(
+                          "the input and output should have the same dim"
+                          "but input dim is %s, output dim is %s",
+                          in->dims(), out->dims()));
 
     auto eigen_out = framework::EigenVector<T>::Flatten(*out);
     auto eigen_in = framework::EigenVector<T>::Flatten(*in);
-- 
GitLab
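
The hunks above all apply the same conversion: a bare PADDLE_THROW (or condition-style PADDLE_ENFORCE) becomes a comparison macro such as PADDLE_ENFORCE_GT or PADDLE_ENFORCE_EQ carrying a typed error from platform::errors. A minimal sketch of that pattern follows, placed after the patch signature so it is ignored by git am; it assumes the standard Paddle enforce headers, and CheckLinspaceNum is a hypothetical helper used only for illustration, not code from this patch.

// Minimal sketch of the error-reporting pattern adopted in this patch.
// Assumes Paddle's enforce utilities; CheckLinspaceNum is a hypothetical
// helper, not part of the commit above.
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/errors.h"

namespace paddle {
namespace operators {

inline void CheckLinspaceNum(int32_t num) {
  // Comparison macro plus a typed error class, mirroring linspace_op.h:
  // the failing comparison, both operands, and a formatted message are
  // all included in the raised error.
  PADDLE_ENFORCE_GT(num, 0, platform::errors::InvalidArgument(
                                "The num of linspace op should be larger "
                                "than 0, but received num is %d",
                                num));
}

}  // namespace operators
}  // namespace paddle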