diff --git a/paddle/fluid/operators/arg_min_max_op_base.h b/paddle/fluid/operators/arg_min_max_op_base.h index c296ddcfbef703e8484b6ea0b7f96f037e415186..57e1c06f73c56334fc93dee7a16d6899f5a6f12a 100644 --- a/paddle/fluid/operators/arg_min_max_op_base.h +++ b/paddle/fluid/operators/arg_min_max_op_base.h @@ -110,10 +110,12 @@ struct VisitDataArgMinMaxFunctor { CALL_ARG_MINMAX_FUNCTOR(6); break; default: - PADDLE_THROW( - "%s operator doesn't supports tensors whose ranks are greater " - "than 6.", - (EnumArgMinMaxValue == kArgMin ? "argmin" : "argmax")); + PADDLE_ENFORCE_LE( + x_dims.size(), 6, + platform::errors::InvalidArgument( + "%s operator doesn't supports tensors whose ranks are greater " + "than 6.", + (EnumArgMinMaxValue == kArgMin ? "argmin" : "argmax"))); break; #undef CALL_ARG_MINMAX_FUNCTOR } @@ -164,7 +166,8 @@ class ArgMinMaxOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_LT( axis, x_dims.size(), platform::errors::InvalidArgument( - "'axis'(%d) must be less than Rank(X)(%d).", axis, x_dims.size())); + "'axis'(%d) must be less than Rank(X)(%d) of Input(X).", axis, + x_dims.size())); const int& dtype = ctx->Attrs().Get("dtype"); PADDLE_ENFORCE_EQ( @@ -192,10 +195,11 @@ class ArgMinMaxOp : public framework::OperatorWithKernel { } PADDLE_ENFORCE_LE( all_element_num, INT_MAX, - "The element num of the argmin/argmax input at axis is " - "%d, is larger than int32 maximum value:%d, you must " - "set the dtype of argmin/argmax to 'int64'.", - all_element_num, INT_MAX); + platform::errors::InvalidArgument( + "The element num of the argmin/argmax input at axis is " + "%d, is larger than int32 maximum value:%d, you must " + "set the dtype of argmin/argmax to 'int64'.", + all_element_num, INT_MAX)); } } std::vector vec; diff --git a/paddle/fluid/operators/assign_op.h b/paddle/fluid/operators/assign_op.h index 6ce04d19fc4376e4263712e2904e480e26590553..c2154f78bbe97418f2c7388a000dc833134d0c84 100644 --- a/paddle/fluid/operators/assign_op.h +++ 
b/paddle/fluid/operators/assign_op.h @@ -52,7 +52,10 @@ class AssignFunctor { template void operator()(const T &v) const { - PADDLE_THROW("Not support type for assign op %s", typeid(T).name()); + PADDLE_ENFORCE_EQ( + true, false, + platform::errors::PermissionDenied( + "Not support type for assign op with type %s", typeid(T).name())); } private: diff --git a/paddle/fluid/operators/isfinite_op.cc b/paddle/fluid/operators/isfinite_op.cc index af737ec42f631c534bb26ad38901e03d804d07b3..9b92ce3e538aa660dedda67de0cabaa4adbdc8c7 100644 --- a/paddle/fluid/operators/isfinite_op.cc +++ b/paddle/fluid/operators/isfinite_op.cc @@ -43,7 +43,11 @@ class OverflowOp : public framework::OperatorWithKernel { } else if (x_var->IsType()) { dtype = x_var->Get().value().type(); } else { - PADDLE_THROW("Cannot find the input data type by all input data"); + PADDLE_ENFORCE_EQ( + true, false, + platform::errors::InvalidArgument( + "The input type mismatch, the type of Input(X) must be Tensor or " + "SelectedRows, please check your input.")); } return framework::OpKernelType(framework::proto::VarType::Type(dtype), ctx.GetPlace()); diff --git a/paddle/fluid/operators/isfinite_op.h b/paddle/fluid/operators/isfinite_op.h index 83b080856366ac3332c5856a19b721893bb80eb3..2fc0d58669bae428d811c7200e025f36f087b905 100644 --- a/paddle/fluid/operators/isfinite_op.h +++ b/paddle/fluid/operators/isfinite_op.h @@ -57,7 +57,11 @@ class OverflowKernel : public framework::OpKernel { auto& in = ctx.Input("X")->value(); functor(in, out); } else { - PADDLE_THROW("Unsupported input type."); + PADDLE_ENFORCE_EQ( + true, false, + platform::errors::InvalidArgument( + "The input type mismatch, the type of Input(X) must be Tensor or " + "SelectedRows, please check your input.")); } } }; diff --git a/paddle/fluid/operators/linspace_op.cc b/paddle/fluid/operators/linspace_op.cc index 2c3172d2a1112e2c79a3c1215ccd0d3f08d59451..667c6e892956e29478f1401c3cb2622713433037 100644 --- a/paddle/fluid/operators/linspace_op.cc 
+++ b/paddle/fluid/operators/linspace_op.cc @@ -22,8 +22,6 @@ class LinspaceOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Start"), - "Input(Start) of LinspaceOp should not be null."); OP_INOUT_CHECK(ctx->HasInput("Start"), "Input", "Start", "linspace"); OP_INOUT_CHECK(ctx->HasInput("Stop"), "Input", "Stop", "linspace"); OP_INOUT_CHECK(ctx->HasInput("Num"), "Input", "Num", "linspace"); diff --git a/paddle/fluid/operators/linspace_op.cu b/paddle/fluid/operators/linspace_op.cu index 793253b6b8894de8d89b301921383ebfd53d66fc..c51e8785263b5de7a897f3865ed2dabdf93adfaa 100644 --- a/paddle/fluid/operators/linspace_op.cu +++ b/paddle/fluid/operators/linspace_op.cu @@ -63,7 +63,10 @@ class CUDALinspaceKernel : public framework::OpKernel { framework::TensorCopy(*num_t, platform::CPUPlace(), &n); int32_t num = n.data()[0]; - PADDLE_ENFORCE(num > 0, "The num of linspace op should be larger than 0."); + PADDLE_ENFORCE_GT(num, 0, platform::errors::InvalidArgument( + "The num of linspace op should be larger " + "than 0, but received num is %d", + num)); out->Resize(framework::make_ddim({num})); T* out_data = out->mutable_data(context.GetPlace()); diff --git a/paddle/fluid/operators/linspace_op.h b/paddle/fluid/operators/linspace_op.h index 898f611f864dc8bfac2ba7e41b91f5f5bbe524ab..2c30a66ef8e937127fb69a459a901164934b5b13 100644 --- a/paddle/fluid/operators/linspace_op.h +++ b/paddle/fluid/operators/linspace_op.h @@ -46,7 +46,10 @@ class CPULinspaceKernel : public framework::OpKernel { T start = start_t.data()[0]; T stop = stop_t.data()[0]; - PADDLE_ENFORCE(num > 0, "The num of linspace op should be larger than 0."); + PADDLE_ENFORCE_GT(num, 0, platform::errors::InvalidArgument( + "The num of linspace op should be larger " + "than 0, but received num is %d", + num)); out->Resize(framework::make_ddim({num})); diff --git 
a/paddle/fluid/operators/scale_op.h b/paddle/fluid/operators/scale_op.h index 64ee868fb6d8b1cf55f6400a28c10038efc7884e..11c81d23b2ed271ce89e6a27b1179e7d06dd0ebd 100644 --- a/paddle/fluid/operators/scale_op.h +++ b/paddle/fluid/operators/scale_op.h @@ -60,7 +60,10 @@ class ScaleKernel : public framework::OpKernel { out->mutable_data(in->place()); PADDLE_ENFORCE_EQ(in->dims(), out->dims(), - "in and out should have the same dim"); + paddle::platform::errors::InvalidArgument( + "the input and output should have the same dim, " + "but input dim is %s, output dim is %s", + in->dims(), out->dims())); auto eigen_out = framework::EigenVector::Flatten(*out); auto eigen_in = framework::EigenVector::Flatten(*in);