diff --git a/paddle/fluid/operators/bpr_loss_op.cc b/paddle/fluid/operators/bpr_loss_op.cc
index dbabbf1e4c7c268f39d1955251ec399dc42bd2da..0ecce86b59b41072a600aaff1cb1334e445f5c30 100644
--- a/paddle/fluid/operators/bpr_loss_op.cc
+++ b/paddle/fluid/operators/bpr_loss_op.cc
@@ -23,22 +23,26 @@ class BprLossOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Y"), "Output(Y) should be not null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "BprLoss");
+    OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "BprLoss");
+    OP_INOUT_CHECK(ctx->HasOutput("Y"), "Output", "Y", "BprLoss");
 
     auto x_dims = ctx->GetInputDim("X");
     auto label_dims = ctx->GetInputDim("Label");
     int rank = x_dims.size();
-    PADDLE_ENFORCE_EQ(rank, label_dims.size(),
-                      "Input(X) and Input(Label) shall have the same rank.");
+    PADDLE_ENFORCE_EQ(
+        rank, label_dims.size(),
+        platform::errors::InvalidArgument(
+            "Input(X) and Input(Label) shall have the same rank."));
 
     if (ctx->IsRuntime() || (framework::product(x_dims) > 0 &&
                              framework::product(label_dims) > 0)) {
-      PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 0, rank - 1),
-                        framework::slice_ddim(label_dims, 0, rank - 1),
-                        "Input(X) and Input(Label) shall have the same shape "
-                        "except the last dimension.");
+      PADDLE_ENFORCE_EQ(
+          framework::slice_ddim(x_dims, 0, rank - 1),
+          framework::slice_ddim(label_dims, 0, rank - 1),
+          platform::errors::InvalidArgument(
+              "Input(X) and Input(Label) shall have the same shape "
+              "except the last dimension."));
     }
 
     auto y_dims = x_dims;
@@ -63,33 +67,41 @@ class BprLossGradientOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")),
-                   "Input(Y@GRAD) shoudl be not null.");
-    PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
-                   "Output(X@GRAD) should be not null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "BprLossGradient");
+    OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "BprLossGradient");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Y")), "Input",
+                   framework::GradVarName("Y"), "BprLossGradient");
+    OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output",
+                   framework::GradVarName("X"), "BprLossGradient");
 
     auto x_dims = ctx->GetInputDim("X");
     auto label_dims = ctx->GetInputDim("Label");
     auto dy_dims = ctx->GetInputDim(framework::GradVarName("Y"));
     int rank = x_dims.size();
-    PADDLE_ENFORCE_EQ(dy_dims.size(), rank,
-                      "Input(Y@Grad) and Input(X) should have the same rank.");
-    PADDLE_ENFORCE_EQ(label_dims.size(), rank,
-                      "Input(Label) and Input(X) should have the same rank.");
+    PADDLE_ENFORCE_EQ(
+        dy_dims.size(), rank,
+        platform::errors::InvalidArgument(
+            "Input(Y@Grad) and Input(X) should have the same rank."));
+    PADDLE_ENFORCE_EQ(
+        label_dims.size(), rank,
+        platform::errors::InvalidArgument(
+            "Input(Label) and Input(X) should have the same rank."));
     PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 0, rank - 1),
                       framework::slice_ddim(label_dims, 0, rank - 1),
"The Input(X) and Input(Label) should have the same " - "shape except the last dimension."); + platform::errors::InvalidArgument( + "The Input(X) and Input(Label) should have the same " + "shape except the last dimension.")); PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 0, rank - 1), framework::slice_ddim(dy_dims, 0, rank - 1), - "The Input(X) and Input(Y@Grad) should have the same " - "shape except the last dimension."); + platform::errors::InvalidArgument( + "The Input(X) and Input(Y@Grad) should have the same " + "shape except the last dimension.")); PADDLE_ENFORCE_EQ(dy_dims[rank - 1], 1, - "The last dimension of Input(Y@Grad) should be 1."); + platform::errors::InvalidArgument( + "The last dimension of Input(Y@Grad) should be 1.")); PADDLE_ENFORCE_EQ(label_dims[rank - 1], 1, - " the last dimension of Input(Label) should be 1."); + platform::errors::InvalidArgument( + " the last dimension of Input(Label) should be 1.")); ctx->SetOutputDim(framework::GradVarName("X"), x_dims); ctx->ShareLoD("X", framework::GradVarName("X")); } diff --git a/paddle/fluid/operators/bpr_loss_op.h b/paddle/fluid/operators/bpr_loss_op.h index 65efc8c01abd16f45fc11d725f88da4d243d6256..bebaf6e3365c0972bd0abcc63f66582c8ebc565f 100644 --- a/paddle/fluid/operators/bpr_loss_op.h +++ b/paddle/fluid/operators/bpr_loss_op.h @@ -28,7 +28,6 @@ using Tensor = framework::Tensor; template struct TolerableValue { HOSTDEVICE T operator()(const T& x) const { - PADDLE_ENFORCE_EQ(std::is_floating_point::value, true); const T kApproInf = 1e20; if (x == INFINITY) return kApproInf; if (x == -INFINITY) return -kApproInf; @@ -62,8 +61,11 @@ class BprLossOpKernel : public framework::OpKernel { const int64_t* label_data = labels->data(); for (int i = 0; i < step_size; ++i) { int lbl_pos = label_data[i]; - PADDLE_ENFORCE_GE(lbl_pos, 0); - PADDLE_ENFORCE_LT(lbl_pos, class_num); + PADDLE_ENFORCE_GE(lbl_pos, 0, platform::errors::InvalidArgument( + "label data %d is illegal.", lbl_pos)); + PADDLE_ENFORCE_LT(lbl_pos, class_num, + platform::errors::InvalidArgument( + "label data %d is illegal.", lbl_pos)); int index_pos = i * class_num + lbl_pos; T sum = static_cast(0); for (int j = 0; j < class_num; j++) { diff --git a/paddle/fluid/operators/cvm_op.cc b/paddle/fluid/operators/cvm_op.cc index e5a6a6277fe23e8918c6d76ba455a687017fc865..155f8f518f9046e53659a348fbc202f80586cddb 100644 --- a/paddle/fluid/operators/cvm_op.cc +++ b/paddle/fluid/operators/cvm_op.cc @@ -26,17 +26,20 @@ class CVMOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); - PADDLE_ENFORCE(ctx->HasInput("CVM"), "Input(CVM) should be not null."); - PADDLE_ENFORCE(ctx->HasOutput("Y"), "Output(Y) should be not null."); + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "CVM"); + OP_INOUT_CHECK(ctx->HasInput("CVM"), "Input", "CVM", "CVM"); + OP_INOUT_CHECK(ctx->HasOutput("Y"), "Output", "Y", "CVM"); auto x_dims = ctx->GetInputDim("X"); auto cvm_dims = ctx->GetInputDim("CVM"); - PADDLE_ENFORCE_EQ(x_dims.size(), 2UL, "Input(X)'s rank should be 2."); - PADDLE_ENFORCE_EQ(cvm_dims.size(), 2UL, "Input(CVM)'s rank should be 2."); - PADDLE_ENFORCE_EQ(cvm_dims[1], 2UL, - "The 2nd dimension of " - "Input(CVM) should be 2."); + PADDLE_ENFORCE_EQ(x_dims.size(), 2UL, platform::errors::InvalidArgument( + "Input(X)'s rank should be 2.")); + PADDLE_ENFORCE_EQ( + cvm_dims.size(), 2UL, + 
platform::errors::InvalidArgument("Input(CVM)'s rank should be 2.")); + PADDLE_ENFORCE_EQ(cvm_dims[1], 2UL, platform::errors::InvalidArgument( + "The 2nd dimension of " + "Input(CVM) should be 2.")); if (ctx->Attrs().Get("use_cvm")) { ctx->SetOutputDim("Y", {x_dims[0], x_dims[1]}); @@ -63,27 +66,36 @@ class CVMGradientOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); - PADDLE_ENFORCE(ctx->HasInput("CVM"), "Input(CVM) should be not null."); - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")), - "Input(Y@GRAD) should be not null."); - PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")), - "Output(X@GRAD) should be not null."); + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "CVMGradient"); + OP_INOUT_CHECK(ctx->HasInput("CVM"), "Input", "CVM", "CVMGradient"); + OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Y")), "Input", + framework::GradVarName("Y"), "CVMGradient"); + OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output", + framework::GradVarName("X"), "CVMGradient"); auto x_dims = ctx->GetInputDim("X"); auto cvm_dims = ctx->GetInputDim("CVM"); auto dy_dims = ctx->GetInputDim(framework::GradVarName("Y")); - PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank should be 2."); - PADDLE_ENFORCE_EQ(dy_dims.size(), 2, "Input(Y@Grad)'s rank should be 2."); - PADDLE_ENFORCE_EQ(cvm_dims.size(), 2, "Input(CVM)'s rank should be 2."); - - PADDLE_ENFORCE_EQ(x_dims[0], dy_dims[0], - "The 1st dimension of Input(X) and Input(Y@Grad) should " - "be equal."); - - PADDLE_ENFORCE_EQ(cvm_dims[1], 2, - "When Attr(soft_label) == false, the 2nd dimension of " - "Input(CVM) should be 2."); + PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::InvalidArgument( + "Input(X)'s rank should be 2.")); + PADDLE_ENFORCE_EQ( + dy_dims.size(), 2, + platform::errors::InvalidArgument("Input(Y@Grad)'s rank should be 2.")); + PADDLE_ENFORCE_EQ( + cvm_dims.size(), 2, + platform::errors::InvalidArgument("Input(CVM)'s rank should be 2.")); + + PADDLE_ENFORCE_EQ( + x_dims[0], dy_dims[0], + platform::errors::InvalidArgument( + "The 1st dimension of Input(X) and Input(Y@Grad) should " + "be equal.")); + + PADDLE_ENFORCE_EQ( + cvm_dims[1], 2, + platform::errors::InvalidArgument( + "When Attr(soft_label) == false, the 2nd dimension of " + "Input(CVM) should be 2.")); ctx->SetOutputDim(framework::GradVarName("X"), x_dims); ctx->ShareLoD("X", framework::GradVarName("X")); } diff --git a/paddle/fluid/operators/optimizers/adagrad_op.cc b/paddle/fluid/operators/optimizers/adagrad_op.cc index b3aff1eff8c46cc0dc41f1f58087deb831030032..255dc5bb083114c4bc85739c621f3558d153cc93 100644 --- a/paddle/fluid/operators/optimizers/adagrad_op.cc +++ b/paddle/fluid/operators/optimizers/adagrad_op.cc @@ -29,35 +29,34 @@ class AdagradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Param"), - "Input(Param) of AdagradOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Grad"), - "Input(Grad) of AdagradOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Moment"), - "Input(Moment) of AdagradOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("LearningRate"), - "Input(LearningRate) of AdagradOp should not be null."); - - PADDLE_ENFORCE(ctx->HasOutput("ParamOut"), - 
"Output(ParamOut) of AdagradOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("MomentOut"), - "Output(MomentOut) of AdagradOp should not be null."); + OP_INOUT_CHECK(ctx->HasInput("Param"), "Input", "Param", "Adagrad"); + OP_INOUT_CHECK(ctx->HasInput("Grad"), "Input", "Grad", "Adagrad"); + OP_INOUT_CHECK(ctx->HasInput("Moment"), "Input", "Moment", "Adagrad"); + OP_INOUT_CHECK(ctx->HasInput("LearningRate"), "Input", "LearningRate", + "Adagrad"); + OP_INOUT_CHECK(ctx->HasOutput("ParamOut"), "Output", "ParamOut", "Adagrad"); + OP_INOUT_CHECK(ctx->HasOutput("MomentOut"), "Output", "MomentOut", + "Adagrad"); auto lr_dims = ctx->GetInputDim("LearningRate"); PADDLE_ENFORCE_NE(framework::product(lr_dims), 0, - "Maybe the Input variable LearningRate has not " - "been initialized. You may need to confirm " - "if you put exe.run(startup_program) " - "after optimizer.minimize function."); + platform::errors::InvalidArgument( + "Maybe the Input variable LearningRate has not " + "been initialized. You may need to confirm " + "if you put exe.run(startup_program) " + "after optimizer.minimize function.")); PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1, - "LearningRate should have one element"); + platform::errors::InvalidArgument( + "LearningRate should have one element")); auto param_dims = ctx->GetInputDim("Param"); PADDLE_ENFORCE_EQ( param_dims, ctx->GetInputDim("Grad"), - "Param and Grad input of AdagradOp should have the same dimension."); + platform::errors::InvalidArgument("Param and Grad input of AdagradOp " + "should have the same dimension.")); PADDLE_ENFORCE_EQ( param_dims, ctx->GetInputDim("Moment"), - "Param and Moment input of AdagradOp should have the same dimension."); + platform::errors::InvalidArgument("Param and Moment input of AdagradOp " + "should have the same dimension.")); ctx->SetOutputDim("ParamOut", param_dims); ctx->SetOutputDim("MomentOut", param_dims); diff --git a/paddle/fluid/operators/optimizers/adagrad_op.h b/paddle/fluid/operators/optimizers/adagrad_op.h index 5d80102257656acc29a25d3973c6e10b4f51e48f..057bd4e863ddf7ae27b54ee784174e1452619395 100644 --- a/paddle/fluid/operators/optimizers/adagrad_op.h +++ b/paddle/fluid/operators/optimizers/adagrad_op.h @@ -47,11 +47,12 @@ class AdagradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { const auto *param_var = ctx.InputVar("Param"); - PADDLE_ENFORCE(param_var->IsType(), - "The Var(%s)'s type should be LoDTensor, " - "but the received is %s", - ctx.InputNames("Param").front(), - framework::ToTypeName(param_var->Type())); + PADDLE_ENFORCE_EQ(param_var->IsType(), true, + platform::errors::InvalidArgument( + "The Var(%s)'s type should be LoDTensor, " + "but the received is %s", + ctx.InputNames("Param").front(), + framework::ToTypeName(param_var->Type()))); auto *param_out_tensor = ctx.Output("ParamOut"); auto *moment_out_tensor = ctx.Output("MomentOut"); @@ -89,10 +90,14 @@ class AdagradOpKernel : public framework::OpKernel { } } else if (grad_var->IsType()) { auto *param_tensor = ctx.Input("Param"); - PADDLE_ENFORCE_EQ(param_tensor, param_out_tensor); + PADDLE_ENFORCE_EQ(param_tensor, param_out_tensor, + platform::errors::InvalidArgument( + "the input tensor not euqal with output tensor")); auto *moment_tensor = ctx.Input("Moment"); - PADDLE_ENFORCE_EQ(moment_tensor, moment_out_tensor); + PADDLE_ENFORCE_EQ(moment_tensor, moment_out_tensor, + platform::errors::InvalidArgument( + "the input moment not eual with output moment")); SparseAdagradFunctor 
 
       SparseAdagradFunctor<DeviceContext, T> functor;
       functor(ctx.template device_context<DeviceContext>(),
@@ -100,7 +105,8 @@ class AdagradOpKernel : public framework::OpKernel<T> {
               *ctx.Input<framework::Tensor>("LearningRate"), epsilon,
               moment_out_tensor, param_out_tensor);
     } else {
-      PADDLE_THROW("Unsupported Variable Type of Grad");
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "Unsupported Variable Type of Grad"));
     }
   }
 };
diff --git a/paddle/fluid/operators/optimizers/adam_op.h b/paddle/fluid/operators/optimizers/adam_op.h
index ff7075a7fc2838c1b73fef1c31e8f8d107ad43b9..24e383c87122a03abe989dafe4ba39fc4be8cbe9 100644
--- a/paddle/fluid/operators/optimizers/adam_op.h
+++ b/paddle/fluid/operators/optimizers/adam_op.h
@@ -376,11 +376,12 @@ class AdamOpKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     const auto* param_var = ctx.InputVar("Param");
-    PADDLE_ENFORCE(param_var->IsType<framework::LoDTensor>(),
-                   "The Var(%s)'s type should be LoDTensor, "
-                   "but the received is %s",
-                   ctx.InputNames("Param").front(),
-                   framework::ToTypeName(param_var->Type()));
+    PADDLE_ENFORCE_EQ(param_var->IsType<framework::LoDTensor>(), true,
+                      platform::errors::InvalidArgument(
+                          "The Var(%s)'s type should be LoDTensor, "
+                          "but the received is %s",
+                          ctx.InputNames("Param").front(),
+                          framework::ToTypeName(param_var->Type())));
 
     using paddle::framework::LoDTensor;
 
@@ -572,7 +573,8 @@ class AdamOpKernel : public framework::OpKernel<T> {
         functor(param->numel());
       }
     } else {
-      PADDLE_THROW("Variable type not supported by adam_op");
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "Variable type not supported by adam_op"));
     }
   }
 };
diff --git a/paddle/fluid/operators/optimizers/adamax_op.cc b/paddle/fluid/operators/optimizers/adamax_op.cc
index 9ede7a56d0b535b2c9a1c538d442424ca6f3e4b7..548197ce5e5594c0b96047e304702bb94fc085fb 100644
--- a/paddle/fluid/operators/optimizers/adamax_op.cc
+++ b/paddle/fluid/operators/optimizers/adamax_op.cc
@@ -23,57 +23,61 @@ class AdamaxOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Param"),
-                   "Input(Param) of AdamaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Grad"),
-                   "Input(Grad) of AdamaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Moment"),
-                   "Input(Moment) of AdamaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("InfNorm"),
-                   "Input(InfNorm) of AdamaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("LearningRate"),
-                   "Input(LearningRate) of AdamaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Beta1Pow"),
-                   "Input(Beta1Pow) of AdamaxOp should not be null.");
-    PADDLE_ENFORCE(
-        ctx->GetInputsVarType("Param").front() ==
-            framework::proto::VarType::LOD_TENSOR,
-        "The input var's type should be LoDTensor, but the received is %s",
-        ctx->Inputs("Param").front(), ctx->GetInputsVarType("Param").front());
-    PADDLE_ENFORCE(
-        ctx->GetInputsVarType("Grad").front() ==
-            framework::proto::VarType::LOD_TENSOR,
-        "The input var's type should be LoDTensor, but the received is %s",
-        ctx->Inputs("Grad").front(), ctx->GetInputsVarType("Grad").front());
-
-    PADDLE_ENFORCE(ctx->HasOutput("ParamOut"),
-                   "Output(ParamOut) of AdamaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("MomentOut"),
-                   "Output(MomentOut) of AdamaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("InfNormOut"),
-                   "Output(InfNormOut) of AdamaxOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Param"), "Input", "Param", "Adamax");
OP_INOUT_CHECK(ctx->HasInput("Grad"), "Input", "Grad", "Adamax"); + OP_INOUT_CHECK(ctx->HasInput("Moment"), "Input", "Moment", "Adamax"); + OP_INOUT_CHECK(ctx->HasInput("InfNorm"), "Input", "InfNorm", "Adamax"); + OP_INOUT_CHECK(ctx->HasInput("LearningRate"), "Input", "LearningRate", + "Adamax"); + OP_INOUT_CHECK(ctx->HasInput("Beta1Pow"), "Input", "Beta1Pow", "Adamax"); + PADDLE_ENFORCE_EQ( + ctx->GetInputsVarType("Param").front(), + framework::proto::VarType::LOD_TENSOR, + platform::errors::InvalidArgument( + "The input var's type should be LoDTensor, but the received is %s", + ctx->Inputs("Param").front(), + ctx->GetInputsVarType("Param").front())); + PADDLE_ENFORCE_EQ( + ctx->GetInputsVarType("Grad").front(), + framework::proto::VarType::LOD_TENSOR, + platform::errors::InvalidArgument( + "The input var's type should be LoDTensor, but the received is %s", + ctx->Inputs("Grad").front(), + ctx->GetInputsVarType("Grad").front())); + + OP_INOUT_CHECK(ctx->HasOutput("ParamOut"), "Output", "ParamOut", "Adamax"); + OP_INOUT_CHECK(ctx->HasOutput("MomentOut"), "Output", "MomentOut", + "Adamax"); + OP_INOUT_CHECK(ctx->HasOutput("InfNormOut"), "Output", "InfNormOut", + "Adamax"); auto lr_dims = ctx->GetInputDim("LearningRate"); PADDLE_ENFORCE_NE(framework::product(lr_dims), 0, - "Maybe the Input variable LearningRate has not " - "been initialized. You may need to confirm " - "if you put exe.run(startup_program) " - "after optimizer.minimize function."); + platform::errors::InvalidArgument( + "Maybe the Input variable LearningRate has not " + "been initialized. You may need to confirm " + "if you put exe.run(startup_program) " + "after optimizer.minimize function.")); PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1, - "Learning rate should have 1 dimension"); + platform::errors::InvalidArgument( + "Learning rate should have 1 dimension")); auto beta1_pow_dims = ctx->GetInputDim("Beta1Pow"); PADDLE_ENFORCE_EQ(framework::product(beta1_pow_dims), 1, - "Beta1 power accumulator should have 1 dimension"); + platform::errors::InvalidArgument( + "Beta1 power accumulator should have 1 dimension")); auto param_dims = ctx->GetInputDim("Param"); PADDLE_ENFORCE_EQ( param_dims, ctx->GetInputDim("Grad"), - "Param and Grad input of AdamaxOp should have same dimension"); + platform::errors::InvalidArgument( + "Param and Grad input of AdamaxOp should have same dimension")); PADDLE_ENFORCE_EQ( param_dims, ctx->GetInputDim("Moment"), - "Param and Moment input of AdamaxOp should have same dimension"); + platform::errors::InvalidArgument( + "Param and Moment input of AdamaxOp should have same dimension")); PADDLE_ENFORCE_EQ( param_dims, ctx->GetInputDim("InfNorm"), - "Param and InfNorm input of AdamaxOp should have same dimension"); + platform::errors::InvalidArgument( + "Param and InfNorm input of AdamaxOp should have same dimension")); ctx->SetOutputDim("ParamOut", param_dims); ctx->SetOutputDim("MomentOut", param_dims); diff --git a/paddle/fluid/operators/optimizers/adamax_op.h b/paddle/fluid/operators/optimizers/adamax_op.h index b1c869532810e69bb527d8fabdd900a2a6642ab1..df0112448b1cbc82d699dc1ee6f3444bda3b142b 100644 --- a/paddle/fluid/operators/optimizers/adamax_op.h +++ b/paddle/fluid/operators/optimizers/adamax_op.h @@ -24,17 +24,19 @@ class AdamaxOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { const auto* param_var = ctx.InputVar("Param"); - PADDLE_ENFORCE(param_var->IsType(), - "The Var(%s)'s type should be LoDTensor, " - "but the received 
is %s", - ctx.InputNames("Param").front(), - framework::ToTypeName(param_var->Type())); + PADDLE_ENFORCE_EQ(param_var->IsType(), true, + platform::errors::InvalidArgument( + "The Var(%s)'s type should be LoDTensor, " + "but the received is %s", + ctx.InputNames("Param").front(), + framework::ToTypeName(param_var->Type()))); const auto* grad_var = ctx.InputVar("Grad"); - PADDLE_ENFORCE(grad_var->IsType(), - "The Var(%s)'s type should be LoDTensor, " - "but the received is %s", - ctx.InputNames("Grad").front(), - framework::ToTypeName(grad_var->Type())); + PADDLE_ENFORCE_EQ(grad_var->IsType(), true, + platform::errors::InvalidArgument( + "The Var(%s)'s type should be LoDTensor, " + "but the received is %s", + ctx.InputNames("Grad").front(), + framework::ToTypeName(grad_var->Type()))); auto param_out_tensor = ctx.Output("ParamOut"); auto moment_out_tensor = ctx.Output("MomentOut"); diff --git a/paddle/fluid/operators/optimizers/ftrl_op.cc b/paddle/fluid/operators/optimizers/ftrl_op.cc index 3f0cd8aa3c8534e348de0e679b31e68ccbfd7822..0c8e6c0b571cfcff61be3d9f5f774e9935755408 100644 --- a/paddle/fluid/operators/optimizers/ftrl_op.cc +++ b/paddle/fluid/operators/optimizers/ftrl_op.cc @@ -24,46 +24,50 @@ class FTRLOp : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Param"), - "Input(Param) of FTRL should not be null."); - PADDLE_ENFORCE(ctx->HasInput("SquaredAccumulator"), - "Input(SquaredAccumulator) of FTRL should not be null."); - PADDLE_ENFORCE(ctx->HasInput("LinearAccumulator"), - "Input(LinearAccumulator) of FTRL should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Grad"), - "Input(Grad) of FTRL should not be null."); - PADDLE_ENFORCE(ctx->HasInput("LearningRate"), - "Input(LearningRate) of FTRL should not be null."); - PADDLE_ENFORCE( - ctx->GetInputsVarType("Param").front() == - framework::proto::VarType::LOD_TENSOR, - "The input var's type should be LoDTensor, but the received is %s", - ctx->Inputs("Param").front(), ctx->GetInputsVarType("Param").front()); - PADDLE_ENFORCE( - ctx->GetInputsVarType("Grad").front() == - framework::proto::VarType::LOD_TENSOR, - "The input var's type should be LoDTensor, but the received is %s", - ctx->Inputs("Grad").front(), ctx->GetInputsVarType("Grad").front()); - - PADDLE_ENFORCE(ctx->HasOutput("ParamOut"), - "Output(ParamOut) of FTRL should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("SquaredAccumOut"), - "Output(SquaredAccumOut) of FTRL should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("LinearAccumOut"), - "Output(LinearAccumOut) of FTRL should not be null."); + OP_INOUT_CHECK(ctx->HasInput("Param"), "Input", "Param", "FTRL"); + OP_INOUT_CHECK(ctx->HasInput("SquaredAccumulator"), "Input", + "SquaredAccumulator", "FTRL"); + OP_INOUT_CHECK(ctx->HasInput("LinearAccumulator"), "Input", + "LinearAccumulator", "FTRL"); + OP_INOUT_CHECK(ctx->HasInput("Grad"), "Input", "Grad", "FTRL"); + OP_INOUT_CHECK(ctx->HasInput("LearningRate"), "Input", "LearningRate", + "FTRL"); + PADDLE_ENFORCE_EQ( + ctx->GetInputsVarType("Param").front(), + framework::proto::VarType::LOD_TENSOR, + platform::errors::InvalidArgument( + "The input var's type should be LoDTensor, but the received is %s", + ctx->Inputs("Param").front(), + ctx->GetInputsVarType("Param").front())); + PADDLE_ENFORCE_EQ( + ctx->GetInputsVarType("Grad").front(), + framework::proto::VarType::LOD_TENSOR, + platform::errors::InvalidArgument( + "The input var's type should be LoDTensor, but the received is 
%s", + ctx->Inputs("Grad").front(), + ctx->GetInputsVarType("Grad").front())); + + OP_INOUT_CHECK(ctx->HasOutput("ParamOut"), "Output", "ParamOut", "FTRL"); + OP_INOUT_CHECK(ctx->HasOutput("SquaredAccumOut"), "Output", + "SquaredAccumOut", "FTRL"); + OP_INOUT_CHECK(ctx->HasOutput("LinearAccumOut"), "Output", "LinearAccumOut", + "FTRL"); auto param_dim = ctx->GetInputDim("Param"); PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("Grad"), - "Two input of FTRL Op's dimension must be same."); + platform::errors::InvalidArgument( + "Two input of FTRL Op's dimension must be same.")); auto lr_dim = ctx->GetInputDim("LearningRate"); PADDLE_ENFORCE_NE(framework::product(lr_dim), 0, - "Maybe the Input variable LearningRate has not " - "been initialized. You may need to confirm " - "if you put exe.run(startup_program) " - "after optimizer.minimize function."); - PADDLE_ENFORCE_EQ(framework::product(lr_dim), 1, - "Learning Rate should be a scalar."); + platform::errors::InvalidArgument( + "Maybe the Input variable LearningRate has not " + "been initialized. You may need to confirm " + "if you put exe.run(startup_program) " + "after optimizer.minimize function.")); + PADDLE_ENFORCE_EQ( + framework::product(lr_dim), 1, + platform::errors::InvalidArgument("Learning Rate should be a scalar.")); ctx->SetOutputDim("ParamOut", param_dim); ctx->SetOutputDim("SquaredAccumOut", param_dim); diff --git a/paddle/fluid/operators/optimizers/ftrl_op.h b/paddle/fluid/operators/optimizers/ftrl_op.h index 53799d99a9f03c2679b9c4c7dce99ab56d92d23a..5c7ac48663b172eb7ad126326cced263c1793619 100644 --- a/paddle/fluid/operators/optimizers/ftrl_op.h +++ b/paddle/fluid/operators/optimizers/ftrl_op.h @@ -29,17 +29,19 @@ class FTRLOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { const auto* param_var = ctx.InputVar("Param"); - PADDLE_ENFORCE(param_var->IsType(), - "The Var(%s)'s type should be LoDTensor, " - "but the received is %s", - ctx.InputNames("Param").front(), - framework::ToTypeName(param_var->Type())); + PADDLE_ENFORCE_EQ(param_var->IsType(), true, + platform::errors::InvalidArgument( + "The Var(%s)'s type should be LoDTensor, " + "but the received is %s", + ctx.InputNames("Param").front(), + framework::ToTypeName(param_var->Type()))); const auto* grad_var = ctx.InputVar("Grad"); - PADDLE_ENFORCE(grad_var->IsType(), - "The Var(%s)'s type should be LoDTensor, " - "but the received is %s", - ctx.InputNames("Grad").front(), - framework::ToTypeName(grad_var->Type())); + PADDLE_ENFORCE_EQ(grad_var->IsType(), true, + platform::errors::InvalidArgument( + "The Var(%s)'s type should be LoDTensor, " + "but the received is %s", + ctx.InputNames("Grad").front(), + framework::ToTypeName(grad_var->Type()))); auto* param_out = ctx.Output("ParamOut"); auto* sq_accum_out = ctx.Output("SquaredAccumOut"); diff --git a/python/paddle/fluid/layers/loss.py b/python/paddle/fluid/layers/loss.py index f13d2f2d5c1f443d1fb0c4fc67b54b0deb0cd19b..968ddb8fa9f9ce95fb3c5da06838ca345abd7275 100644 --- a/python/paddle/fluid/layers/loss.py +++ b/python/paddle/fluid/layers/loss.py @@ -183,6 +183,8 @@ def bpr_loss(input, label, name=None): """ helper = LayerHelper('bpr_loss', **locals()) out = helper.create_variable_for_type_inference(dtype=input.dtype) + check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'], + 'bpr_loss') helper.append_op( type='bpr_loss', inputs={'X': [input], diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 
index 904b9918b29a049e2188ae6e9a5e160983d5a9c2..897c04cdff837b6341c45f25a1437f576614a38c 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -13490,6 +13490,8 @@ def continuous_value_model(input, cvm, use_cvm=True):
     """
     helper = LayerHelper('cvm', **locals())
     out = helper.create_variable(dtype=input.dtype)
+    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
+                             'cvm')
     helper.append_op(
         type='cvm',
         inputs={'X': [input],
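
Illustrative usage note (not part of the patch): with the check_variable_and_dtype guards added to bpr_loss and continuous_value_model above, a wrong input dtype is reported while the program is being built, instead of surfacing later as an enforce failure inside the C++ kernels. A minimal sketch of the expected behaviour, assuming the fluid 1.x API these layers belong to:

# Hypothetical usage sketch, not part of this diff; assumes the fluid 1.x API.
import paddle.fluid as fluid

startup = fluid.Program()
main = fluid.Program()
with fluid.program_guard(main, startup):
    # A float32 input passes the new check_variable_and_dtype('input', ...) guard.
    score = fluid.data(name='score', shape=[-1, 10], dtype='float32')
    label = fluid.data(name='label', shape=[-1, 1], dtype='int64')
    loss = fluid.layers.bpr_loss(input=score, label=label)

    # An integer-typed input (e.g. dtype='int32') is now expected to raise a
    # TypeError from check_variable_and_dtype during program construction,
    # rather than failing at run time inside bpr_loss_op.cc.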