From 621a4085b9aff772ffc9c5018bcd8b55db609006 Mon Sep 17 00:00:00 2001 From: zhang wenhui Date: Wed, 13 May 2020 11:22:00 +0800 Subject: [PATCH] enhance cvm bpr_loss adam adagrad adamax ftrl error message, test=develop (#24452) --- paddle/fluid/operators/bpr_loss_op.cc | 62 ++++++++------ paddle/fluid/operators/bpr_loss_op.h | 8 +- paddle/fluid/operators/cvm_op.cc | 62 ++++++++------ .../fluid/operators/optimizers/adagrad_op.cc | 39 +++++---- .../fluid/operators/optimizers/adagrad_op.h | 22 +++-- paddle/fluid/operators/optimizers/adam_op.h | 14 ++-- .../fluid/operators/optimizers/adamax_op.cc | 80 ++++++++++--------- paddle/fluid/operators/optimizers/adamax_op.h | 22 ++--- paddle/fluid/operators/optimizers/ftrl_op.cc | 72 +++++++++-------- paddle/fluid/operators/optimizers/ftrl_op.h | 22 ++--- python/paddle/fluid/layers/loss.py | 2 + python/paddle/fluid/layers/nn.py | 2 + 12 files changed, 228 insertions(+), 179 deletions(-) diff --git a/paddle/fluid/operators/bpr_loss_op.cc b/paddle/fluid/operators/bpr_loss_op.cc index dbabbf1e4c7..0ecce86b59b 100644 --- a/paddle/fluid/operators/bpr_loss_op.cc +++ b/paddle/fluid/operators/bpr_loss_op.cc @@ -23,22 +23,26 @@ class BprLossOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); - PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null."); - PADDLE_ENFORCE(ctx->HasOutput("Y"), "Output(Y) should be not null."); + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "BprLoss"); + OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "BprLoss"); + OP_INOUT_CHECK(ctx->HasOutput("Y"), "Output", "Y", "BprLoss"); auto x_dims = ctx->GetInputDim("X"); auto label_dims = ctx->GetInputDim("Label"); int rank = x_dims.size(); - PADDLE_ENFORCE_EQ(rank, label_dims.size(), - "Input(X) and Input(Label) shall have the same rank."); + PADDLE_ENFORCE_EQ( + rank, label_dims.size(), + platform::errors::InvalidArgument( + "Input(X) and Input(Label) shall have the same rank.")); if (ctx->IsRuntime() || (framework::product(x_dims) > 0 && framework::product(label_dims) > 0)) { - PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 0, rank - 1), - framework::slice_ddim(label_dims, 0, rank - 1), - "Input(X) and Input(Label) shall have the same shape " - "except the last dimension."); + PADDLE_ENFORCE_EQ( + framework::slice_ddim(x_dims, 0, rank - 1), + framework::slice_ddim(label_dims, 0, rank - 1), + platform::errors::InvalidArgument( + "Input(X) and Input(Label) shall have the same shape " + "except the last dimension.")); } auto y_dims = x_dims; @@ -63,33 +67,41 @@ class BprLossGradientOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); - PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should be not null."); - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")), - "Input(Y@GRAD) shoudl be not null."); - PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")), - "Output(X@GRAD) should be not null."); + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "BprLossGradient"); + OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "BprLossGradient"); + OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Y")), "Input", + framework::GradVarName("Y"), "BprLossGradient"); + 
OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output", + framework::GradVarName("X"), "BprLossGradient"); auto x_dims = ctx->GetInputDim("X"); auto label_dims = ctx->GetInputDim("Label"); auto dy_dims = ctx->GetInputDim(framework::GradVarName("Y")); int rank = x_dims.size(); - PADDLE_ENFORCE_EQ(dy_dims.size(), rank, - "Input(Y@Grad) and Input(X) should have the same rank."); - PADDLE_ENFORCE_EQ(label_dims.size(), rank, - "Input(Label) and Input(X) should have the same rank."); + PADDLE_ENFORCE_EQ( + dy_dims.size(), rank, + platform::errors::InvalidArgument( + "Input(Y@Grad) and Input(X) should have the same rank.")); + PADDLE_ENFORCE_EQ( + label_dims.size(), rank, + platform::errors::InvalidArgument( + "Input(Label) and Input(X) should have the same rank.")); PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 0, rank - 1), framework::slice_ddim(label_dims, 0, rank - 1), - "The Input(X) and Input(Label) should have the same " - "shape except the last dimension."); + platform::errors::InvalidArgument( + "The Input(X) and Input(Label) should have the same " + "shape except the last dimension.")); PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 0, rank - 1), framework::slice_ddim(dy_dims, 0, rank - 1), - "The Input(X) and Input(Y@Grad) should have the same " - "shape except the last dimension."); + platform::errors::InvalidArgument( + "The Input(X) and Input(Y@Grad) should have the same " + "shape except the last dimension.")); PADDLE_ENFORCE_EQ(dy_dims[rank - 1], 1, - "The last dimension of Input(Y@Grad) should be 1."); + platform::errors::InvalidArgument( + "The last dimension of Input(Y@Grad) should be 1.")); PADDLE_ENFORCE_EQ(label_dims[rank - 1], 1, - " the last dimension of Input(Label) should be 1."); + platform::errors::InvalidArgument( + " the last dimension of Input(Label) should be 1.")); ctx->SetOutputDim(framework::GradVarName("X"), x_dims); ctx->ShareLoD("X", framework::GradVarName("X")); } diff --git a/paddle/fluid/operators/bpr_loss_op.h b/paddle/fluid/operators/bpr_loss_op.h index 65efc8c01ab..bebaf6e3365 100644 --- a/paddle/fluid/operators/bpr_loss_op.h +++ b/paddle/fluid/operators/bpr_loss_op.h @@ -28,7 +28,6 @@ using Tensor = framework::Tensor; template struct TolerableValue { HOSTDEVICE T operator()(const T& x) const { - PADDLE_ENFORCE_EQ(std::is_floating_point::value, true); const T kApproInf = 1e20; if (x == INFINITY) return kApproInf; if (x == -INFINITY) return -kApproInf; @@ -62,8 +61,11 @@ class BprLossOpKernel : public framework::OpKernel { const int64_t* label_data = labels->data(); for (int i = 0; i < step_size; ++i) { int lbl_pos = label_data[i]; - PADDLE_ENFORCE_GE(lbl_pos, 0); - PADDLE_ENFORCE_LT(lbl_pos, class_num); + PADDLE_ENFORCE_GE(lbl_pos, 0, platform::errors::InvalidArgument( + "label data %d is illegal.", lbl_pos)); + PADDLE_ENFORCE_LT(lbl_pos, class_num, + platform::errors::InvalidArgument( + "label data %d is illegal.", lbl_pos)); int index_pos = i * class_num + lbl_pos; T sum = static_cast(0); for (int j = 0; j < class_num; j++) { diff --git a/paddle/fluid/operators/cvm_op.cc b/paddle/fluid/operators/cvm_op.cc index e5a6a6277fe..155f8f518f9 100644 --- a/paddle/fluid/operators/cvm_op.cc +++ b/paddle/fluid/operators/cvm_op.cc @@ -26,17 +26,20 @@ class CVMOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); - PADDLE_ENFORCE(ctx->HasInput("CVM"), "Input(CVM) 
should be not null."); - PADDLE_ENFORCE(ctx->HasOutput("Y"), "Output(Y) should be not null."); + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "CVM"); + OP_INOUT_CHECK(ctx->HasInput("CVM"), "Input", "CVM", "CVM"); + OP_INOUT_CHECK(ctx->HasOutput("Y"), "Output", "Y", "CVM"); auto x_dims = ctx->GetInputDim("X"); auto cvm_dims = ctx->GetInputDim("CVM"); - PADDLE_ENFORCE_EQ(x_dims.size(), 2UL, "Input(X)'s rank should be 2."); - PADDLE_ENFORCE_EQ(cvm_dims.size(), 2UL, "Input(CVM)'s rank should be 2."); - PADDLE_ENFORCE_EQ(cvm_dims[1], 2UL, - "The 2nd dimension of " - "Input(CVM) should be 2."); + PADDLE_ENFORCE_EQ(x_dims.size(), 2UL, platform::errors::InvalidArgument( + "Input(X)'s rank should be 2.")); + PADDLE_ENFORCE_EQ( + cvm_dims.size(), 2UL, + platform::errors::InvalidArgument("Input(CVM)'s rank should be 2.")); + PADDLE_ENFORCE_EQ(cvm_dims[1], 2UL, platform::errors::InvalidArgument( + "The 2nd dimension of " + "Input(CVM) should be 2.")); if (ctx->Attrs().Get("use_cvm")) { ctx->SetOutputDim("Y", {x_dims[0], x_dims[1]}); @@ -63,27 +66,36 @@ class CVMGradientOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should be not null."); - PADDLE_ENFORCE(ctx->HasInput("CVM"), "Input(CVM) should be not null."); - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")), - "Input(Y@GRAD) should be not null."); - PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")), - "Output(X@GRAD) should be not null."); + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "CVMGradient"); + OP_INOUT_CHECK(ctx->HasInput("CVM"), "Input", "CVM", "CVMGradient"); + OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Y")), "Input", + framework::GradVarName("Y"), "CVMGradient"); + OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output", + framework::GradVarName("X"), "CVMGradient"); auto x_dims = ctx->GetInputDim("X"); auto cvm_dims = ctx->GetInputDim("CVM"); auto dy_dims = ctx->GetInputDim(framework::GradVarName("Y")); - PADDLE_ENFORCE_EQ(x_dims.size(), 2, "Input(X)'s rank should be 2."); - PADDLE_ENFORCE_EQ(dy_dims.size(), 2, "Input(Y@Grad)'s rank should be 2."); - PADDLE_ENFORCE_EQ(cvm_dims.size(), 2, "Input(CVM)'s rank should be 2."); - - PADDLE_ENFORCE_EQ(x_dims[0], dy_dims[0], - "The 1st dimension of Input(X) and Input(Y@Grad) should " - "be equal."); - - PADDLE_ENFORCE_EQ(cvm_dims[1], 2, - "When Attr(soft_label) == false, the 2nd dimension of " - "Input(CVM) should be 2."); + PADDLE_ENFORCE_EQ(x_dims.size(), 2, platform::errors::InvalidArgument( + "Input(X)'s rank should be 2.")); + PADDLE_ENFORCE_EQ( + dy_dims.size(), 2, + platform::errors::InvalidArgument("Input(Y@Grad)'s rank should be 2.")); + PADDLE_ENFORCE_EQ( + cvm_dims.size(), 2, + platform::errors::InvalidArgument("Input(CVM)'s rank should be 2.")); + + PADDLE_ENFORCE_EQ( + x_dims[0], dy_dims[0], + platform::errors::InvalidArgument( + "The 1st dimension of Input(X) and Input(Y@Grad) should " + "be equal.")); + + PADDLE_ENFORCE_EQ( + cvm_dims[1], 2, + platform::errors::InvalidArgument( + "When Attr(soft_label) == false, the 2nd dimension of " + "Input(CVM) should be 2.")); ctx->SetOutputDim(framework::GradVarName("X"), x_dims); ctx->ShareLoD("X", framework::GradVarName("X")); } diff --git a/paddle/fluid/operators/optimizers/adagrad_op.cc b/paddle/fluid/operators/optimizers/adagrad_op.cc index b3aff1eff8c..255dc5bb083 100644 --- 
a/paddle/fluid/operators/optimizers/adagrad_op.cc +++ b/paddle/fluid/operators/optimizers/adagrad_op.cc @@ -29,35 +29,34 @@ class AdagradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Param"), - "Input(Param) of AdagradOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Grad"), - "Input(Grad) of AdagradOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Moment"), - "Input(Moment) of AdagradOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("LearningRate"), - "Input(LearningRate) of AdagradOp should not be null."); - - PADDLE_ENFORCE(ctx->HasOutput("ParamOut"), - "Output(ParamOut) of AdagradOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("MomentOut"), - "Output(MomentOut) of AdagradOp should not be null."); + OP_INOUT_CHECK(ctx->HasInput("Param"), "Input", "Param", "Adagrad"); + OP_INOUT_CHECK(ctx->HasInput("Grad"), "Input", "Grad", "Adagrad"); + OP_INOUT_CHECK(ctx->HasInput("Moment"), "Input", "Moment", "Adagrad"); + OP_INOUT_CHECK(ctx->HasInput("LearningRate"), "Input", "LearningRate", + "Adagrad"); + OP_INOUT_CHECK(ctx->HasOutput("ParamOut"), "Output", "ParamOut", "Adagrad"); + OP_INOUT_CHECK(ctx->HasOutput("MomentOut"), "Output", "MomentOut", + "Adagrad"); auto lr_dims = ctx->GetInputDim("LearningRate"); PADDLE_ENFORCE_NE(framework::product(lr_dims), 0, - "Maybe the Input variable LearningRate has not " - "been initialized. You may need to confirm " - "if you put exe.run(startup_program) " - "after optimizer.minimize function."); + platform::errors::InvalidArgument( + "Maybe the Input variable LearningRate has not " + "been initialized. You may need to confirm " + "if you put exe.run(startup_program) " + "after optimizer.minimize function.")); PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1, - "LearningRate should have one element"); + platform::errors::InvalidArgument( + "LearningRate should have one element")); auto param_dims = ctx->GetInputDim("Param"); PADDLE_ENFORCE_EQ( param_dims, ctx->GetInputDim("Grad"), - "Param and Grad input of AdagradOp should have the same dimension."); + platform::errors::InvalidArgument("Param and Grad input of AdagradOp " + "should have the same dimension.")); PADDLE_ENFORCE_EQ( param_dims, ctx->GetInputDim("Moment"), - "Param and Moment input of AdagradOp should have the same dimension."); + platform::errors::InvalidArgument("Param and Moment input of AdagradOp " + "should have the same dimension.")); ctx->SetOutputDim("ParamOut", param_dims); ctx->SetOutputDim("MomentOut", param_dims); diff --git a/paddle/fluid/operators/optimizers/adagrad_op.h b/paddle/fluid/operators/optimizers/adagrad_op.h index 5d801022576..057bd4e863d 100644 --- a/paddle/fluid/operators/optimizers/adagrad_op.h +++ b/paddle/fluid/operators/optimizers/adagrad_op.h @@ -47,11 +47,12 @@ class AdagradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { const auto *param_var = ctx.InputVar("Param"); - PADDLE_ENFORCE(param_var->IsType(), - "The Var(%s)'s type should be LoDTensor, " - "but the received is %s", - ctx.InputNames("Param").front(), - framework::ToTypeName(param_var->Type())); + PADDLE_ENFORCE_EQ(param_var->IsType(), true, + platform::errors::InvalidArgument( + "The Var(%s)'s type should be LoDTensor, " + "but the received is %s", + ctx.InputNames("Param").front(), + framework::ToTypeName(param_var->Type()))); auto *param_out_tensor 
= ctx.Output("ParamOut");
 auto *moment_out_tensor = ctx.Output("MomentOut");
@@ -89,10 +90,14 @@ class AdagradOpKernel : public framework::OpKernel {
       }
     } else if (grad_var->IsType()) {
       auto *param_tensor = ctx.Input("Param");
-      PADDLE_ENFORCE_EQ(param_tensor, param_out_tensor);
+      PADDLE_ENFORCE_EQ(param_tensor, param_out_tensor,
+                        platform::errors::InvalidArgument(
+                            "the input tensor should be equal to the output tensor"));
       auto *moment_tensor = ctx.Input("Moment");
-      PADDLE_ENFORCE_EQ(moment_tensor, moment_out_tensor);
+      PADDLE_ENFORCE_EQ(moment_tensor, moment_out_tensor,
+                        platform::errors::InvalidArgument(
+                            "the input moment should be equal to the output moment"));
       SparseAdagradFunctor functor;
       functor(ctx.template device_context(),
@@ -100,7 +105,8 @@ class AdagradOpKernel : public framework::OpKernel {
               *ctx.Input("LearningRate"), epsilon, moment_out_tensor,
               param_out_tensor);
     } else {
-      PADDLE_THROW("Unsupported Variable Type of Grad");
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "Unsupported Variable Type of Grad"));
     }
   }
 };
diff --git a/paddle/fluid/operators/optimizers/adam_op.h b/paddle/fluid/operators/optimizers/adam_op.h
index ff7075a7fc2..24e383c8712 100644
--- a/paddle/fluid/operators/optimizers/adam_op.h
+++ b/paddle/fluid/operators/optimizers/adam_op.h
@@ -376,11 +376,12 @@ class AdamOpKernel : public framework::OpKernel {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    const auto* param_var = ctx.InputVar("Param");
-    PADDLE_ENFORCE(param_var->IsType(),
-                   "The Var(%s)'s type should be LoDTensor, "
-                   "but the received is %s",
-                   ctx.InputNames("Param").front(),
-                   framework::ToTypeName(param_var->Type()));
+    PADDLE_ENFORCE_EQ(param_var->IsType(), true,
+                      platform::errors::InvalidArgument(
+                          "The Var(%s)'s type should be LoDTensor, "
+                          "but the received is %s",
+                          ctx.InputNames("Param").front(),
+                          framework::ToTypeName(param_var->Type())));
    using paddle::framework::LoDTensor;
@@ -572,7 +573,8 @@ class AdamOpKernel : public framework::OpKernel {
         functor(param->numel());
       }
     } else {
-      PADDLE_THROW("Variable type not supported by adam_op");
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "Variable type not supported by adam_op"));
     }
   }
 };
diff --git a/paddle/fluid/operators/optimizers/adamax_op.cc b/paddle/fluid/operators/optimizers/adamax_op.cc
index 9ede7a56d0b..548197ce5e5 100644
--- a/paddle/fluid/operators/optimizers/adamax_op.cc
+++ b/paddle/fluid/operators/optimizers/adamax_op.cc
@@ -23,57 +23,61 @@ class AdamaxOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Param"),
-                   "Input(Param) of AdamaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Grad"),
-                   "Input(Grad) of AdamaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Moment"),
-                   "Input(Moment) of AdamaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("InfNorm"),
-                   "Input(InfNorm) of AdamaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("LearningRate"),
-                   "Input(LearningRate) of AdamaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Beta1Pow"),
-                   "Input(Beta1Pow) of AdamaxOp should not be null.");
-    PADDLE_ENFORCE(
-        ctx->GetInputsVarType("Param").front() ==
-            framework::proto::VarType::LOD_TENSOR,
-        "The input var's type should be LoDTensor, but the received is %s",
-        ctx->Inputs("Param").front(), ctx->GetInputsVarType("Param").front());
-    PADDLE_ENFORCE(
-        ctx->GetInputsVarType("Grad").front() == -
framework::proto::VarType::LOD_TENSOR, - "The input var's type should be LoDTensor, but the received is %s", - ctx->Inputs("Grad").front(), ctx->GetInputsVarType("Grad").front()); - - PADDLE_ENFORCE(ctx->HasOutput("ParamOut"), - "Output(ParamOut) of AdamaxOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("MomentOut"), - "Output(MomentOut) of AdamaxOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("InfNormOut"), - "Output(InfNormOut) of AdamaxOp should not be null."); + OP_INOUT_CHECK(ctx->HasInput("Param"), "Input", "Param", "Adamax"); + OP_INOUT_CHECK(ctx->HasInput("Grad"), "Input", "Grad", "Adamax"); + OP_INOUT_CHECK(ctx->HasInput("Moment"), "Input", "Moment", "Adamax"); + OP_INOUT_CHECK(ctx->HasInput("InfNorm"), "Input", "InfNorm", "Adamax"); + OP_INOUT_CHECK(ctx->HasInput("LearningRate"), "Input", "LearningRate", + "Adamax"); + OP_INOUT_CHECK(ctx->HasInput("Beta1Pow"), "Input", "Beta1Pow", "Adamax"); + PADDLE_ENFORCE_EQ( + ctx->GetInputsVarType("Param").front(), + framework::proto::VarType::LOD_TENSOR, + platform::errors::InvalidArgument( + "The input var's type should be LoDTensor, but the received is %s", + ctx->Inputs("Param").front(), + ctx->GetInputsVarType("Param").front())); + PADDLE_ENFORCE_EQ( + ctx->GetInputsVarType("Grad").front(), + framework::proto::VarType::LOD_TENSOR, + platform::errors::InvalidArgument( + "The input var's type should be LoDTensor, but the received is %s", + ctx->Inputs("Grad").front(), + ctx->GetInputsVarType("Grad").front())); + + OP_INOUT_CHECK(ctx->HasOutput("ParamOut"), "Output", "ParamOut", "Adamax"); + OP_INOUT_CHECK(ctx->HasOutput("MomentOut"), "Output", "MomentOut", + "Adamax"); + OP_INOUT_CHECK(ctx->HasOutput("InfNormOut"), "Output", "InfNormOut", + "Adamax"); auto lr_dims = ctx->GetInputDim("LearningRate"); PADDLE_ENFORCE_NE(framework::product(lr_dims), 0, - "Maybe the Input variable LearningRate has not " - "been initialized. You may need to confirm " - "if you put exe.run(startup_program) " - "after optimizer.minimize function."); + platform::errors::InvalidArgument( + "Maybe the Input variable LearningRate has not " + "been initialized. 
You may need to confirm " + "if you put exe.run(startup_program) " + "after optimizer.minimize function.")); PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1, - "Learning rate should have 1 dimension"); + platform::errors::InvalidArgument( + "Learning rate should have 1 dimension")); auto beta1_pow_dims = ctx->GetInputDim("Beta1Pow"); PADDLE_ENFORCE_EQ(framework::product(beta1_pow_dims), 1, - "Beta1 power accumulator should have 1 dimension"); + platform::errors::InvalidArgument( + "Beta1 power accumulator should have 1 dimension")); auto param_dims = ctx->GetInputDim("Param"); PADDLE_ENFORCE_EQ( param_dims, ctx->GetInputDim("Grad"), - "Param and Grad input of AdamaxOp should have same dimension"); + platform::errors::InvalidArgument( + "Param and Grad input of AdamaxOp should have same dimension")); PADDLE_ENFORCE_EQ( param_dims, ctx->GetInputDim("Moment"), - "Param and Moment input of AdamaxOp should have same dimension"); + platform::errors::InvalidArgument( + "Param and Moment input of AdamaxOp should have same dimension")); PADDLE_ENFORCE_EQ( param_dims, ctx->GetInputDim("InfNorm"), - "Param and InfNorm input of AdamaxOp should have same dimension"); + platform::errors::InvalidArgument( + "Param and InfNorm input of AdamaxOp should have same dimension")); ctx->SetOutputDim("ParamOut", param_dims); ctx->SetOutputDim("MomentOut", param_dims); diff --git a/paddle/fluid/operators/optimizers/adamax_op.h b/paddle/fluid/operators/optimizers/adamax_op.h index b1c86953281..df0112448b1 100644 --- a/paddle/fluid/operators/optimizers/adamax_op.h +++ b/paddle/fluid/operators/optimizers/adamax_op.h @@ -24,17 +24,19 @@ class AdamaxOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { const auto* param_var = ctx.InputVar("Param"); - PADDLE_ENFORCE(param_var->IsType(), - "The Var(%s)'s type should be LoDTensor, " - "but the received is %s", - ctx.InputNames("Param").front(), - framework::ToTypeName(param_var->Type())); + PADDLE_ENFORCE_EQ(param_var->IsType(), true, + platform::errors::InvalidArgument( + "The Var(%s)'s type should be LoDTensor, " + "but the received is %s", + ctx.InputNames("Param").front(), + framework::ToTypeName(param_var->Type()))); const auto* grad_var = ctx.InputVar("Grad"); - PADDLE_ENFORCE(grad_var->IsType(), - "The Var(%s)'s type should be LoDTensor, " - "but the received is %s", - ctx.InputNames("Grad").front(), - framework::ToTypeName(grad_var->Type())); + PADDLE_ENFORCE_EQ(grad_var->IsType(), true, + platform::errors::InvalidArgument( + "The Var(%s)'s type should be LoDTensor, " + "but the received is %s", + ctx.InputNames("Grad").front(), + framework::ToTypeName(grad_var->Type()))); auto param_out_tensor = ctx.Output("ParamOut"); auto moment_out_tensor = ctx.Output("MomentOut"); diff --git a/paddle/fluid/operators/optimizers/ftrl_op.cc b/paddle/fluid/operators/optimizers/ftrl_op.cc index 3f0cd8aa3c8..0c8e6c0b571 100644 --- a/paddle/fluid/operators/optimizers/ftrl_op.cc +++ b/paddle/fluid/operators/optimizers/ftrl_op.cc @@ -24,46 +24,50 @@ class FTRLOp : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Param"), - "Input(Param) of FTRL should not be null."); - PADDLE_ENFORCE(ctx->HasInput("SquaredAccumulator"), - "Input(SquaredAccumulator) of FTRL should not be null."); - PADDLE_ENFORCE(ctx->HasInput("LinearAccumulator"), - "Input(LinearAccumulator) of FTRL should not be null."); - 
PADDLE_ENFORCE(ctx->HasInput("Grad"), - "Input(Grad) of FTRL should not be null."); - PADDLE_ENFORCE(ctx->HasInput("LearningRate"), - "Input(LearningRate) of FTRL should not be null."); - PADDLE_ENFORCE( - ctx->GetInputsVarType("Param").front() == - framework::proto::VarType::LOD_TENSOR, - "The input var's type should be LoDTensor, but the received is %s", - ctx->Inputs("Param").front(), ctx->GetInputsVarType("Param").front()); - PADDLE_ENFORCE( - ctx->GetInputsVarType("Grad").front() == - framework::proto::VarType::LOD_TENSOR, - "The input var's type should be LoDTensor, but the received is %s", - ctx->Inputs("Grad").front(), ctx->GetInputsVarType("Grad").front()); - - PADDLE_ENFORCE(ctx->HasOutput("ParamOut"), - "Output(ParamOut) of FTRL should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("SquaredAccumOut"), - "Output(SquaredAccumOut) of FTRL should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("LinearAccumOut"), - "Output(LinearAccumOut) of FTRL should not be null."); + OP_INOUT_CHECK(ctx->HasInput("Param"), "Input", "Param", "FTRL"); + OP_INOUT_CHECK(ctx->HasInput("SquaredAccumulator"), "Input", + "SquaredAccumulator", "FTRL"); + OP_INOUT_CHECK(ctx->HasInput("LinearAccumulator"), "Input", + "LinearAccumulator", "FTRL"); + OP_INOUT_CHECK(ctx->HasInput("Grad"), "Input", "Grad", "FTRL"); + OP_INOUT_CHECK(ctx->HasInput("LearningRate"), "Input", "LearningRate", + "FTRL"); + PADDLE_ENFORCE_EQ( + ctx->GetInputsVarType("Param").front(), + framework::proto::VarType::LOD_TENSOR, + platform::errors::InvalidArgument( + "The input var's type should be LoDTensor, but the received is %s", + ctx->Inputs("Param").front(), + ctx->GetInputsVarType("Param").front())); + PADDLE_ENFORCE_EQ( + ctx->GetInputsVarType("Grad").front(), + framework::proto::VarType::LOD_TENSOR, + platform::errors::InvalidArgument( + "The input var's type should be LoDTensor, but the received is %s", + ctx->Inputs("Grad").front(), + ctx->GetInputsVarType("Grad").front())); + + OP_INOUT_CHECK(ctx->HasOutput("ParamOut"), "Output", "ParamOut", "FTRL"); + OP_INOUT_CHECK(ctx->HasOutput("SquaredAccumOut"), "Output", + "SquaredAccumOut", "FTRL"); + OP_INOUT_CHECK(ctx->HasOutput("LinearAccumOut"), "Output", "LinearAccumOut", + "FTRL"); auto param_dim = ctx->GetInputDim("Param"); PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("Grad"), - "Two input of FTRL Op's dimension must be same."); + platform::errors::InvalidArgument( + "Two input of FTRL Op's dimension must be same.")); auto lr_dim = ctx->GetInputDim("LearningRate"); PADDLE_ENFORCE_NE(framework::product(lr_dim), 0, - "Maybe the Input variable LearningRate has not " - "been initialized. You may need to confirm " - "if you put exe.run(startup_program) " - "after optimizer.minimize function."); - PADDLE_ENFORCE_EQ(framework::product(lr_dim), 1, - "Learning Rate should be a scalar."); + platform::errors::InvalidArgument( + "Maybe the Input variable LearningRate has not " + "been initialized. 
You may need to confirm " + "if you put exe.run(startup_program) " + "after optimizer.minimize function.")); + PADDLE_ENFORCE_EQ( + framework::product(lr_dim), 1, + platform::errors::InvalidArgument("Learning Rate should be a scalar.")); ctx->SetOutputDim("ParamOut", param_dim); ctx->SetOutputDim("SquaredAccumOut", param_dim); diff --git a/paddle/fluid/operators/optimizers/ftrl_op.h b/paddle/fluid/operators/optimizers/ftrl_op.h index 53799d99a9f..5c7ac48663b 100644 --- a/paddle/fluid/operators/optimizers/ftrl_op.h +++ b/paddle/fluid/operators/optimizers/ftrl_op.h @@ -29,17 +29,19 @@ class FTRLOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { const auto* param_var = ctx.InputVar("Param"); - PADDLE_ENFORCE(param_var->IsType(), - "The Var(%s)'s type should be LoDTensor, " - "but the received is %s", - ctx.InputNames("Param").front(), - framework::ToTypeName(param_var->Type())); + PADDLE_ENFORCE_EQ(param_var->IsType(), true, + platform::errors::InvalidArgument( + "The Var(%s)'s type should be LoDTensor, " + "but the received is %s", + ctx.InputNames("Param").front(), + framework::ToTypeName(param_var->Type()))); const auto* grad_var = ctx.InputVar("Grad"); - PADDLE_ENFORCE(grad_var->IsType(), - "The Var(%s)'s type should be LoDTensor, " - "but the received is %s", - ctx.InputNames("Grad").front(), - framework::ToTypeName(grad_var->Type())); + PADDLE_ENFORCE_EQ(grad_var->IsType(), true, + platform::errors::InvalidArgument( + "The Var(%s)'s type should be LoDTensor, " + "but the received is %s", + ctx.InputNames("Grad").front(), + framework::ToTypeName(grad_var->Type()))); auto* param_out = ctx.Output("ParamOut"); auto* sq_accum_out = ctx.Output("SquaredAccumOut"); diff --git a/python/paddle/fluid/layers/loss.py b/python/paddle/fluid/layers/loss.py index f13d2f2d5c1..968ddb8fa9f 100644 --- a/python/paddle/fluid/layers/loss.py +++ b/python/paddle/fluid/layers/loss.py @@ -183,6 +183,8 @@ def bpr_loss(input, label, name=None): """ helper = LayerHelper('bpr_loss', **locals()) out = helper.create_variable_for_type_inference(dtype=input.dtype) + check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'], + 'bpr_loss') helper.append_op( type='bpr_loss', inputs={'X': [input], diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 904b9918b29..897c04cdff8 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -13490,6 +13490,8 @@ def continuous_value_model(input, cvm, use_cvm=True): """ helper = LayerHelper('cvm', **locals()) out = helper.create_variable(dtype=input.dtype) + check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'], + 'cvm') helper.append_op( type='cvm', inputs={'X': [input], -- GitLab
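A minimal usage sketch (not part of the patch) of how the new Python-side dtype check in bpr_loss is expected to surface to users; the variable names below are hypothetical, and the sketch assumes the Paddle 1.8 fluid API:

# Hypothetical example: with check_variable_and_dtype added above, an
# unsupported input dtype is rejected with a clear TypeError at
# graph-construction time instead of a bare enforce failure inside the
# C++ operator.
import paddle.fluid as fluid

scores = fluid.data(name='scores', shape=[None, 4], dtype='int64')  # int64 is not accepted
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
try:
    loss = fluid.layers.bpr_loss(input=scores, label=label)
except TypeError as e:
    # The message names the op ('bpr_loss'), the argument ('input'), and the
    # accepted dtypes (float16/float32/float64).
    print(e)

The same pattern is applied to continuous_value_model (cvm) in nn.py; keeping the dtype check in the Python wrapper leaves the C++ enforce messages for shape and type problems that can only be detected at run time.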