diff --git a/paddle/fluid/operators/optimizers/dpsgd_op.cc b/paddle/fluid/operators/optimizers/dpsgd_op.cc
index 3bcf17fc7b37f1c29e23ccccc7d4df92e705671e..bce00933420e4cfa750e93d563070cc48051d6cc 100644
--- a/paddle/fluid/operators/optimizers/dpsgd_op.cc
+++ b/paddle/fluid/operators/optimizers/dpsgd_op.cc
@@ -24,32 +24,45 @@ class DpsgdOp : public framework::OperatorWithKernel {
   void InferShape(framework::InferShapeContext *ctx) const override {
     PADDLE_ENFORCE_EQ(ctx->HasInput("Param"), true,
-                      "Input(Param) of DpsgdOp should not be null.");
+                      platform::errors::NotFound(
+                          "Input(Param) of DpsgdOp should not be null."));
     PADDLE_ENFORCE_EQ(ctx->HasInput("Grad"), true,
-                      "Input(Grad) of DpsgdOp should not be null.");
-    PADDLE_ENFORCE_EQ(ctx->HasInput("LearningRate"), true,
-                      "Input(LearningRate) of DpsgdOp should not be null.");
+                      platform::errors::NotFound(
+                          "Input(Grad) of DpsgdOp should not be null."));
+    PADDLE_ENFORCE_EQ(
+        ctx->HasInput("LearningRate"), true,
+        platform::errors::NotFound(
+            "Input(LearningRate) of DpsgdOp should not be null."));
     PADDLE_ENFORCE_EQ(
         ctx->GetInputsVarType("Param").front(),
         framework::proto::VarType::LOD_TENSOR,
-        "The input var's type should be LoDTensor, but the received is %s",
-        ctx->Inputs("Param").front(), ctx->GetInputsVarType("Param").front());
+        platform::errors::InvalidArgument(
+            "The input var's type should be LoDTensor, but the received is %s",
+            ctx->GetInputsVarType("Param").front()));
     PADDLE_ENFORCE_EQ(
         ctx->GetInputsVarType("Grad").front(),
         framework::proto::VarType::LOD_TENSOR,
-        "The input var's type should be LoDTensor, but the received is %s",
-        ctx->Inputs("Grad").front(), ctx->GetInputsVarType("Grad").front());
+        platform::errors::InvalidArgument(
+            "The input var's type should be LoDTensor, but the received is %s",
+            ctx->GetInputsVarType("Grad").front()));
     PADDLE_ENFORCE_EQ(ctx->HasOutput("ParamOut"), true,
-                      "Output(ParamOut) of DpsgdOp should not be null.");
+                      platform::errors::NotFound(
+                          "Output(ParamOut) of DpsgdOp should not be null."));
 
     auto lr_dims = ctx->GetInputDim("LearningRate");
     PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1,
-                      "Learning rate should have 1 dimension");
+                      platform::errors::InvalidArgument(
+                          "Learning rate should have 1 dimension. But received "
+                          "LearningRate's dims [%s].",
+                          framework::product(lr_dims)));
     auto param_dims = ctx->GetInputDim("Param");
     PADDLE_ENFORCE_EQ(
         param_dims, ctx->GetInputDim("Grad"),
-        "Param and Grad input of DpsgdOp should have same dimension");
+        platform::errors::InvalidArgument(
+            "Param and Grad input of DpsgdOp should have the same dimension. "
+            "But received Param's dim [%s] and Grad's dim [%s].",
+            param_dims, ctx->GetInputDim("Grad")));
 
     ctx->SetOutputDim("ParamOut", param_dims);
   }
diff --git a/paddle/fluid/operators/optimizers/dpsgd_op.h b/paddle/fluid/operators/optimizers/dpsgd_op.h
index 4eb52feb85108637aa4878d3555bc3cbff674420..e52a1dd9db1791e0be82eb1ee47999d2b8f51175 100644
--- a/paddle/fluid/operators/optimizers/dpsgd_op.h
+++ b/paddle/fluid/operators/optimizers/dpsgd_op.h
@@ -28,17 +28,19 @@ class DpsgdOpKernel : public framework::OpKernel<T> {
   void Compute(const framework::ExecutionContext &ctx) const override {
     const auto *param_var = ctx.InputVar("Param");
     PADDLE_ENFORCE_EQ(param_var->IsType<framework::LoDTensor>(), true,
-                      "The Var(%s)'s type should be LoDTensor, "
-                      "but the received is %s",
-                      ctx.InputNames("Param").front(),
-                      framework::ToTypeName(param_var->Type()));
+                      platform::errors::InvalidArgument(
+                          "The Var(%s)'s type should be LoDTensor, "
+                          "but the received is %s",
+                          ctx.InputNames("Param").front(),
+                          framework::ToTypeName(param_var->Type())));
     const auto *grad_var = ctx.InputVar("Grad");
     PADDLE_ENFORCE_EQ(grad_var->IsType<framework::LoDTensor>(), true,
-                      "The Var(%s)'s type should be LoDTensor, "
-                      "but the received is %s",
-                      ctx.InputNames("Grad").front(),
-                      framework::ToTypeName(grad_var->Type()));
+                      platform::errors::InvalidArgument(
+                          "The Var(%s)'s type should be LoDTensor, "
+                          "but the received is %s",
+                          ctx.InputNames("Grad").front(),
+                          framework::ToTypeName(grad_var->Type())));
 
     const auto *learning_rate = ctx.Input<framework::Tensor>("LearningRate");
but the received is %s", + ctx->GetInputsVarType("Param").front())); + + PADDLE_ENFORCE_EQ(ctx->HasOutput("ParamOut"), true, + platform::errors::NotFound( + "Output(ParamOut) of Momentum should not be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasOutput("VelocityOut"), true, + platform::errors::NotFound( + "Output(VelocityOut) of Momentum should not be null.")); auto lr_dims = ctx->GetInputDim("LearningRate"); PADDLE_ENFORCE_NE(framework::product(lr_dims), 0, - "Maybe the Input variable LearningRate has not " - "been initialized. You may need to confirm " - "if you put exe.run(startup_program) " - "after optimizer.minimize function."); + platform::errors::InvalidArgument( + "Maybe the Input variable LearningRate has not " + "been initialized. You may need to confirm " + "if you put exe.run(startup_program) " + "after optimizer.minimize function.")); PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1, - "Learning_rate should be a scalar"); + platform::errors::InvalidArgument( + "Learning_rate should be a scalar. But Received " + "LearningRate's dim [%s]", + framework::product(lr_dims))); auto param_dim = ctx->GetInputDim("Param"); if (ctx->GetInputsVarType("Grad")[0] == framework::proto::VarType::LOD_TENSOR) { PADDLE_ENFORCE_EQ( param_dim, ctx->GetInputDim("Grad"), - "Param and Grad input of MomentumOp should have the same dimension."); + platform::errors::InvalidArgument( + "Param and Grad input of MomentumOp should have the same " + "dimension. But received Param's dim [%s] and Grad's dim [%s].", + param_dim, ctx->GetInputDim("Grad"))); PADDLE_ENFORCE_EQ( param_dim, ctx->GetInputDim("Velocity"), - "Param and Velocity of MomentumOp should have the same dimension."); + platform::errors::InvalidArgument( + "Param and Velocity of MomentumOp should have the same " + "dimension. But received Param's dim [%s] and Velocity [%s].", + param_dim, ctx->GetInputDim("Velocity"))); } ctx->SetOutputDim("ParamOut", param_dim); @@ -398,10 +417,12 @@ class MomentumOpKernel : public framework::OpKernel { for_range(functor); } } else { - PADDLE_THROW( - string::Sprintf("MomentumOp only supports LoDTensor or SelectedRows " - "gradient, but the received Variable Type is %s", - framework::ToTypeName(grad_var->Type()))); + PADDLE_ENFORCE_EQ(false, true, + platform::errors::PermissionDenied( + "Unsupported Variable Type of Grad " + "in MomentumOp. 
Excepted LodTensor " + "or SelectedRows, But received [%s]", + paddle::framework::ToTypeName(grad_var->Type()))); } } }; diff --git a/paddle/fluid/operators/optimizers/rmsprop_op.cc b/paddle/fluid/operators/optimizers/rmsprop_op.cc index eeee008cdc53c457146074060d526d8d0e8b43aa..9e7960c237fdec4d61ab8cf2663558b7b596d087 100644 --- a/paddle/fluid/operators/optimizers/rmsprop_op.cc +++ b/paddle/fluid/operators/optimizers/rmsprop_op.cc @@ -22,47 +22,75 @@ class RmspropOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Param"), - "Input(Param) of RmspropOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("MeanSquare"), - "Input(MeanSquare) of RmspropOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("LearningRate"), - "Input(LearningRate) of RmspropOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Grad"), - "Input(Grad) of RmspropOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Moment"), - "Input(Moment) of RmspropOp should not be null."); - PADDLE_ENFORCE( - ctx->GetInputsVarType("Param").front() == - framework::proto::VarType::LOD_TENSOR, - "The input var's type should be LoDTensor, but the received is %s", - ctx->Inputs("Param").front(), ctx->GetInputsVarType("Param").front()); - - PADDLE_ENFORCE(ctx->HasOutput("ParamOut"), - "Output(param_out) of RmspropOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("MomentOut"), - "Output(MomentOut) of RmspropOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("MeanSquareOut"), - "Output(MeanSquareOut) of RmspropOp should not be null."); + PADDLE_ENFORCE_EQ(ctx->HasInput("Param"), true, + platform::errors::NotFound( + "Input(Param) of RmspropOp should not be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasInput("MeanSquare"), true, + platform::errors::NotFound( + "Input(MeanSquare) of RmspropOp should not be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasInput("LearningRate"), true, + platform::errors::NotFound( + "Input(LearningRate) of RmspropOp should not be null.")); + PADDLE_ENFORCE_EQ(ctx->HasInput("Grad"), true, + platform::errors::NotFound( + "Input(Grad) of RmspropOp should not be null.")); + PADDLE_ENFORCE_EQ(ctx->HasInput("Moment"), true, + platform::errors::NotFound( + "Input(Moment) of RmspropOp should not be null.")); + PADDLE_ENFORCE_EQ(ctx->GetInputsVarType("Param").front(), + framework::proto::VarType::LOD_TENSOR, + platform::errors::InvalidArgument( + "The input var's type in RmspropOp should be " + "LoDTensor, but the received is %s", + ctx->GetInputsVarType("Param").front())); + + PADDLE_ENFORCE_EQ( + ctx->HasOutput("ParamOut"), true, + platform::errors::NotFound( + "Output(param_out) of RmspropOp should not be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasOutput("MomentOut"), true, + platform::errors::NotFound( + "Output(MomentOut) of RmspropOp should not be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasOutput("MeanSquareOut"), true, + platform::errors::NotFound( + "Output(MeanSquareOut) of RmspropOp should not be null.")); if (ctx->Attrs().Get("centered")) { - PADDLE_ENFORCE(ctx->HasOutput("MeanGradOut"), - "Output(MeanGradOut) of RmspropOp should not be null."); + PADDLE_ENFORCE_EQ( + ctx->HasOutput("MeanGradOut"), true, + platform::errors::NotFound( + "Output(MeanGradOut) of RmspropOp should not be null.")); } auto param_dim = ctx->GetInputDim("Param"); PADDLE_ENFORCE_EQ( param_dim, ctx->GetInputDim("Grad"), - "Param and grad input of RmspropOp should have the 
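Note: in the unsupported-gradient branch above, PADDLE_THROW(string::Sprintf(...))
becomes PADDLE_ENFORCE_EQ(false, true, ...) so that the failure carries a typed
PermissionDenied error. Passing the typed error straight to PADDLE_THROW would
express the same unconditional failure without the dummy comparison; a sketch of
that alternative spelling (assuming PADDLE_THROW accepts platform::errors
objects wherever the typed errors are available):

    // Unconditional typed failure, no artificial false == true check.
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Unsupported Variable Type of Grad in MomentumOp. Expected "
        "LoDTensor or SelectedRows, but received [%s]",
        framework::ToTypeName(grad_var->Type())));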
same dimension."); + platform::errors::InvalidArgument( + "Param and grad input of RmspropOp should have the same dimension. " + "But received Param's dim [%s] and Grad's dim [%s].", + param_dim, ctx->GetInputDim("Grad"))); PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("Moment"), - "Param and Momentum input of RmspropOp " - "should have the same dimension."); + platform::errors::InvalidArgument( + "Param and Momentum input of RmspropOp " + "should have the same dimension. But received " + "Param's dim [%s] and Moment [%s]", + param_dim, ctx->GetInputDim("Moment"))); PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("MeanSquare"), - "Param and Momentum input of RmspropOp " - "should have the same dimension."); + platform::errors::InvalidArgument( + "Param and Momentum input of RmspropOp " + "should have the same dimension. But received " + "Param's dim [%s] and MeanSquare [%s]", + param_dim, ctx->GetInputDim("MeanSquare"))); auto lr_dim = ctx->GetInputDim("LearningRate"); PADDLE_ENFORCE_EQ(framework::product(lr_dim), 1, - "Learning Rate should be a scalar."); + platform::errors::InvalidArgument( + "Learning Rate of RmspropOp should be a scalar. But " + "received LearningRate's dim [%s]", + framework::product(lr_dim))); ctx->SetOutputDim("ParamOut", param_dim); ctx->SetOutputDim("MomentOut", param_dim); diff --git a/paddle/fluid/operators/optimizers/rmsprop_op.h b/paddle/fluid/operators/optimizers/rmsprop_op.h index 4550052b2d614ccbbb09f4a2b9e747708b2a2baa..1ec712a1431a4657cf1c1456da91fe5369914438 100644 --- a/paddle/fluid/operators/optimizers/rmsprop_op.h +++ b/paddle/fluid/operators/optimizers/rmsprop_op.h @@ -148,11 +148,15 @@ class RmspropOpKernel : public framework::OpKernel { auto &mom_tensor = *ctx.Input("Moment"); PADDLE_ENFORCE_EQ(&p_tensor, param_out, - "Param and ParamOut must be the same Tensor"); + platform::errors::InvalidArgument( + "Param and ParamOut must be the same Tensor")); PADDLE_ENFORCE_EQ(&mom_tensor, moment_out, - "Moment and MomentOut must be the same Tensor"); - PADDLE_ENFORCE_EQ(&ms_tensor, mean_square_out, - "MeanSquare and MeanSquareOut must be the same Tensor"); + platform::errors::InvalidArgument( + "Moment and MomentOut must be the same Tensor")); + PADDLE_ENFORCE_EQ( + &ms_tensor, mean_square_out, + platform::errors::InvalidArgument( + "MeanSquare and MeanSquareOut must be the same Tensor")); auto &dev_ctx = ctx.template device_context(); size_t limit = static_cast(ms_tensor.numel()); @@ -179,8 +183,10 @@ class RmspropOpKernel : public framework::OpKernel { auto &mg_tensor = *ctx.Input("MeanGrad"); auto mg = EigenVector::Flatten(mg_tensor); auto *mean_grad_out = ctx.Output("MeanGradOut"); - PADDLE_ENFORCE_EQ(&mg_tensor, mean_grad_out, - "MeanGrad and MeanGradOut must be the same Tensor"); + PADDLE_ENFORCE_EQ( + &mg_tensor, mean_grad_out, + platform::errors::InvalidArgument( + "MeanGrad and MeanGradOut must be the same Tensor")); auto mg_out = EigenVector::Flatten(*mean_grad_out); mg_out.device(place) = rho * mg + (1 - rho) * g; @@ -198,8 +204,10 @@ class RmspropOpKernel : public framework::OpKernel { if (centered) { auto &mg_tensor = *ctx.Input("MeanGrad"); auto *mean_grad_out = ctx.Output("MeanGradOut"); - PADDLE_ENFORCE_EQ(&mg_tensor, mean_grad_out, - "MeanGrad and MeanGradOut must be the same Tensor"); + PADDLE_ENFORCE_EQ( + &mg_tensor, mean_grad_out, + platform::errors::InvalidArgument( + "MeanGrad and MeanGradOut must be the same Tensor")); for_range(CenteredRmspropFunctor>( param_out->mutable_data(ctx.GetPlace()), 
mean_square_out->mutable_data(ctx.GetPlace()), @@ -233,8 +241,10 @@ class RmspropOpKernel : public framework::OpKernel { if (centered) { auto &mg_tensor = *ctx.Input("MeanGrad"); auto *mean_grad_out = ctx.Output("MeanGradOut"); - PADDLE_ENFORCE_EQ(&mg_tensor, mean_grad_out, - "MeanGrad and MeanGradOut must be the same Tensor"); + PADDLE_ENFORCE_EQ( + &mg_tensor, mean_grad_out, + platform::errors::InvalidArgument( + "MeanGrad and MeanGradOut must be the same Tensor")); for_range(CenteredRmspropFunctor>( param_out->mutable_data(ctx.GetPlace()), mean_square_out->mutable_data(ctx.GetPlace()), @@ -249,7 +259,12 @@ class RmspropOpKernel : public framework::OpKernel { rho, epsilon, momentum, grad_func)); } } else { - PADDLE_THROW("RMSProp only supports LoDTensor or SelectedRows gradient"); + PADDLE_ENFORCE_EQ(false, true, + platform::errors::PermissionDenied( + "Unsupported Variable Type of Grad " + "in RmspropOp. Excepted LodTensor " + "or SelectedRows, But received [%s]", + paddle::framework::ToTypeName(grad_var->Type()))); } } }; diff --git a/paddle/fluid/operators/optimizers/sgd_op.cc b/paddle/fluid/operators/optimizers/sgd_op.cc index aeff8da70b958a440953824c46a095a4f86b9379..569dbcd6a3ee105ae8dd8570bbb2215c32343d01 100644 --- a/paddle/fluid/operators/optimizers/sgd_op.cc +++ b/paddle/fluid/operators/optimizers/sgd_op.cc @@ -22,23 +22,31 @@ class SGDOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Param"), - "Input(Param) of SGDOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Grad"), - "Input(Grad) of SGDOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("LearningRate"), - "Input(LearningRate) of SGDOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("ParamOut"), - "Output(ParamOut) of SGDOp should not be null."); + PADDLE_ENFORCE_EQ(ctx->HasInput("Param"), true, + platform::errors::NotFound( + "Input(Param) of SGDOp should not be null.")); + PADDLE_ENFORCE_EQ( + ctx->HasInput("Grad"), true, + platform::errors::NotFound("Input(Grad) of SGDOp should not be null.")); + PADDLE_ENFORCE_EQ(ctx->HasInput("LearningRate"), true, + platform::errors::NotFound( + "Input(LearningRate) of SGDOp should not be null.")); + PADDLE_ENFORCE_EQ(ctx->HasOutput("ParamOut"), true, + platform::errors::NotFound( + "Output(ParamOut) of SGDOp should not be null.")); auto lr_dims = ctx->GetInputDim("LearningRate"); PADDLE_ENFORCE_NE(framework::product(lr_dims), 0, - "Maybe the Input variable LearningRate has not " - "been initialized. You may need to confirm " - "if you put exe.run(startup_program) " - "after optimizer.minimize function."); + platform::errors::NotFound( + "Maybe the Input variable LearningRate has not " + "been initialized. You may need to confirm " + "if you put exe.run(startup_program) " + "after optimizer.minimize function.")); PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1, - "Learning rate should have 1 element"); + platform::errors::InvalidArgument( + "Learning rate should have 1 element. 
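Note: the shape checks above format a framework::DDim directly with %s (for
example "Param's dim [%s]", param_dim), which renders the whole shape, while
the learning-rate checks pass framework::product(lr_dim), which renders only
the element count. Passing the DDim itself would surface the offending shape
in the message; a sketch of that variant (the message text here is
hypothetical):

    PADDLE_ENFORCE_EQ(framework::product(lr_dim), 1,
                      platform::errors::InvalidArgument(
                          "Learning Rate of RmspropOp should be a scalar. "
                          "But received LearningRate's shape [%s].",
                          lr_dim));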
But received " + "LearningRate dims [%s]", + framework::product(lr_dims))); auto param_dim = ctx->GetInputDim("Param"); if (ctx->GetInputsVarType("Grad")[0] == framework::proto::VarType::LOD_TENSOR) { diff --git a/paddle/fluid/operators/optimizers/sgd_op.cu b/paddle/fluid/operators/optimizers/sgd_op.cu index b70f24e0e5e8f2f6c6ac974942ccd4c4c3ad41bb..a5d9ad271f23a4d6a17f07a6c5b6a7d57aa7a3c5 100644 --- a/paddle/fluid/operators/optimizers/sgd_op.cu +++ b/paddle/fluid/operators/optimizers/sgd_op.cu @@ -57,11 +57,12 @@ class SGDOpKernel public: void Compute(const framework::ExecutionContext& ctx) const override { const auto* param_var = ctx.InputVar("Param"); - PADDLE_ENFORCE(param_var->IsType(), - "The Var(%s)'s type should be LoDTensor, " - "but the received is %s", - ctx.InputNames("Param").front(), - framework::ToTypeName(param_var->Type())); + PADDLE_ENFORCE_EQ(param_var->IsType(), true, + platform::errors::InvalidArgument( + "The Var(%s)'s type should be LoDTensor, " + "but the received is %s", + ctx.InputNames("Param").front(), + paddle::framework::ToTypeName(param_var->Type()))); auto* param = ctx.Input("Param"); auto* param_out = ctx.Output("ParamOut"); @@ -91,18 +92,30 @@ class SGDOpKernel // TODO(qijun): In Sparse SGD operator, in-place update is enforced. // This manual optimization brings difficulty to track data dependency. // It's better to find a more elegant solution. - PADDLE_ENFORCE_EQ(param, param_out); + PADDLE_ENFORCE_EQ( + param, param_out, + platform::errors::InvalidArgument( + "The input tensor Param of SgdOp should be equal with ParamOut " + "if variable's type is SelectedRows.")); auto* grad = ctx.Input("Grad"); auto in_height = grad->height(); auto out_dims = param_out->dims(); - PADDLE_ENFORCE_EQ(in_height, out_dims[0]); + PADDLE_ENFORCE_EQ(in_height, out_dims[0], + platform::errors::InvalidArgument( + "The input tensor Grad's height of SgdOp should be " + "equal with ParamOut's dims. But received Grad's " + "height [%s] and ParamOut's dims [%s]", + in_height, out_dims[0])); auto& in_value = grad->value(); auto& in_rows = grad->rows(); int64_t in_row_numel = in_value.numel() / in_rows.size(); - PADDLE_ENFORCE_EQ(in_row_numel, param_out->numel() / in_height); + PADDLE_ENFORCE_EQ(in_row_numel, param_out->numel() / in_height, + platform::errors::InvalidArgument( + "The in_row_numel of SgdOp should be equal with " + "param_out's numel / in_height.")); auto* in_data = in_value.data(); auto* out_data = param_out->data(); @@ -118,7 +131,12 @@ class SGDOpKernel out_data, in_row_numel, in_rows.size()); } else { - PADDLE_THROW("Unsupported Variable Type of Grad"); + PADDLE_ENFORCE_EQ(false, true, + platform::errors::PermissionDenied( + "Unsupported Variable Type of Grad " + "in SgdOp. Excepted LodTensor or " + "SelectedRows, But received [%s]", + paddle::framework::ToTypeName(grad_var->Type()))); } } }; diff --git a/paddle/fluid/operators/optimizers/sgd_op.h b/paddle/fluid/operators/optimizers/sgd_op.h index 539d774a395d739ae98adeb6bd2679d0487d0f06..1aaf95efc3250747a4cb46ab79b4415a6b527907 100644 --- a/paddle/fluid/operators/optimizers/sgd_op.h +++ b/paddle/fluid/operators/optimizers/sgd_op.h @@ -44,8 +44,20 @@ class SGDOpKernel if (grad_var->IsType()) { const auto *grad = ctx.Input("Grad"); auto sz = param_out->numel(); - PADDLE_ENFORCE_EQ(param->numel(), sz); - PADDLE_ENFORCE_EQ(grad->numel(), sz); + PADDLE_ENFORCE_EQ(param->numel(), sz, + platform::errors::InvalidArgument( + "The input tensor Param's numel of SgdOp " + "should be equal with ParamOut's numel. 
" + "But received Param's " + "numel = [%s], ParamOut's numel = [%s]", + param->numel(), sz)); + PADDLE_ENFORCE_EQ(grad->numel(), sz, + platform::errors::InvalidArgument( + "The input tensor Grad's numel of SgdOp " + "should be equal with ParamOut's numel. " + "But received Grad's " + "numel = [%s], ParamOut's numel = [%s]", + grad->numel(), sz)); jit::sgd_attr_t attr(1, sz, 1, sz, 1); const T *lr = learning_rate->data(); @@ -62,7 +74,11 @@ class SGDOpKernel // TODO(qijun): In Sparse SGD operator, in-place update is enforced. // This manual optimization brings difficulty to track data dependency. // It's better to find a more elegant solution. - PADDLE_ENFORCE_EQ(param, param_out); + PADDLE_ENFORCE_EQ(param, param_out, + platform::errors::InvalidArgument( + "The input tensor Param of SgdOp " + "should be equal with ParamOut if variable's " + "type is SelectedRows. ")); const auto *grad = ctx.Input("Grad"); auto &grad_rows = grad->rows(); @@ -73,7 +89,13 @@ class SGDOpKernel } auto out_dims = param_out->dims(); - PADDLE_ENFORCE_EQ(grad->height(), out_dims[0]); + PADDLE_ENFORCE_EQ( + grad->height(), out_dims[0], + platform::errors::InvalidArgument( + "The input tensor Grad's height of SgdOp " + "should be equal with ParamOut's dims. But received Grad's " + "height [%s] and ParamOut's dims [%s]", + grad->height(), out_dims[0])); auto &grad_value = grad->value(); const T *param_data = param->data(); const T *grad_data = grad_value.data(); @@ -87,19 +109,31 @@ class SGDOpKernel attr.grad_height = grad_rows.size(); // note: it is not grad->height() attr.grad_width = grad_value.numel() / attr.grad_height; attr.selected_rows_size = grad_rows.size(); - PADDLE_ENFORCE_EQ(attr.grad_width, attr.param_width); + PADDLE_ENFORCE_EQ( + attr.grad_width, attr.param_width, + platform::errors::InvalidArgument( + "The grad_value's numel of SgdOp " + "should be equal with param_out's numel. But received " + "grad_value's numel [%s] and param_out's numel [%s]", + attr.grad_width, attr.param_width)); auto sgd = jit::KernelFuncs, platform::CPUPlace>::Cache().At( attr); sgd(lr, param_data, grad_data, rows_data, out_data, &attr); } else { - PADDLE_THROW("Unsupported Variable Type of Grad"); + PADDLE_ENFORCE_EQ( + false, true, + platform::errors::PermissionDenied( + "Unsupported Variable Type of Grad in SgdOp. Excepted " + "LodTensor or SelectedRows, But received [%s]", + paddle::framework::ToTypeName(grad_var->Type()))); } } else if (param_var->IsType()) { - PADDLE_ENFORCE(grad_var->IsType(), - "when param " - "is SelectedRows, gradient should also be SelectedRows"); + PADDLE_ENFORCE_EQ(grad_var->IsType(), true, + platform::errors::InvalidArgument( + "when param is SelectedRows, " + "gradient should also be SelectedRows")); const auto ¶m = param_var->Get(); auto *param_out = ctx.Output("ParamOut"); const auto &grad = grad_var->Get(); @@ -112,27 +146,36 @@ class SGDOpKernel auto param_row_width = param.value().dims()[1]; auto grad_row_width = grad.value().dims()[1]; - VLOG(4) << " param rows: " << param.rows().size() - << " param memory rows: " << param.value().dims()[0] - << " grad rows: " << grad.rows().size() - << " grad memory rows: " << grad.value().dims()[0]; - PADDLE_ENFORCE_EQ(param_row_width, grad_row_width, - "param_row should have the same size with grad_row"); + PADDLE_ENFORCE_EQ( + param_row_width, grad_row_width, + platform::errors::InvalidArgument( + "The param_row in SgdOP should have the same size with grad_row. 
" + "But received param_row's width is [%s], and grad_row's width is " + "[%s]", + param_row_width, grad_row_width)); const auto *lr = learning_rate->data(); const auto *grad_data = grad.value().data(); auto *out_data = param_out->mutable_value()->data(); for (size_t i = 0; i < grad.rows().size(); i++) { int64_t id_index = param_out->AutoGrownIndex(grad.rows()[i], false); - PADDLE_ENFORCE_GE(id_index, static_cast(0), - "id should be in the table"); + PADDLE_ENFORCE_GE( + id_index, static_cast(0), + platform::errors::InvalidArgument( + "The id in SgdOp should be >= 0. But recevied id_index is [%s]", + id_index)); for (int64_t j = 0; j < grad_row_width; j++) { out_data[id_index * grad_row_width + j] -= lr[0] * grad_data[i * grad_row_width + j]; } } } else { - PADDLE_THROW("Unsupported Variable Type of Parameter"); + PADDLE_ENFORCE_EQ( + false, true, + platform::errors::PermissionDenied( + "Unsupported Variable Type of Parameter in SgdOp. Excepted " + "LodTensor or SelectedRows, But received [%s]", + paddle::framework::ToTypeName(param_var->Type()))); } } };