diff --git a/paddle/fluid/operators/distributed_ops/prefetch_op.cc b/paddle/fluid/operators/distributed_ops/prefetch_op.cc index c6395289f8f77552f929222922f851b2db494f94..6037ab1523ec347574db4b18aa1de732acbd3336 100644 --- a/paddle/fluid/operators/distributed_ops/prefetch_op.cc +++ b/paddle/fluid/operators/distributed_ops/prefetch_op.cc @@ -57,7 +57,13 @@ class PrefetchOp : public framework::OperatorBase { } } for (size_t i = 0; i < rets.size(); i++) { - PADDLE_ENFORCE(rets[i]->Wait(), "internal error in RPCClient"); + PADDLE_ENFORCE_EQ( + rets[i]->Wait(), true, + platform::errors::Fatal( + "It's a fatal error of RPCClient that RPCClient can't " + "get the wait result. It may happen when trainers or " + "parameter servers exit abnormally or there is a " + "network issue!")); } } }; diff --git a/paddle/fluid/operators/optimizers/proximal_adagrad_op.cc b/paddle/fluid/operators/optimizers/proximal_adagrad_op.cc index 3e2f12137afc2368aa12fa836c935f804f8c02d9..7fe2a9a94ac3c91dfe59c41ee0ccfe3efb27d088 100644 --- a/paddle/fluid/operators/optimizers/proximal_adagrad_op.cc +++ b/paddle/fluid/operators/optimizers/proximal_adagrad_op.cc @@ -24,34 +24,42 @@ class ProximalAdagradOp : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Param"), - "Input(Param) of ProximalAdagradOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Moment"), - "Input(Moment) of ProximalAdagradOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Grad"), - "Input(Grad) of ProximalAdagradOp should not be null."); - PADDLE_ENFORCE( - ctx->HasInput("LearningRate"), - "Input(LearningRate) of ProximalAdagradOp should not be null."); - - PADDLE_ENFORCE(ctx->HasOutput("ParamOut"), - "Output(ParamOut) of ProximalAdagradOp should not be null."); - PADDLE_ENFORCE( - ctx->HasOutput("MomentOut"), - "Output(MomentOut) of ProximalAdagradOp should not be null."); + OP_INOUT_CHECK(ctx->HasInput("Param"), "Input", 
"Param", + "ProximalAdagradOp"); + OP_INOUT_CHECK(ctx->HasInput("Moment"), "Input", "Moment", + "ProximalAdagradOp"); + OP_INOUT_CHECK(ctx->HasInput("Grad"), "Input", "Grad", "ProximalAdagradOp"); + OP_INOUT_CHECK(ctx->HasInput("LearningRate"), "Input", "LearningRate", + "ProximalAdagradOp"); + + OP_INOUT_CHECK(ctx->HasOutput("ParamOut"), "Output", "ParamOut", + "ProximalAdagradOp"); + OP_INOUT_CHECK(ctx->HasOutput("MomentOut"), "Output", "MomentOut", + "ProximalAdagradOp"); auto param_dim = ctx->GetInputDim("Param"); - PADDLE_ENFORCE_EQ( - param_dim, ctx->GetInputDim("Grad"), - "Param and Grad of ProximalAdagrad Op must have same dimension."); - - PADDLE_ENFORCE_EQ( - param_dim, ctx->GetInputDim("Moment"), - "Param and Moment of ProximalAdagrad Op must have same dimension."); + PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("Grad"), + platform::errors::InvalidArgument( + "The shape of Input(Param) should be equal to the " + "Input(Grad) of ProximalAdagrad Op. But received " + "Input(Param).dimensions=[%s], " + "Input(Grad).dimensions=[%s]", + param_dim, ctx->GetInputDim("Grad"))); + + PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("Moment"), + platform::errors::InvalidArgument( + "The shape of Input(Param) should be equal to the " + "Input(Moment) of ProximalAdagrad Op. But received " + "Input(Param).dimensions=[%s], " + "Input(Moment).dimensions=[%s]", + param_dim, ctx->GetInputDim("Moment"))); auto lr_dim = ctx->GetInputDim("LearningRate"); - PADDLE_ENFORCE_EQ(framework::product(lr_dim), 1, - "Learning Rate should be a scalar."); + PADDLE_ENFORCE_EQ( + framework::product(lr_dim), 1, + platform::errors::InvalidArgument( + "Learning Rate should be a scalar. 
But received dimension[%s]", + lr_dim)); ctx->SetOutputDim("ParamOut", param_dim); ctx->SetOutputDim("MomentOut", param_dim); diff --git a/paddle/fluid/operators/optimizers/proximal_gd_op.cc b/paddle/fluid/operators/optimizers/proximal_gd_op.cc index cf3c3e2ccb92cd588edea6468b61e6d2e5678be5..edaf55129ad229c5cfc6b5448cf16ec98a08169d 100644 --- a/paddle/fluid/operators/optimizers/proximal_gd_op.cc +++ b/paddle/fluid/operators/optimizers/proximal_gd_op.cc @@ -24,23 +24,29 @@ class ProximalGDOp : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Param"), - "Input(Param) of ProximalGDOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Grad"), - "Input(Grad) of ProximalGDOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("LearningRate"), - "Input(LearningRate) of ProximalGDOp should not be null."); + OP_INOUT_CHECK(ctx->HasInput("Param"), "Input", "Param", "ProximalGDOp"); + OP_INOUT_CHECK(ctx->HasInput("Grad"), "Input", "Grad", "ProximalGDOp"); + OP_INOUT_CHECK(ctx->HasInput("LearningRate"), "Input", "LearningRate", + "ProximalGDOp"); - PADDLE_ENFORCE(ctx->HasOutput("ParamOut"), - "Output(ParamOut) of ProximalGDOp should not be null."); + OP_INOUT_CHECK(ctx->HasOutput("ParamOut"), "Output", "ParamOut", + "ProximalGDOp"); auto param_dim = ctx->GetInputDim("Param"); PADDLE_ENFORCE_EQ(param_dim, ctx->GetInputDim("Grad"), - "Two input of ProximalGD Op's dimension must be same."); + platform::errors::InvalidArgument( + "The shape of Input(Param) should be equal to the " + "Input(Grad) of ProximalGD Op. 
But received " + "Input(Param).dimensions=[%s], " + "Input(Grad).dimensions=[%s]", + param_dim, ctx->GetInputDim("Grad"))); auto lr_dim = ctx->GetInputDim("LearningRate"); - PADDLE_ENFORCE_EQ(framework::product(lr_dim), 1, - "Learning Rate should be a scalar."); + PADDLE_ENFORCE_EQ( + framework::product(lr_dim), 1, + platform::errors::InvalidArgument( + "Learning Rate should be a scalar. But received dimensions:[%s]", + lr_dim)); ctx->SetOutputDim("ParamOut", param_dim); }