diff --git a/paddle/fluid/operators/distributed_ops/lookup_sparse_table_fuse_adam_op.cc b/paddle/fluid/operators/distributed_ops/lookup_sparse_table_fuse_adam_op.cc index e53ce8cc67c08269e15a20e2cd2fc57a2c5ace17..b8328b88da7d12141bf4ed7974af8fca6321a1a9 100644 --- a/paddle/fluid/operators/distributed_ops/lookup_sparse_table_fuse_adam_op.cc +++ b/paddle/fluid/operators/distributed_ops/lookup_sparse_table_fuse_adam_op.cc @@ -23,22 +23,27 @@ class LargeScaleFuseAdamOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Grad"), - "Input(Grad) of LargeScaleFuseAdamOp should not be null."); + PADDLE_ENFORCE( + ctx->HasInput("Grad"), + platform::errors::InvalidArgument( + "Input(Grad) of LargeScaleFuseAdamOp should not be null.")); PADDLE_ENFORCE( ctx->HasInput("LearningRate"), - "Input(LearningRate) of LargeScaleFuseAdamOp should not be null."); + platform::errors::InvalidArgument( + "Input(LearningRate) of LargeScaleFuseAdamOp should not be null.")); auto lr_dims = ctx->GetInputDim("LearningRate"); PADDLE_ENFORCE_NE(framework::product(lr_dims), 0, - "Maybe the Input variable LearningRate has not " - "been initialized. You may need to confirm " - "if you put exe.run(startup_program) " - "after optimizer.minimize function."); + platform::errors::InvalidArgument( + "Maybe the Input variable LearningRate has not " + "been initialized. 
You may need to confirm " + "if you put exe.run(startup_program) " + "after optimizer.minimize function.")); PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1, - "Learning rate should have 1 element"); + platform::errors::InvalidArgument( + "Learning rate should have 1 element")); } protected: diff --git a/paddle/fluid/operators/distributed_ops/lookup_sparse_table_fuse_sgd_op.cc b/paddle/fluid/operators/distributed_ops/lookup_sparse_table_fuse_sgd_op.cc index 010658b5280d7feeb683112b401dbcaaa265daac..8794b87f3ff40786712d6a1d06bb0dc2e0607671 100644 --- a/paddle/fluid/operators/distributed_ops/lookup_sparse_table_fuse_sgd_op.cc +++ b/paddle/fluid/operators/distributed_ops/lookup_sparse_table_fuse_sgd_op.cc @@ -23,22 +23,27 @@ class LargeScaleFuseSGDOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Grad"), - "Input(Grad) of LargeScaleFuseSGDOp should not be null."); + PADDLE_ENFORCE( + ctx->HasInput("Grad"), + platform::errors::InvalidArgument( + "Input(Grad) of LargeScaleFuseSGDOp should not be null.")); PADDLE_ENFORCE( ctx->HasInput("LearningRate"), - "Input(LearningRate) of LargeScaleFuseSGDOp should not be null."); + platform::errors::InvalidArgument( + "Input(LearningRate) of LargeScaleFuseSGDOp should not be null.")); auto lr_dims = ctx->GetInputDim("LearningRate"); PADDLE_ENFORCE_NE(framework::product(lr_dims), 0, - "Maybe the Input variable LearningRate has not " - "been initialized. You may need to confirm " - "if you put exe.run(startup_program) " - "after optimizer.minimize function."); + platform::errors::InvalidArgument( + "Maybe the Input variable LearningRate has not " + "been initialized. 
You may need to confirm " + "if you put exe.run(startup_program) " + "after optimizer.minimize function.")); PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1, - "Learning rate should have 1 element"); + platform::errors::InvalidArgument( + "Learning rate should have 1 element")); } protected: diff --git a/paddle/fluid/operators/distributed_ops/split_byref_op.cc b/paddle/fluid/operators/distributed_ops/split_byref_op.cc index 5d26c80f8830a4edda37bfb83be93572ce605f61..042a22b8ff19989cff887e1d8d343c4f26410f6b 100644 --- a/paddle/fluid/operators/distributed_ops/split_byref_op.cc +++ b/paddle/fluid/operators/distributed_ops/split_byref_op.cc @@ -40,9 +40,9 @@ class SplitByrefOp : public framework::OperatorWithKernel { if (ctx->IsRuntime()) { in_axis_dim = in_dims[0]; } - PADDLE_ENFORCE_EQ(in_axis_dim % num, 0, - "tensor split does not result" - " in an equal division"); + PADDLE_ENFORCE_EQ(in_axis_dim % num, 0, platform::errors::InvalidArgument( + "tensor split does not result" + " in an equal division")); size_t out_axis_dim = in_axis_dim / num; for (size_t i = 0; i < outs_number; ++i) { auto dim = in_dims;