From cf70d5b3502c59851f95de79eadb6278988466b2 Mon Sep 17 00:00:00 2001 From: tangwei12 Date: Wed, 14 Oct 2020 14:45:00 +0800 Subject: [PATCH] fix paddle error informations (#27889) --- .../lookup_sparse_table_fuse_adam_op.cc | 21 ++++++++++++------- .../lookup_sparse_table_fuse_sgd_op.cc | 21 ++++++++++++------- .../distributed_ops/split_byref_op.cc | 6 +++--- 3 files changed, 29 insertions(+), 19 deletions(-) diff --git a/paddle/fluid/operators/distributed_ops/lookup_sparse_table_fuse_adam_op.cc b/paddle/fluid/operators/distributed_ops/lookup_sparse_table_fuse_adam_op.cc index e53ce8cc67c..b8328b88da7 100644 --- a/paddle/fluid/operators/distributed_ops/lookup_sparse_table_fuse_adam_op.cc +++ b/paddle/fluid/operators/distributed_ops/lookup_sparse_table_fuse_adam_op.cc @@ -23,22 +23,27 @@ class LargeScaleFuseAdamOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Grad"), - "Input(Grad) of LargeScaleFuseAdamOp should not be null."); + PADDLE_ENFORCE( + ctx->HasInput("Grad"), + platform::errors::InvalidArgument( + "Input(Grad) of LargeScaleFuseAdamOp should not be null.")); PADDLE_ENFORCE( ctx->HasInput("LearningRate"), - "Input(LearningRate) of LargeScaleFuseAdamOp should not be null."); + platform::errors::InvalidArgument( + "Input(LearningRate) of LargeScaleFuseAdamOp should not be null.")); auto lr_dims = ctx->GetInputDim("LearningRate"); PADDLE_ENFORCE_NE(framework::product(lr_dims), 0, - "Maybe the Input variable LearningRate has not " - "been initialized. You may need to confirm " - "if you put exe.run(startup_program) " - "after optimizer.minimize function."); + platform::errors::InvalidArgument( + "Maybe the Input variable LearningRate has not " + "been initialized. You may need to confirm " + "if you put exe.run(startup_program) " + "after optimizer.minimize function.")); PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1, - "Learning rate should have 1 element"); + platform::errors::InvalidArgument( + "Learning rate should have 1 element")); } protected: diff --git a/paddle/fluid/operators/distributed_ops/lookup_sparse_table_fuse_sgd_op.cc b/paddle/fluid/operators/distributed_ops/lookup_sparse_table_fuse_sgd_op.cc index 010658b5280..8794b87f3ff 100644 --- a/paddle/fluid/operators/distributed_ops/lookup_sparse_table_fuse_sgd_op.cc +++ b/paddle/fluid/operators/distributed_ops/lookup_sparse_table_fuse_sgd_op.cc @@ -23,22 +23,27 @@ class LargeScaleFuseSGDOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Grad"), - "Input(Grad) of LargeScaleFuseSGDOp should not be null."); + PADDLE_ENFORCE( + ctx->HasInput("Grad"), + platform::errors::InvalidArgument( + "Input(Grad) of LargeScaleFuseSGDOp should not be null.")); PADDLE_ENFORCE( ctx->HasInput("LearningRate"), - "Input(LearningRate) of LargeScaleFuseSGDOp should not be null."); + platform::errors::InvalidArgument( + "Input(LearningRate) of LargeScaleFuseSGDOp should not be null.")); auto lr_dims = ctx->GetInputDim("LearningRate"); PADDLE_ENFORCE_NE(framework::product(lr_dims), 0, - "Maybe the Input variable LearningRate has not " - "been initialized. You may need to confirm " - "if you put exe.run(startup_program) " - "after optimizer.minimize function."); + platform::errors::InvalidArgument( + "Maybe the Input variable LearningRate has not " + "been initialized. You may need to confirm " + "if you put exe.run(startup_program) " + "after optimizer.minimize function.")); PADDLE_ENFORCE_EQ(framework::product(lr_dims), 1, - "Learning rate should have 1 element"); + platform::errors::InvalidArgument( + "Learning rate should have 1 element")); } protected: diff --git a/paddle/fluid/operators/distributed_ops/split_byref_op.cc b/paddle/fluid/operators/distributed_ops/split_byref_op.cc index 5d26c80f883..042a22b8ff1 100644 --- a/paddle/fluid/operators/distributed_ops/split_byref_op.cc +++ b/paddle/fluid/operators/distributed_ops/split_byref_op.cc @@ -40,9 +40,9 @@ class SplitByrefOp : public framework::OperatorWithKernel { if (ctx->IsRuntime()) { in_axis_dim = in_dims[0]; } - PADDLE_ENFORCE_EQ(in_axis_dim % num, 0, - "tensor split does not result" - " in an equal division"); + PADDLE_ENFORCE_EQ(in_axis_dim % num, 0, platform::errors::InvalidArgument( + "tensor split does not result" + " in an equal division")); size_t out_axis_dim = in_axis_dim / num; for (size_t i = 0; i < outs_number; ++i) { auto dim = in_dims; -- GitLab