diff --git a/paddle/fluid/operators/distributed_ops/allreduce_op.h b/paddle/fluid/operators/distributed_ops/allreduce_op.h index 89d02eb0a066c08b8979ce47c7e6e6b5b149c1c5..bbb674b5ebca2a5aec6d27412ac9cb61fcc0f169 100644 --- a/paddle/fluid/operators/distributed_ops/allreduce_op.h +++ b/paddle/fluid/operators/distributed_ops/allreduce_op.h @@ -33,8 +33,9 @@ class AllReduceOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { auto place = ctx.GetPlace(); - PADDLE_ENFORCE(is_gpu_place(place), - "AllReduce op can run on gpu place only for now."); + PADDLE_ENFORCE_EQ(is_gpu_place(place), true, + platform::errors::PreconditionNotMet( + "AllReduce op can run on gpu place only for now.")); #if defined(PADDLE_WITH_NCCL) auto& dev_ctx = ctx.template device_context(); auto in = ctx.Input("X"); @@ -49,7 +50,8 @@ class AllReduceOpKernel : public framework::OpKernel { auto* comm = dev_ctx.nccl_comm(); // FIXME(typhoonzero): should use nccl stream here. 
auto stream = dev_ctx.stream(); - PADDLE_ENFORCE_NOT_NULL(stream, "Should initialize NCCL firstly."); + PADDLE_ENFORCE_NOT_NULL( + stream, platform::errors::NotFound("Should initialize NCCL firstly.")); int reduce_type = ctx.Attr("reduce_type"); ncclRedOp_t red_type = ncclSum; @@ -67,7 +69,7 @@ class AllReduceOpKernel : public framework::OpKernel { red_type = ncclMin; break; } - PADDLE_ENFORCE(platform::dynload::ncclAllReduce( + PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclAllReduce( sendbuff, recvbuff, numel, static_cast(dtype), red_type, comm, stream)); if (ctx.Attr("sync_mode")) { diff --git a/paddle/fluid/operators/distributed_ops/broadcast_op.cc b/paddle/fluid/operators/distributed_ops/broadcast_op.cc index 6ae98af1e2ac1916b431e72b137e148d90df747f..535cf7014419292863a684eaaebbf15d367671ab 100644 --- a/paddle/fluid/operators/distributed_ops/broadcast_op.cc +++ b/paddle/fluid/operators/distributed_ops/broadcast_op.cc @@ -26,10 +26,12 @@ class BroadcastOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), - "Input(X) of BroadcastOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Output) of ConvOp should not be null."); + PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true, + platform::errors::InvalidArgument( + "Input(X) of BroadcastOp should not be null.")); + PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true, + platform::errors::InvalidArgument( + "Output(Out) of BroadcastOp should not be null.")); } }; diff --git a/paddle/fluid/operators/distributed_ops/broadcast_op.cu.cc b/paddle/fluid/operators/distributed_ops/broadcast_op.cu.cc index 2d87b48ba80f279abf9a4c4c40691c5a285db38a..6a3adec39e8e7391aa2e40e424928be8df62db22 100644 --- a/paddle/fluid/operators/distributed_ops/broadcast_op.cu.cc +++ b/paddle/fluid/operators/distributed_ops/broadcast_op.cu.cc @@ -34,8 +34,10 @@ template class
NCCLBroadcastOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), - "The place of ExecutionContext should be CUDAPlace."); + PADDLE_ENFORCE_EQ( + platform::is_gpu_place(ctx.GetPlace()), true, + platform::errors::PreconditionNotMet( + "The place of ExecutionContext should be CUDAPlace.")); #if defined(PADDLE_WITH_NCCL) int dev_id = boost::get(ctx.GetPlace()).device; @@ -43,19 +45,22 @@ class NCCLBroadcastOpKernel : public framework::OpKernel { auto in = ctx.Input("X"); auto out = ctx.Output("Out"); - PADDLE_ENFORCE(out->IsInitialized(), - "Currently, the output of broadcast op must be initialized, " - "because this op can only be an In-Place operation."); + PADDLE_ENFORCE_EQ( + out->IsInitialized(), true, + platform::errors::PreconditionNotMet( + "Currently, the output of broadcast op must be initialized, " + "because this op can only be an In-Place operation.")); void* send_recv_buffer = out->mutable_data(ctx.GetPlace()); PADDLE_ENFORCE_EQ( send_recv_buffer, in->data(), - "Currently, the broadcast op can only be an In-Place operation."); + platform::errors::PreconditionNotMet("Currently, the broadcast op can " + "only be an In-Place operation.")); auto& dev_ctx = ctx.template device_context(); auto comm = dev_ctx.nccl_comm(); auto stream = dev_ctx.stream(); - PADDLE_ENFORCE(platform::dynload::ncclBcast( + PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::ncclBcast( send_recv_buffer, static_cast(in->numel()), platform::ToNCCLDataType(in->type()), root_dev_id, comm, stream)); diff --git a/paddle/fluid/operators/eye_op.cc b/paddle/fluid/operators/eye_op.cc index aa7f7035ba36a70f3cf132e0aa004cf580fe256d..c182c6a5d768feacf7ad70a54406f84f311c3a9c 100644 --- a/paddle/fluid/operators/eye_op.cc +++ b/paddle/fluid/operators/eye_op.cc @@ -22,16 +22,20 @@ class EyeOp : public framework::OperatorWithKernel { using
framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of EyeOP should not be null."); + PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true, + platform::errors::InvalidArgument( + "Output(Out) of EyeOP should not be null.")); auto num_rows = ctx->Attrs().Get("num_rows"); - PADDLE_ENFORCE(num_rows >= 0, - "The value of Input(num_rows) should be non-negative int."); + PADDLE_ENFORCE_EQ( + num_rows >= 0, true, + platform::errors::InvalidArgument( + "The value of Input(num_rows) should be non-negative int.")); auto num_columns = ctx->Attrs().Get("num_columns"); if (num_columns == -1) num_columns = num_rows; - PADDLE_ENFORCE( - num_columns >= 0, - "The value of Input(num_columns) should be non-negative int."); + PADDLE_ENFORCE_EQ( + num_columns >= 0, true, + platform::errors::InvalidArgument( + "The value of Input(num_columns) should be non-negative int.")); ctx->SetOutputDim("Out", {num_rows, num_columns}); } diff --git a/paddle/fluid/operators/gather.cu.h b/paddle/fluid/operators/gather.cu.h index b3264ec0ad3fa984726244d911dab6f7bd8e95b8..eff7fc72799e1dd7f736daf1f40adb01af243aaf 100644 --- a/paddle/fluid/operators/gather.cu.h +++ b/paddle/fluid/operators/gather.cu.h @@ -78,12 +78,14 @@ void GPUGather(const platform::DeviceContext& ctx, const Tensor& src, // check index of shape 1-D if (index.dims().size() == 1) { PADDLE_ENFORCE_GT(index.dims()[0], 0, - "The index of gather_op should not be empty when the " - "index's rank is 1."); + platform::errors::InvalidArgument( + "The index of gather_op should not be empty " + "when the index's rank is 1.")); } else if (index.dims().size() == 2) { PADDLE_ENFORCE_EQ(index.dims()[1], 1, - " If the index's rank of gather_op is 2, the second " - "dimension should be 1."); + platform::errors::InvalidArgument( + "If the index's rank of gather_op is 2," + " the second dimension should be 1.")); } int index_size =
index.dims()[0]; diff --git a/paddle/fluid/operators/gather.h b/paddle/fluid/operators/gather.h index 26fb93c2ebb295fc73832d50c2f8472e96bcb25f..f5a7bffe4745360a307a4b7c61b30c871cf6c756 100644 --- a/paddle/fluid/operators/gather.h +++ b/paddle/fluid/operators/gather.h @@ -36,15 +36,23 @@ using framework::Tensor; template void CPUGather(const platform::DeviceContext& ctx, const Tensor& src, const Tensor& index, Tensor* output) { - PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true); + PADDLE_ENFORCE_EQ( + platform::is_cpu_place(ctx.GetPlace()), true, + platform::errors::PreconditionNotMet("It should be running on the CPU.")); // check index of shape 1-D if (index.dims().size() == 2) { - PADDLE_ENFORCE_EQ(index.dims()[1], 1, - "index.dims()[1] should be 1 when index.dims().size() == " - "2 in gather_op."); + PADDLE_ENFORCE_EQ( + index.dims()[1], 1, + platform::errors::InvalidArgument( + "index.dims()[1] should be 1 when index.dims().size() = 2 " + "in gather_op, but received value is [%d].", + index.dims()[1])); } else { PADDLE_ENFORCE_EQ(index.dims().size(), 1, - "index.dims().size() should be 1 or 2 in gather_op."); + platform::errors::InvalidArgument( + "index.dims().size() should be 1 or 2 in gather_op, " + "but received shape's size is [%d].", + index.dims().size())); } int64_t index_size = index.dims()[0]; @@ -69,8 +77,9 @@ void CPUGather(const platform::DeviceContext& ctx, const Tensor& src, template void CPUGatherNd(const platform::DeviceContext& ctx, const Tensor& input, const Tensor& index, Tensor* output) { - PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true, - "It should be running on the CPU"); + PADDLE_ENFORCE_EQ( + platform::is_cpu_place(ctx.GetPlace()), true, + platform::errors::PreconditionNotMet("It should be running on the CPU.")); auto index_dims = index.dims(); auto index_dims_size = index_dims.size(); @@ -98,11 +107,14 @@ void CPUGatherNd(const platform::DeviceContext& ctx, const Tensor& input, int64_t temp = 1; for
(int64_t j = end_size - 1; j >= 0; --j) { IndexT index_value = p_index[i * end_size + j]; - PADDLE_ENFORCE_LT(index_value, input_dims[j], - "Input(index[-1)] has wrong value, it is %d", - index_value); - PADDLE_ENFORCE_GE(index_value, 0UL, - "The value of Input(index) must be no less than 0"); + PADDLE_ENFORCE_LT( + index_value, input_dims[j], + platform::errors::InvalidArgument( + "Input(index[-1)] has wrong value, it is [%d]", index_value)); + PADDLE_ENFORCE_GE( + index_value, 0UL, + platform::errors::InvalidArgument( + "The value of Input(index) must be no less than 0")); index_ += (index_value * temp); temp *= input_dims[j]; diff --git a/paddle/fluid/operators/gather_nd_op.cc b/paddle/fluid/operators/gather_nd_op.cc index 28daa3b3dd00c6c2987edc32ee6a5395862aa2b8..e058564b426c8bd4d96dd3ce778b59b407d99636 100644 --- a/paddle/fluid/operators/gather_nd_op.cc +++ b/paddle/fluid/operators/gather_nd_op.cc @@ -27,11 +27,14 @@ class GatherNdOp : public framework::OperatorWithKernel { void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true, - "Input(X) of GatherNdOp should not be null."); + platform::errors::InvalidArgument( + "Input(X) of GatherNdOp should not be null.")); PADDLE_ENFORCE_EQ(ctx->HasInput("Index"), true, - "Input(Index) of GatherNdOp should not be null."); + platform::errors::InvalidArgument( + "Input(Index) of GatherNdOp should not be null.")); PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true, - "Output(Out) of GatherNdOp should not be null."); + platform::errors::InvalidArgument( + "Output(Out) of GatherNdOp should not be null.")); auto x_dims = ctx->GetInputDim("X"); auto x_dims_size = x_dims.size(); @@ -40,9 +43,11 @@ class GatherNdOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_LE( index_dims[index_dims_size - 1], x_dims_size, - "Input(Index).shape[-1] should be no greater than Input(X).rank"); + platform::errors::InvalidArgument( + "Input(Index).shape[-1] should be no greater than 
Input(X).rank")); PADDLE_ENFORCE_GE(index_dims_size, 2UL, - "The rank of Input(Index) should be greater than 1"); + platform::errors::InvalidArgument( + "The rank of Input(Index) should be greater than 1")); std::vector result_dims; // The result dims is diff --git a/paddle/fluid/operators/gather_nd_op.cu b/paddle/fluid/operators/gather_nd_op.cu index 68f54a511597ede90427bcb24251cc770ceb5add..ee51df68c18e5748f3b01045709e040401130d69 100644 --- a/paddle/fluid/operators/gather_nd_op.cu +++ b/paddle/fluid/operators/gather_nd_op.cu @@ -25,7 +25,8 @@ class GatherNdOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true, - "This kernel only runs on GPU device."); + platform::errors::PreconditionNotMet( + "This kernel only runs on GPU device.")); auto *x = ctx.Input("X"); auto *index = ctx.Input("Index"); auto *output = ctx.Output("Out"); @@ -35,12 +36,15 @@ class GatherNdOpCUDAKernel : public framework::OpKernel { const auto &index_type = index->type(); bool index_type_match = index_type == framework::proto::VarType::INT32 || index_type == framework::proto::VarType::INT64; - PADDLE_ENFORCE_EQ( - index_type_match, true, - "Index holds the wrong type, it holds %s, but desires to be %s or %s", - paddle::framework::DataTypeToString(index_type), - paddle::framework::DataTypeToString(framework::proto::VarType::INT32), - paddle::framework::DataTypeToString(framework::proto::VarType::INT64)); + PADDLE_ENFORCE_EQ(index_type_match, true, + platform::errors::InvalidArgument( + "Index holds the wrong type, it holds [%s], but " + "desires to be [%s] or [%s].", + paddle::framework::DataTypeToString(index_type), + paddle::framework::DataTypeToString( + framework::proto::VarType::INT32), + paddle::framework::DataTypeToString( + framework::proto::VarType::INT64))); if (index_type == framework::proto::VarType::INT32) { GPUGatherNd(ctx, *x, *index, output); } 
else if (index_type == framework::proto::VarType::INT64) { @@ -54,7 +58,8 @@ class GatherNdGradOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true, - "This kernel only runs on GPU device."); + platform::errors::PreconditionNotMet( + "This kernel only runs on GPU device.")); auto *index = ctx.Input("Index"); auto *dX = ctx.Output(framework::GradVarName("X")); auto *dO = ctx.Input(framework::GradVarName("Out")); @@ -70,12 +75,15 @@ class GatherNdGradOpCUDAKernel : public framework::OpKernel { bool index_type_match = index_type == framework::proto::VarType::INT32 || index_type == framework::proto::VarType::INT64; - PADDLE_ENFORCE_EQ( - index_type_match, true, - "Index holds the wrong type, it holds %s, but desires to be %s or %s", - paddle::framework::DataTypeToString(index_type), - paddle::framework::DataTypeToString(framework::proto::VarType::INT32), - paddle::framework::DataTypeToString(framework::proto::VarType::INT64)); + PADDLE_ENFORCE_EQ(index_type_match, true, + platform::errors::InvalidArgument( + "Index holds the wrong type, it holds [%s]," + "but desires to be [%s] or [%s].", + paddle::framework::DataTypeToString(index_type), + paddle::framework::DataTypeToString( + framework::proto::VarType::INT32), + paddle::framework::DataTypeToString( + framework::proto::VarType::INT64))); if (index_type == framework::proto::VarType::INT32) { GPUScatterNdAdd(ctx, *dO, *index, dX); diff --git a/paddle/fluid/operators/gather_nd_op.h b/paddle/fluid/operators/gather_nd_op.h index 059ca54c468663686abf0270dedfca727689b6db..e89db1e8732e266061579d1860c1e407506e5c40 100644 --- a/paddle/fluid/operators/gather_nd_op.h +++ b/paddle/fluid/operators/gather_nd_op.h @@ -27,8 +27,9 @@ template class GatherNdOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { - 
PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true, - "This kernel only runs on CPU."); + PADDLE_ENFORCE_EQ( + platform::is_cpu_place(ctx.GetPlace()), true, + platform::errors::PreconditionNotMet("This kernel only runs on CPU.")); auto *x = ctx.Input("X"); auto *index = ctx.Input("Index"); @@ -40,12 +41,15 @@ class GatherNdOpKernel : public framework::OpKernel { const auto &index_type = index->type(); bool index_type_match = index_type == framework::proto::VarType::INT32 || index_type == framework::proto::VarType::INT64; - PADDLE_ENFORCE_EQ( - index_type_match, true, - "Index holds the wrong type, it holds %s, but desires to be %s or %s", - paddle::framework::DataTypeToString(index_type), - paddle::framework::DataTypeToString(framework::proto::VarType::INT32), - paddle::framework::DataTypeToString(framework::proto::VarType::INT64)); + PADDLE_ENFORCE_EQ(index_type_match, true, + platform::errors::InvalidArgument( + "Index holds the wrong type, it holds [%s]," + "but desires to be [%s] or [%s]", + paddle::framework::DataTypeToString(index_type), + paddle::framework::DataTypeToString( + framework::proto::VarType::INT32), + paddle::framework::DataTypeToString( + framework::proto::VarType::INT64))); if (index_type == framework::proto::VarType::INT32) { CPUGatherNd(ctx.device_context(), *x, *index, output); } else if (index_type == framework::proto::VarType::INT64) { @@ -58,8 +62,9 @@ template class GatherNdGradOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { - PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true, - "This kernel only runs on CPU."); + PADDLE_ENFORCE_EQ( + platform::is_cpu_place(ctx.GetPlace()), true, + platform::errors::PreconditionNotMet("This kernel only runs on CPU.")); auto *index = ctx.Input("Index"); auto *dX = ctx.Output(framework::GradVarName("X")); auto *dO = ctx.Input(framework::GradVarName("Out")); @@ -73,12 +78,15 @@ class GatherNdGradOpKernel : public 
framework::OpKernel { const auto &index_type = index->type(); bool index_type_match = index_type == framework::proto::VarType::INT32 || index_type == framework::proto::VarType::INT64; - PADDLE_ENFORCE_EQ( - index_type_match, true, - "Index holds the wrong type, it holds %s, but desires to be %s or %s", - paddle::framework::DataTypeToString(index_type), - paddle::framework::DataTypeToString(framework::proto::VarType::INT32), - paddle::framework::DataTypeToString(framework::proto::VarType::INT64)); + PADDLE_ENFORCE_EQ(index_type_match, true, + platform::errors::InvalidArgument( + "Index holds the wrong type, it holds [%s]," + "but desires to be [%s] or [%s]", + paddle::framework::DataTypeToString(index_type), + paddle::framework::DataTypeToString( + framework::proto::VarType::INT32), + paddle::framework::DataTypeToString( + framework::proto::VarType::INT64))); if (index_type == framework::proto::VarType::INT32) { ScatterNdAdd(ctx, *dO, *index, dX); } else if (index_type == framework::proto::VarType::INT64) { diff --git a/paddle/fluid/operators/gather_op.cc b/paddle/fluid/operators/gather_op.cc index 015ff0713c2063b8c66a523bdf810f38712c815f..9b0762837303a26c50bca762a790720dd6e687ad 100644 --- a/paddle/fluid/operators/gather_op.cc +++ b/paddle/fluid/operators/gather_op.cc @@ -26,12 +26,15 @@ class GatherOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), - "Input(X) of GatherOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Index"), - "Input(Index) of GatherOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of GatherOp should not be null."); + PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true, + platform::errors::InvalidArgument( + "Input(X) of GatherOp should not be null.")); + PADDLE_ENFORCE_EQ(ctx->HasInput("Index"), true, + platform::errors::InvalidArgument( + "Input(Index) of 
GatherOp should not be null.")); + PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true, + platform::errors::InvalidArgument( + "Output(Out) of GatherOp should not be null.")); auto index_dims = ctx->GetInputDim("Index"); PADDLE_ENFORCE(index_dims.size() == 1 || diff --git a/paddle/fluid/operators/gather_op.cu b/paddle/fluid/operators/gather_op.cu index 061f92c76c32fbc599bd8f5d32bb110c276d748f..5bef547c0542b922f646f72ffb7310ef4eb279e9 100644 --- a/paddle/fluid/operators/gather_op.cu +++ b/paddle/fluid/operators/gather_op.cu @@ -24,8 +24,9 @@ template class GatherOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { - PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), - "This kernel only runs on GPU device."); + PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true, + platform::errors::PreconditionNotMet( + "This kernel only runs on GPU device.")); auto *x = ctx.Input("X"); auto *index = ctx.Input("Index"); auto *output = ctx.Output("Out"); @@ -35,12 +36,15 @@ class GatherOpCUDAKernel : public framework::OpKernel { const auto &index_type = index->type(); bool index_type_match = index_type == framework::proto::VarType::INT32 || index_type == framework::proto::VarType::INT64; - PADDLE_ENFORCE( - index_type_match, - "Index holds the wrong type, it holds %s, but desires to be %s or %s", - paddle::framework::DataTypeToString(index_type), - paddle::framework::DataTypeToString(framework::proto::VarType::INT32), - paddle::framework::DataTypeToString(framework::proto::VarType::INT64)); + PADDLE_ENFORCE_EQ(index_type_match, true, + platform::errors::InvalidArgument( + "Index holds the wrong type, it holds [%s]," + "but desires to be [%s] or [%s].", + paddle::framework::DataTypeToString(index_type), + paddle::framework::DataTypeToString( + framework::proto::VarType::INT32), + paddle::framework::DataTypeToString( + framework::proto::VarType::INT64))); if (index_type == framework::proto::VarType::INT32) { 
GPUGather(ctx.device_context(), *x, *index, output); } else if (index_type == framework::proto::VarType::INT64) { @@ -53,8 +57,9 @@ template class GatherGradOpCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { - PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), - "This kernel only runs on GPU device."); + PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true, + platform::errors::PreconditionNotMet( + "This kernel only runs on GPU device.")); auto *index = ctx.Input("Index"); auto *dX = ctx.Output(framework::GradVarName("X")); auto *dO = ctx.Input(framework::GradVarName("Out")); @@ -69,12 +74,15 @@ class GatherGradOpCUDAKernel : public framework::OpKernel { const auto &index_type = index->type(); bool index_type_match = index_type == framework::proto::VarType::INT32 || index_type == framework::proto::VarType::INT64; - PADDLE_ENFORCE( - index_type_match, - "Index holds the wrong type, it holds %s, but desires to be %s or %s", - paddle::framework::DataTypeToString(index_type), - paddle::framework::DataTypeToString(framework::proto::VarType::INT32), - paddle::framework::DataTypeToString(framework::proto::VarType::INT64)); + PADDLE_ENFORCE_EQ(index_type_match, true, + platform::errors::InvalidArgument( + "Index holds the wrong type, it holds [%s]," + "but desires to be [%s] or [%s].", + paddle::framework::DataTypeToString(index_type), + paddle::framework::DataTypeToString( + framework::proto::VarType::INT32), + paddle::framework::DataTypeToString( + framework::proto::VarType::INT64))); if (index_type == framework::proto::VarType::INT32) { GPUScatterAssign(ctx, *dO, *index, dX, ctx.Attr("overwrite")); diff --git a/paddle/fluid/operators/gather_op.h b/paddle/fluid/operators/gather_op.h index 852790a4c63c85d89dd19a870fa84991798219eb..e4ce13ca8fc0b49e997749d0f47f15213a3b44f7 100644 --- a/paddle/fluid/operators/gather_op.h +++ b/paddle/fluid/operators/gather_op.h @@ -27,8 +27,9 @@ template 
class GatherOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { - PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()), - "This kernel only runs on CPU."); + PADDLE_ENFORCE_EQ( + platform::is_cpu_place(ctx.GetPlace()), true, + platform::errors::PreconditionNotMet("This kernel only runs on CPU.")); auto *x = ctx.Input("X"); auto *index = ctx.Input("Index"); @@ -40,12 +41,15 @@ class GatherOpKernel : public framework::OpKernel { const auto &index_type = index->type(); bool index_type_match = index_type == framework::proto::VarType::INT32 || index_type == framework::proto::VarType::INT64; - PADDLE_ENFORCE( - index_type_match, - "Index holds the wrong type, it holds %s, but desires to be %s or %s", - paddle::framework::DataTypeToString(index_type), - paddle::framework::DataTypeToString(framework::proto::VarType::INT32), - paddle::framework::DataTypeToString(framework::proto::VarType::INT64)); + PADDLE_ENFORCE_EQ(index_type_match, true, + platform::errors::InvalidArgument( + "Index holds the wrong type, it holds [%s]," + "but desires to be [%s] or [%s].", + paddle::framework::DataTypeToString(index_type), + paddle::framework::DataTypeToString( + framework::proto::VarType::INT32), + paddle::framework::DataTypeToString( + framework::proto::VarType::INT64))); if (index_type == framework::proto::VarType::INT32) { CPUGather(ctx.device_context(), *x, *index, output); } else if (index_type == framework::proto::VarType::INT64) { @@ -58,8 +62,9 @@ template class GatherGradientOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { - PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()), - "This kernel only runs on CPU."); + PADDLE_ENFORCE_EQ( + platform::is_cpu_place(ctx.GetPlace()), true, + platform::errors::PreconditionNotMet("This kernel only runs on CPU.")); auto *index = ctx.Input("Index"); auto *dX = ctx.Output(framework::GradVarName("X")); @@ 
-76,12 +81,15 @@ class GatherGradientOpKernel : public framework::OpKernel { const auto &index_type = index->type(); bool index_type_match = index_type == framework::proto::VarType::INT32 || index_type == framework::proto::VarType::INT64; - PADDLE_ENFORCE( - index_type_match, - "Index holds the wrong type, it holds %s, but desires to be %s or %s", - paddle::framework::DataTypeToString(index_type), - paddle::framework::DataTypeToString(framework::proto::VarType::INT32), - paddle::framework::DataTypeToString(framework::proto::VarType::INT64)); + PADDLE_ENFORCE_EQ(index_type_match, true, + platform::errors::InvalidArgument( + "Index holds the wrong type, it holds [%s]," + "but desires to be [%s] or [%s].", + paddle::framework::DataTypeToString(index_type), + paddle::framework::DataTypeToString( + framework::proto::VarType::INT32), + paddle::framework::DataTypeToString( + framework::proto::VarType::INT64))); if (index_type == framework::proto::VarType::INT32) { if (overwrite) { ScatterAssign(ctx.device_context(), *dO, *index, dX);