From c3c61d34c1c33ef6aad003cfb17a1a53a2e3d08c Mon Sep 17 00:00:00 2001
From: lidanqing
Date: Tue, 26 May 2020 07:27:16 +0200
Subject: [PATCH] Update PADDLE_ENFORCE in DNNL related ops (#24333)

* Update PADDLE_ENFORCE in DNNL related ops test=develop

* Abstract macro of OP_GET_PLACE_CHECK test=develop

* update according to reviews

* update GET_PLACE_CPU_CHECK

* fix typo test=develop

* revert macro test=develop
---
 .../operators/mkldnn/activation_mkldnn_op.cc | 5 ++--
 .../operators/mkldnn/batch_norm_mkldnn_op.cc | 12 +++++++--
 .../fluid/operators/mkldnn/conv_mkldnn_op.cc | 11 ++++----
 .../mkldnn/conv_transpose_mkldnn_op.cc | 6 ++---
 .../fluid/operators/mkldnn/lrn_mkldnn_op.cc | 26 ++++++++++++-------
 .../operators/mkldnn/mkldnn_activation_op.h | 8 ++----
 .../fluid/operators/mkldnn/mul_mkldnn_op.cc | 6 ++---
 .../fluid/operators/mkldnn/pool_mkldnn_op.cc | 11 ++++----
 .../operators/mkldnn/softmax_mkldnn_op.cc | 6 ++---
 .../fluid/operators/mkldnn/sum_mkldnn_op.cc | 5 ++--
 .../operators/mkldnn/transpose_mkldnn_op.cc | 10 ++++---
 paddle/fluid/platform/mkldnn_reuse.h | 7 +++--
 12 files changed, 66 insertions(+), 47 deletions(-)

diff --git a/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc
index c8e81362c3..86fe40c4f6 100644
--- a/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc
@@ -62,8 +62,9 @@ class MKLDNNActivationGradKernel
 template <typename T>
 void eltwise_forward(const framework::ExecutionContext &ctx,
                      mkldnn::algorithm algorithm) {
-  PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
-                 "It must use CPUPlace.");
+  PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
+                    paddle::platform::errors::PreconditionNotMet(
+                        "Operator DNNL eltwise_forward must use CPUPlace"));
   auto &dev_ctx = ctx.template device_context();

   const auto *x = ctx.Input("X");
diff --git a/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc
index b7be004525..fde4900c6d 100644
--- a/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc
@@ -144,7 +144,11 @@ class BatchNormMKLDNNOpKernel : public paddle::framework::OpKernel {
     auto src_tz = paddle::framework::vectorize(x->dims());
     auto scale_tz = paddle::framework::vectorize(scale->dims());
-    PADDLE_ENFORCE(scale_tz.size() == 1, "Dims of scale tensor is NOT 1");
+    PADDLE_ENFORCE_EQ(
+        scale_tz.size(), 1,
+        platform::errors::InvalidArgument(
+            "Dims of scale tensor must be 1, but received scale's size is %d",
+            scale_tz.size()));
     const unsigned int C = scale_tz[0];

     // MKLDNN requires a single piece of memory for scale and shift/bias data
@@ -248,7 +252,11 @@ class BatchNormMKLDNNGradOpKernel : public paddle::framework::OpKernel {
     auto src_tz = paddle::framework::vectorize(x->dims());
     auto scale_tz = paddle::framework::vectorize(scale->dims());
-    PADDLE_ENFORCE(scale_tz.size() == 1, "Dims of scale tensor is NOT 1");
+    PADDLE_ENFORCE_EQ(
+        scale_tz.size(), 1,
+        platform::errors::InvalidArgument(
+            "Dims of scale tensor must be 1, but received scale's size is %d",
+            scale_tz.size()));

     const unsigned int C = scale_tz[0];
diff --git a/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc
index c6f782046c..a01bf8f9b9 100644
--- a/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc
@@ -94,8 +94,9 @@ template
 class ConvMKLDNNOpKernel : public paddle::framework::OpKernel {
  public:
   void Compute(const paddle::framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
-                   platform::errors::InvalidArgument("It must use CPUPlace."));
+    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
+                      paddle::platform::errors::PreconditionNotMet(
+                          "Operator DNNL Conv must use CPUPlace"));
     bool is_INT8 =
         std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value;
     if (!is_INT8) {
@@ -784,9 +785,9 @@ template
 class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel {
  public:
   void Compute(const paddle::framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
-                   platform::errors::InvalidArgument("It must use CPUPlace."));
-
+    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
+                      paddle::platform::errors::PreconditionNotMet(
+                          "Operator DNNL ConvGrad must use CPUPlace"));
     auto& dev_ctx = ctx.template device_context();
     const auto& mkldnn_engine = dev_ctx.GetEngine();
diff --git a/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc
index bed0885c0d..48279658c8 100644
--- a/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc
@@ -29,9 +29,9 @@ template
 class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel {
  public:
   void Compute(const paddle::framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
-                   platform::errors::InvalidArgument("It must use CPUPlace."));
-
+    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
+                      paddle::platform::errors::PreconditionNotMet(
+                          "Operator DNNL ConvTranspose must use CPUPlace"));
     const bool is_test = ctx.Attr<bool>("is_test");
     PADDLE_ENFORCE_EQ(is_test, true,
                       platform::errors::InvalidArgument(
diff --git a/paddle/fluid/operators/mkldnn/lrn_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/lrn_mkldnn_op.cc
index 5b025fa11e..817711f315 100644
--- a/paddle/fluid/operators/mkldnn/lrn_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/lrn_mkldnn_op.cc
@@ -27,10 +27,12 @@ class LRNMKLDNNOpKernel : public paddle::framework::OpKernel {
  public:
   void Compute(const paddle::framework::ExecutionContext& ctx) const override {
     const bool is_float_type = std::is_same<T, float>::value;
-    PADDLE_ENFORCE(is_float_type, "MKLDNN LRN must use float data.");
-    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
-                   "MKLDNN LRN must use CPUPlace.");
-
+    PADDLE_ENFORCE_EQ(
+        is_float_type, true,
+        platform::errors::PreconditionNotMet("DNNL LRN must use float data."));
+    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
+                      paddle::platform::errors::PreconditionNotMet(
+                          "Operator DNNL LRN must use CPUPlace"));
     auto& dev_ctx = ctx.template device_context();

     auto x = ctx.Input("X");
@@ -93,12 +95,16 @@ class LRNMKLDNNGradOpKernel : public paddle::framework::OpKernel {
  public:
   void Compute(const paddle::framework::ExecutionContext& ctx) const override {
     const bool is_float_type = std::is_same<T, float>::value;
-    PADDLE_ENFORCE(is_float_type, "MKLDNN LRN must use float data.");
-    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
-                   "MKLDNN LRN must use CPUPlace.");
-    PADDLE_ENFORCE(
-        !ctx.Attr<bool>("is_test"),
-        "is_test attribute should be set to False in training phase.");
+    PADDLE_ENFORCE_EQ(is_float_type, true,
+                      platform::errors::PreconditionNotMet(
+                          "DNNL LRN GradOpKernel must use float data."));
+    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
+                      paddle::platform::errors::PreconditionNotMet(
+                          "Operator DNNL LRNGrad must use CPUPlace"));
+    PADDLE_ENFORCE_EQ(
+        ctx.Attr<bool>("is_test"), false,
+        platform::errors::PreconditionNotMet(
+            "is_test attribute should be set to False in training phase."));
     auto x = ctx.Input("X");
     auto mid = ctx.Input("MidOut");
diff --git a/paddle/fluid/operators/mkldnn/mkldnn_activation_op.h b/paddle/fluid/operators/mkldnn/mkldnn_activation_op.h
index 6c294a9518..a7a4f9c697 100644
--- a/paddle/fluid/operators/mkldnn/mkldnn_activation_op.h
+++ b/paddle/fluid/operators/mkldnn/mkldnn_activation_op.h
@@ -30,12 +30,8 @@ class MKLDNNActivationKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    PADDLE_ENFORCE(context.Input("X") != nullptr,
-                   "Cannot get input tensor X, variable name = %s",
-                   context.InputName("X"));
-    PADDLE_ENFORCE(context.Output("Out") != nullptr,
-                   "Cannot find output tensor Out, variable name = %s",
-                   context.OutputName("Out"));
+    OP_INOUT_CHECK(context.HasInput("X"), "Input", "X", "Activation");
+    OP_INOUT_CHECK(context.HasOutput("Out"), "Output", "Out", "Activation");
     Functor functor;

     auto attrs = functor.GetAttrs();
diff --git a/paddle/fluid/operators/mkldnn/mul_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/mul_mkldnn_op.cc
index 862d6508c5..1dd1ad1178 100644
--- a/paddle/fluid/operators/mkldnn/mul_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/mul_mkldnn_op.cc
@@ -333,9 +333,9 @@ template
 class MulMKLDNNKernel : public framework::OpKernel {
  public:
   void Compute(const ExecutionContext &ctx) const override {
-    PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
-                   "It must use CPUPlace.");
-
+    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
+                      paddle::platform::errors::PreconditionNotMet(
+                          "Operator DNNL Mul must use CPUPlace"));
     auto &dev_ctx = ctx.template device_context();
     const auto &mkldnn_engine = dev_ctx.GetEngine();
diff --git a/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc
index 4164e067e5..9669a966cc 100644
--- a/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc
@@ -33,8 +33,9 @@ template
 class PoolMKLDNNOpKernel : public paddle::framework::OpKernel {
  public:
   void Compute(const paddle::framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
-                   "It must use CPUPlace.");
+    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
+                      paddle::platform::errors::PreconditionNotMet(
+                          "Operator DNNL Pool must use CPUPlace"));

     auto& dev_ctx = ctx.template device_context();
@@ -117,9 +118,9 @@ template
 class PoolMKLDNNGradOpKernel : public paddle::framework::OpKernel {
  public:
   void Compute(const paddle::framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
-                   "It must use CPUPlace.");
-
+    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
+                      paddle::platform::errors::PreconditionNotMet(
+                          "Operator DNNL PoolGrad must use CPUPlace"));
     const Tensor* in_x = ctx.Input("X");
     const Tensor* out_grad = ctx.Input(framework::GradVarName("Out"));
     Tensor* in_x_grad = ctx.Output(framework::GradVarName("X"));
diff --git a/paddle/fluid/operators/mkldnn/softmax_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/softmax_mkldnn_op.cc
index 51bc534bff..4d825e4ee2 100644
--- a/paddle/fluid/operators/mkldnn/softmax_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/softmax_mkldnn_op.cc
@@ -129,9 +129,9 @@ template
 class SoftmaxMKLDNNGradKernel : public paddle::framework::OpKernel {
  public:
   void Compute(const paddle::framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
-                   "It must use CPUPlace.");
-
+    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
+                      paddle::platform::errors::PreconditionNotMet(
+                          "Operator DNNL SoftmaxGrad must use CPUPlace"));
     auto& dev_ctx = ctx.template device_context();
     const Tensor* output = ctx.Input("Out");
     auto* dout = ctx.template Input(framework::GradVarName("Out"));
diff --git a/paddle/fluid/operators/mkldnn/sum_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/sum_mkldnn_op.cc
index 0bee1a6c8b..1e0e13abb7 100644
--- a/paddle/fluid/operators/mkldnn/sum_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/sum_mkldnn_op.cc
@@ -49,8 +49,9 @@ template
 class SumMKLDNNOpKernel : public paddle::framework::OpKernel {
  public:
   void Compute(const paddle::framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
-                   "It must use CPUPlace.");
+    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
+                      paddle::platform::errors::PreconditionNotMet(
+                          "Operator DNNL Sum must use CPUPlace"));
     auto& dev_ctx = ctx.template device_context();
     const auto& mkldnn_engine = dev_ctx.GetEngine();
     auto in_vars = ctx.MultiInputVar("X");
diff --git a/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc
index 9e9b2fb158..398bdb01b5 100644
--- a/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc
@@ -28,8 +28,9 @@ template
 class TransposeMKLDNNOpKernel : public paddle::framework::OpKernel {
  public:
   void Compute(const paddle::framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
-                   "It must use CPUPlace.");
+    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
+                      paddle::platform::errors::PreconditionNotMet(
+                          "Operator DNNL Transpose must use CPUPlace"));
     auto& dev_ctx = ctx.template device_context();
     const auto& mkldnn_engine = dev_ctx.GetEngine();
@@ -73,8 +74,9 @@ template
 class TransposeMKLDNNGradOpKernel : public paddle::framework::OpKernel {
  public:
   void Compute(const paddle::framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
-                   "It must use CPUPlace.");
+    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
+                      paddle::platform::errors::PreconditionNotMet(
+                          "Operator DNNL TransposeGrad must use CPUPlace"));
     auto* out_grad = ctx.Input(framework::GradVarName("Out"));
     auto* x_grad = ctx.Output(framework::GradVarName("X"));
diff --git a/paddle/fluid/platform/mkldnn_reuse.h b/paddle/fluid/platform/mkldnn_reuse.h
index 4248a2b859..8de2416ea9 100644
--- a/paddle/fluid/platform/mkldnn_reuse.h
+++ b/paddle/fluid/platform/mkldnn_reuse.h
@@ -1190,8 +1190,11 @@ static std::shared_ptr SetDstMemory(
     const std::shared_ptr& handler, std::vector* pipeline) {
   const T* residual_param_data = residual_param->data();
-  PADDLE_ENFORCE(residual_param_data != nullptr,
-                 "Provide data if you want MKLDNN conv+elementwise_add fusion");
+  PADDLE_ENFORCE_NOT_NULL(
+      residual_param_data,
+      platform::errors::PreconditionNotMet("Residual parameter is required for "
+                                           "the DNNL conv+elementwise_add "
+                                           "fusion, but now it is missing"));
   std::shared_ptr user_residual_memory_p = handler->AcquireResidualDataMemory(
       user_residual_md, to_void_cast(residual_param_data));
--
GitLab
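
Note: every hunk above applies the same replacement pattern, so one hedged
sketch is enough to show how the new enforce style reads on its own. The
kernel below is hypothetical (the name DummyMKLDNNOpKernel and its body are
invented for illustration); only the PADDLE_ENFORCE_EQ / PADDLE_ENFORCE_NOT_NULL
calls and the platform::errors::PreconditionNotMet wrapper mirror what the
patch introduces.

// Illustrative sketch only, not part of the patch. Assumes the usual
// PaddlePaddle headers are available; DummyMKLDNNOpKernel is a hypothetical
// kernel used to demonstrate the typed-error enforce pattern adopted above.
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace operators {

template <typename T>
class DummyMKLDNNOpKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    // Old form: PADDLE_ENFORCE(cond, "msg"). The new form states the expected
    // value explicitly and attaches a typed error category to the message.
    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
                      platform::errors::PreconditionNotMet(
                          "Operator DNNL Dummy must use CPUPlace"));

    // Pointer checks move to PADDLE_ENFORCE_NOT_NULL with the same
    // typed-error message style.
    const auto* x = ctx.Input<framework::Tensor>("X");
    PADDLE_ENFORCE_NOT_NULL(
        x, platform::errors::PreconditionNotMet(
               "Input tensor X of operator Dummy must be set"));
  }
};

}  // namespace operators
}  // namespace paddle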