diff --git a/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc
index c8e81362c3fa967b600af9af2f6f5490e648dda0..86fe40c4f6a825116cdf8fe884ae06cc3e7bbc34 100644
--- a/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc
@@ -62,8 +62,9 @@ class MKLDNNActivationGradKernel
 template <typename T>
 void eltwise_forward(const framework::ExecutionContext &ctx,
                      mkldnn::algorithm algorithm) {
-  PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
-                 "It must use CPUPlace.");
+  PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
+                    paddle::platform::errors::PreconditionNotMet(
+                        "Operator DNNL eltwise_forward must use CPUPlace"));
   auto &dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();
 
   const auto *x = ctx.Input<Tensor>("X");
diff --git a/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc
index b7be0045258e7aafb64912f2cc75c9c9e05413b6..fde4900c6d3c876151adae061182277482899739 100644
--- a/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc
@@ -144,7 +144,11 @@ class BatchNormMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
 
     auto src_tz = paddle::framework::vectorize(x->dims());
     auto scale_tz = paddle::framework::vectorize(scale->dims());
-    PADDLE_ENFORCE(scale_tz.size() == 1, "Dims of scale tensor is NOT 1");
+    PADDLE_ENFORCE_EQ(
+        scale_tz.size(), 1,
+        platform::errors::InvalidArgument(
+            "Dims of scale tensor must be 1, but received scale's size is %d",
+            scale_tz.size()));
     const unsigned int C = scale_tz[0];
 
     // MKLDNN requires a single piece of memory for scale and shift/bias data
@@ -248,7 +252,11 @@ class BatchNormMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
 
     auto src_tz = paddle::framework::vectorize(x->dims());
     auto scale_tz = paddle::framework::vectorize(scale->dims());
-    PADDLE_ENFORCE(scale_tz.size() == 1, "Dims of scale tensor is NOT 1");
+    PADDLE_ENFORCE_EQ(
+        scale_tz.size(), 1,
+        platform::errors::InvalidArgument(
+            "Dims of scale tensor must be 1, but received scale's size is %d",
+            scale_tz.size()));
 
     const unsigned int C = scale_tz[0];
 
diff --git a/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc
index c6f782046c95271aa4c63106ca3bd00617eaf43c..a01bf8f9b9cfc04d69d5acf8316a2d1f68142ee5 100644
--- a/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc
@@ -94,8 +94,9 @@ template <typename T, typename K>
 class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
  public:
   void Compute(const paddle::framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
-                   platform::errors::InvalidArgument("It must use CPUPlace."));
+    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
+                      paddle::platform::errors::PreconditionNotMet(
+                          "Operator DNNL Conv must use CPUPlace"));
     bool is_INT8 =
         std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value;
     if (!is_INT8) {
@@ -784,9 +785,9 @@ template <typename T>
 class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
  public:
   void Compute(const paddle::framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
-                   platform::errors::InvalidArgument("It must use CPUPlace."));
-
+    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
+                      paddle::platform::errors::PreconditionNotMet(
+                          "Operator DNNL ConvGrad must use CPUPlace"));
     auto& dev_ctx =
         ctx.template device_context<platform::MKLDNNDeviceContext>();
     const auto& mkldnn_engine = dev_ctx.GetEngine();
diff --git a/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc
index bed0885c0d262da9f8c964da86a5f5aa3ea9d50b..48279658c80e93428f940c40e61d7b9af23f4ee3 100644
--- a/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc
@@ -29,9 +29,9 @@ template <typename T>
 class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
  public:
   void Compute(const paddle::framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
-                   platform::errors::InvalidArgument("It must use CPUPlace."));
-
+    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
+                      paddle::platform::errors::PreconditionNotMet(
+                          "Operator DNNL ConvTranspose must use CPUPlace"));
     const bool is_test = ctx.Attr<bool>("is_test");
     PADDLE_ENFORCE_EQ(is_test, true,
                       platform::errors::InvalidArgument(
diff --git a/paddle/fluid/operators/mkldnn/lrn_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/lrn_mkldnn_op.cc
index 5b025fa11e3f306597fc0888dd3b7ff798606b41..817711f3157b1bd1e5fda335c62f6e04c486e479 100644
--- a/paddle/fluid/operators/mkldnn/lrn_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/lrn_mkldnn_op.cc
@@ -27,10 +27,12 @@ class LRNMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
  public:
   void Compute(const paddle::framework::ExecutionContext& ctx) const override {
     const bool is_float_type = std::is_same<T, float>::value;
-    PADDLE_ENFORCE(is_float_type, "MKLDNN LRN must use float data.");
-    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
-                   "MKLDNN LRN must use CPUPlace.");
-
+    PADDLE_ENFORCE_EQ(
+        is_float_type, true,
+        platform::errors::PreconditionNotMet("DNNL LRN must use float data."));
+    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
+                      paddle::platform::errors::PreconditionNotMet(
+                          "Operator DNNL LRN must use CPUPlace"));
     auto& dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();
 
     auto x = ctx.Input<Tensor>("X");
@@ -93,12 +95,16 @@ class LRNMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
  public:
   void Compute(const paddle::framework::ExecutionContext& ctx) const override {
     const bool is_float_type = std::is_same<T, float>::value;
-    PADDLE_ENFORCE(is_float_type, "MKLDNN LRN must use float data.");
-    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
-                   "MKLDNN LRN must use CPUPlace.");
-    PADDLE_ENFORCE(
-        !ctx.Attr<bool>("is_test"),
-        "is_test attribute should be set to False in training phase.");
+    PADDLE_ENFORCE_EQ(is_float_type, true,
+                      platform::errors::PreconditionNotMet(
+                          "DNNL LRN GradOpKernel must use float data."));
+    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
+                      paddle::platform::errors::PreconditionNotMet(
+                          "Operator DNNL LRNGrad must use CPUPlace"));
+    PADDLE_ENFORCE_EQ(
+        ctx.Attr<bool>("is_test"), false,
+        platform::errors::PreconditionNotMet(
+            "is_test attribute should be set to False in training phase."));
 
     auto x = ctx.Input<Tensor>("X");
     auto mid = ctx.Input<Tensor>("MidOut");
diff --git a/paddle/fluid/operators/mkldnn/mkldnn_activation_op.h b/paddle/fluid/operators/mkldnn/mkldnn_activation_op.h
index 6c294a9518653ed6de6b8699cfc44c4539661fde..a7a4f9c6975b3c7220c1922dd3fbcb0e03ab163c 100644
--- a/paddle/fluid/operators/mkldnn/mkldnn_activation_op.h
+++ b/paddle/fluid/operators/mkldnn/mkldnn_activation_op.h
@@ -30,12 +30,8 @@ class MKLDNNActivationKernel
     : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    PADDLE_ENFORCE(context.Input<framework::Tensor>("X") != nullptr,
-                   "Cannot get input tensor X, variable name = %s",
-                   context.InputName("X"));
-    PADDLE_ENFORCE(context.Output<framework::Tensor>("Out") != nullptr,
-                   "Cannot find output tensor Out, variable name = %s",
-                   context.OutputName("Out"));
+    OP_INOUT_CHECK(context.HasInput("X"), "Input", "X", "Activation");
+    OP_INOUT_CHECK(context.HasOutput("Out"), "Output", "Out", "Activation");
     Functor functor;
 
     auto attrs = functor.GetAttrs();
diff --git a/paddle/fluid/operators/mkldnn/mul_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/mul_mkldnn_op.cc
index 862d6508c5132ea71930fe2a62a8d33ac7036246..1dd1ad117862d92aa8d358f04f8b03fec7abafff 100644
--- a/paddle/fluid/operators/mkldnn/mul_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/mul_mkldnn_op.cc
@@ -333,9 +333,9 @@ template <typename XT, typename YT>
 class MulMKLDNNKernel : public framework::OpKernel<XT> {
  public:
   void Compute(const ExecutionContext &ctx) const override {
-    PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
-                   "It must use CPUPlace.");
-
+    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
+                      paddle::platform::errors::PreconditionNotMet(
+                          "Operator DNNL Mul must use CPUPlace"));
     auto &dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();
     const auto &mkldnn_engine = dev_ctx.GetEngine();
 
diff --git a/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc
index 4164e067e5dfffbd3e4166ac642d8ae1e20fb186..9669a966cc0c68521800d29a6ccdbd86f6e7c5ba 100644
--- a/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc
@@ -33,8 +33,9 @@ template <typename T>
 class PoolMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
  public:
   void Compute(const paddle::framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
-                   "It must use CPUPlace.");
+    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
+                      paddle::platform::errors::PreconditionNotMet(
+                          "Operator DNNL Pool must use CPUPlace"));
     auto& dev_ctx =
         ctx.template device_context<platform::MKLDNNDeviceContext>();
 
@@ -117,9 +118,9 @@ template <typename T>
 class PoolMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
  public:
   void Compute(const paddle::framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
-                   "It must use CPUPlace.");
-
+    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
+                      paddle::platform::errors::PreconditionNotMet(
+                          "Operator DNNL PoolGrad must use CPUPlace"));
     const Tensor* in_x = ctx.Input<Tensor>("X");
     const Tensor* out_grad = ctx.Input<Tensor>(framework::GradVarName("Out"));
     Tensor* in_x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
diff --git a/paddle/fluid/operators/mkldnn/softmax_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/softmax_mkldnn_op.cc
index 51bc534bff27c48d7f24c82057008a2367dd073a..4d825e4ee279bc2c505cfabff1917d1a5319d1dd 100644
--- a/paddle/fluid/operators/mkldnn/softmax_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/softmax_mkldnn_op.cc
@@ -129,9 +129,9 @@ template <typename T>
 class SoftmaxMKLDNNGradKernel : public paddle::framework::OpKernel<T> {
  public:
   void Compute(const paddle::framework::ExecutionContext& ctx) const override {
-    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
-                   "It must use CPUPlace.");
-
+    PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true,
+                      paddle::platform::errors::PreconditionNotMet(
+                          "Operator DNNL SoftmaxGrad must use CPUPlace"));
     auto& dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();
ctx.Input("Out"); auto* dout = ctx.template Input(framework::GradVarName("Out")); diff --git a/paddle/fluid/operators/mkldnn/sum_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/sum_mkldnn_op.cc index 0bee1a6c8b5d64acba1e6464066000b961af51a1..1e0e13abb7c641d441b9c6188f7b9103c4ec7292 100644 --- a/paddle/fluid/operators/mkldnn/sum_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/sum_mkldnn_op.cc @@ -49,8 +49,9 @@ template class SumMKLDNNOpKernel : public paddle::framework::OpKernel { public: void Compute(const paddle::framework::ExecutionContext& ctx) const override { - PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()), - "It must use CPUPlace."); + PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true, + paddle::platform::errors::PreconditionNotMet( + "Operator DNNL Sum must use CPUPlace")); auto& dev_ctx = ctx.template device_context(); const auto& mkldnn_engine = dev_ctx.GetEngine(); auto in_vars = ctx.MultiInputVar("X"); diff --git a/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc index 9e9b2fb15827c4323abbe615148acd4ecb5da784..398bdb01b5c240f704982ec5a75e21677f1ef611 100644 --- a/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc @@ -28,8 +28,9 @@ template class TransposeMKLDNNOpKernel : public paddle::framework::OpKernel { public: void Compute(const paddle::framework::ExecutionContext& ctx) const override { - PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()), - "It must use CPUPlace."); + PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true, + paddle::platform::errors::PreconditionNotMet( + "Operator DNNL Transpose must use CPUPlace")); auto& dev_ctx = ctx.template device_context(); const auto& mkldnn_engine = dev_ctx.GetEngine(); @@ -73,8 +74,9 @@ template class TransposeMKLDNNGradOpKernel : public paddle::framework::OpKernel { public: void Compute(const paddle::framework::ExecutionContext& ctx) const override { - PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()), - "It must use CPUPlace."); + PADDLE_ENFORCE_EQ(platform::is_cpu_place(ctx.GetPlace()), true, + paddle::platform::errors::PreconditionNotMet( + "Operator DNNL TransposeGrad must use CPUPlace")); auto* out_grad = ctx.Input(framework::GradVarName("Out")); auto* x_grad = ctx.Output(framework::GradVarName("X")); diff --git a/paddle/fluid/platform/mkldnn_reuse.h b/paddle/fluid/platform/mkldnn_reuse.h index 4248a2b859f63817291089524794804f6dfdcd04..8de2416ea915a946ca69877f8e48e28c25b6c5a5 100644 --- a/paddle/fluid/platform/mkldnn_reuse.h +++ b/paddle/fluid/platform/mkldnn_reuse.h @@ -1190,8 +1190,11 @@ static std::shared_ptr SetDstMemory( const std::shared_ptr& handler, std::vector* pipeline) { const T* residual_param_data = residual_param->data(); - PADDLE_ENFORCE(residual_param_data != nullptr, - "Provide data if you want MKLDNN conv+elementwise_add fusion"); + PADDLE_ENFORCE_NOT_NULL( + residual_param_data, + platform::errors::PreconditionNotMet("Residual parameter is required for " + "the DNNL conv+elementwise_add " + "fusion, but now it is missing")); std::shared_ptr user_residual_memory_p = handler->AcquireResidualDataMemory(user_residual_md, to_void_cast(residual_param_data));