From 632bc1f2537ef1ebbe80de2d2f00f874ff2f0b8d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C5=82awomir=20Siwek?=
Date: Mon, 29 Aug 2022 13:11:18 +0200
Subject: [PATCH] [PHI] Migrate relu6 and abs kernels (#45397)

* abs relu6 fwd

* abs bwd

* gaussian_random_kernel and mkldnn-onednn renaming

* scale kernel

* whitespace

* whitespace

* revert scale migration

* whitespaces

* revert changes to gaussian kernel

* whitespaces
---
 .../operators/mkldnn/activation_mkldnn_op.cc  | 78 +++++--------------
 paddle/fluid/platform/mkldnn_reuse.h          |  2 +-
 .../kernels/funcs/data_layout_transform.cc    |  4 +-
 .../{mkldnn_helper.h => onednn_helper.h}      |  0
 .../onednn/{mkldnn_reuse.h => onednn_reuse.h} |  2 +-
 .../kernels/onednn/activation_grad_kernel.cc  |  8 +-
 .../phi/kernels/onednn/activation_kernel.cc   | 15 +++-
 paddle/phi/kernels/transfer_layout_kernel.cc  |  2 +-
 8 files changed, 44 insertions(+), 67 deletions(-)
 rename paddle/phi/kernels/funcs/onednn/{mkldnn_helper.h => onednn_helper.h} (100%)
 rename paddle/phi/kernels/funcs/onednn/{mkldnn_reuse.h => onednn_reuse.h} (99%)

diff --git a/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc
index 6fba33e10f..728d86cd94 100644
--- a/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc
@@ -144,13 +144,6 @@ void eltwise_grad_use_out(const framework::ExecutionContext &ctx,
   dx->set_mem_desc(diff_src_memory_p->get_desc());
 }
 
-template <typename T, dnnl::algorithm algorithm>
-struct MKLDNNActivationFunc : public BaseActivationFunctor<T> {
-  void operator()(const framework::ExecutionContext &ctx) const {
-    eltwise_forward<T>(ctx, algorithm);
-  }
-};
-
 template <typename T, dnnl::algorithm algorithm>
 struct MKLDNNActivationGradFunc : public BaseActivationFunctor<T> {
   void operator()(const framework::ExecutionContext &ctx) const {
@@ -158,13 +151,6 @@ struct MKLDNNActivationGradFunc : public BaseActivationFunctor<T> {
   }
 };
 
-template <typename T, dnnl::algorithm algorithm>
-struct MKLDNNActivationGradUseOutFunc : public BaseActivationFunctor<T> {
-  void operator()(const framework::ExecutionContext &ctx) const {
-    eltwise_grad_use_out<T>(ctx, algorithm);
-  }
-};
-
 template <typename T>
 struct GeluMKLDNNFunctor : public BaseActivationFunctor<T> {
   void operator()(const framework::ExecutionContext &ctx) const {
@@ -196,59 +182,33 @@ struct SoftplusMKLDNNFunctor : public BaseActivationFunctor<T> {
   }
 };
 
-template <typename T>
-using Relu6MKLDNNFunctor =
-    MKLDNNActivationFunc<T, dnnl::algorithm::eltwise_bounded_relu>;
-
-template <typename T>
-using AbsMKLDNNFunctor = MKLDNNActivationFunc<T, dnnl::algorithm::eltwise_abs>;
-
 template <typename T>
 using Relu6MKLDNNGradFunctor =
     MKLDNNActivationGradFunc<T, dnnl::algorithm::eltwise_bounded_relu>;
 
-template <typename T>
-using AbsMKLDNNGradFunctor =
-    MKLDNNActivationGradFunc<T, dnnl::algorithm::eltwise_abs>;
-
 }  // namespace operators
 }  // namespace paddle
 
 namespace ops = paddle::operators;
 
-#define REGISTER_ACTIVATION_MKLDNN_KERNEL(act_type, functor, grad_functor)    \
-  REGISTER_OP_KERNEL(                                                         \
-      act_type,                                                               \
-      MKLDNN,                                                                 \
-      ::paddle::platform::CPUPlace,                                           \
-      ops::MKLDNNActivationKernel<ops::functor<float>>,                       \
-      ops::MKLDNNActivationKernel<ops::functor<paddle::platform::bfloat16>>); \
-  REGISTER_OP_KERNEL(                                                         \
-      act_type##_grad,                                                        \
-      MKLDNN,                                                                 \
-      ::paddle::platform::CPUPlace,                                           \
-      ops::MKLDNNActivationGradKernel<ops::grad_functor<float>>,              \
-      ops::MKLDNNActivationGradKernel<                                        \
+#define REGISTER_FWD_ACTIVATION_MKLDNN_KERNEL(act_type, functor)              \
+  REGISTER_OP_KERNEL(                                                         \
+      act_type,                                                               \
+      MKLDNN,                                                                 \
+      ::paddle::platform::CPUPlace,                                           \
+      ops::MKLDNNActivationKernel<ops::functor<float>>,                       \
+      ops::MKLDNNActivationKernel<ops::functor<paddle::platform::bfloat16>>);
+
+#define REGISTER_GRAD_ACTIVATION_MKLDNN_KERNEL(act_type, grad_functor)        \
+  REGISTER_OP_KERNEL(                                                         \
+      act_type##_grad,                                                        \
+      MKLDNN,                                                                 \
+      ::paddle::platform::CPUPlace,                                           \
+      ops::MKLDNNActivationGradKernel<ops::grad_functor<float>>,              \
+      ops::MKLDNNActivationGradKernel<                                        \
           ops::grad_functor<paddle::platform::bfloat16>>);
 
-#define REGISTER_ACTIVATION_MKLDNN_KERNEL_FWD_ONLY(act_type, functor) \
-  REGISTER_OP_KERNEL(act_type,                                        \
-                     MKLDNN,                                          \
-                     ::paddle::platform::CPUPlace,                    \
-                     ops::MKLDNNActivationKernel<ops::functor<float>>);
-
-#define FOR_EACH_MKLDNN_KERNEL_FUNCTOR(__macro)               \
-  __macro(abs, AbsMKLDNNFunctor, AbsMKLDNNGradFunctor);       \
-  __macro(gelu, GeluMKLDNNFunctor, GeluMKLDNNGradFunctor);    \
-  __macro(relu6, Relu6MKLDNNFunctor, Relu6MKLDNNGradFunctor);
-
-FOR_EACH_MKLDNN_KERNEL_FUNCTOR(REGISTER_ACTIVATION_MKLDNN_KERNEL);
-
-namespace ops = paddle::operators;
-REGISTER_OP_KERNEL(
-    softplus,
-    MKLDNN,
-    paddle::platform::CPUPlace,
-    ops::MKLDNNActivationKernel<ops::SoftplusMKLDNNFunctor<float>>,
-    ops::MKLDNNActivationKernel<
-        ops::SoftplusMKLDNNFunctor<paddle::platform::bfloat16>>);
+REGISTER_FWD_ACTIVATION_MKLDNN_KERNEL(softplus, SoftplusMKLDNNFunctor);
+REGISTER_FWD_ACTIVATION_MKLDNN_KERNEL(gelu, GeluMKLDNNFunctor);
+REGISTER_GRAD_ACTIVATION_MKLDNN_KERNEL(gelu, GeluMKLDNNGradFunctor);
+REGISTER_GRAD_ACTIVATION_MKLDNN_KERNEL(relu6, Relu6MKLDNNGradFunctor);
diff --git a/paddle/fluid/platform/mkldnn_reuse.h b/paddle/fluid/platform/mkldnn_reuse.h
index 7f6f4ff31f..32c31ef696 100644
--- a/paddle/fluid/platform/mkldnn_reuse.h
+++ b/paddle/fluid/platform/mkldnn_reuse.h
@@ -25,7 +25,7 @@ limitations under the License. */
 #include "paddle/fluid/operators/pool_op.h"
 #include "paddle/fluid/platform/mkldnn_helper.h"
 #include "paddle/fluid/platform/place.h"
-#include "paddle/phi/kernels/funcs/onednn/mkldnn_reuse.h"
+#include "paddle/phi/kernels/funcs/onednn/onednn_reuse.h"
 
 namespace paddle {
 namespace platform {
diff --git a/paddle/phi/kernels/funcs/data_layout_transform.cc b/paddle/phi/kernels/funcs/data_layout_transform.cc
index 800d67583e..5b8eb677fc 100644
--- a/paddle/phi/kernels/funcs/data_layout_transform.cc
+++ b/paddle/phi/kernels/funcs/data_layout_transform.cc
@@ -25,8 +25,8 @@
 #include "paddle/phi/core/dense_tensor.h"
 
 #ifdef PADDLE_WITH_MKLDNN
-#include "paddle/phi/kernels/funcs/onednn/mkldnn_helper.h"
-#include "paddle/phi/kernels/funcs/onednn/mkldnn_reuse.h"
+#include "paddle/phi/kernels/funcs/onednn/onednn_helper.h"
+#include "paddle/phi/kernels/funcs/onednn/onednn_reuse.h"
 #endif
 
 namespace phi {
diff --git a/paddle/phi/kernels/funcs/onednn/mkldnn_helper.h b/paddle/phi/kernels/funcs/onednn/onednn_helper.h
similarity index 100%
rename from paddle/phi/kernels/funcs/onednn/mkldnn_helper.h
rename to paddle/phi/kernels/funcs/onednn/onednn_helper.h
diff --git a/paddle/phi/kernels/funcs/onednn/mkldnn_reuse.h b/paddle/phi/kernels/funcs/onednn/onednn_reuse.h
similarity index 99%
rename from paddle/phi/kernels/funcs/onednn/mkldnn_reuse.h
rename to paddle/phi/kernels/funcs/onednn/onednn_reuse.h
index 56f2da3b3b..7384752c88 100644
--- a/paddle/phi/kernels/funcs/onednn/mkldnn_reuse.h
+++ b/paddle/phi/kernels/funcs/onednn/onednn_reuse.h
@@ -25,7 +25,7 @@ limitations under the License. */
 #include "paddle/phi/common/data_type.h"
 #include "paddle/phi/common/place.h"
 #include "paddle/phi/core/dense_tensor.h"
-#include "paddle/phi/kernels/funcs/onednn/mkldnn_helper.h"
+#include "paddle/phi/kernels/funcs/onednn/onednn_helper.h"
 
 namespace phi {
 namespace funcs {
diff --git a/paddle/phi/kernels/onednn/activation_grad_kernel.cc b/paddle/phi/kernels/onednn/activation_grad_kernel.cc
index 2eff072e64..b7a0010a6e 100644
--- a/paddle/phi/kernels/onednn/activation_grad_kernel.cc
+++ b/paddle/phi/kernels/onednn/activation_grad_kernel.cc
@@ -19,7 +19,7 @@
 #include "paddle/phi/common/place.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/funcs/activation_functor.h"
-#include "paddle/phi/kernels/funcs/onednn/mkldnn_reuse.h"
+#include "paddle/phi/kernels/funcs/onednn/onednn_reuse.h"
 
 namespace phi {
 
@@ -147,6 +147,10 @@ struct MKLDNNActivationGradUseOutFunc : public funcs::BaseActivationFunctor<T> {
   }
 };
 
+template <typename T>
+using AbsMKLDNNGradFunctor =
+    MKLDNNActivationGradFunc<T, dnnl::algorithm::eltwise_abs>;
+
 template <typename T>
 using ReluMKLDNNGradFunctor =
     MKLDNNActivationGradFunc<T, dnnl::algorithm::eltwise_relu>;
@@ -193,6 +197,7 @@ DEFINE_ONEDNN_ACTIVATION_GRAD_KERNEL_DEPOUT(Sqrt, SqrtMKLDNNGradUseOutFunctor);
 DEFINE_ONEDNN_ACTIVATION_GRAD_KERNEL_DEPOUT(Sigmoid,
                                             SigmoidMKLDNNGradUseOutFunctor);
 DEFINE_ONEDNN_ACTIVATION_GRAD_KERNEL_DEPOUT(Exp, ExpMKLDNNGradUseOutFunctor);
+DEFINE_ONEDNN_ACTIVATION_GRAD_KERNEL_DEPOUT(Abs, AbsMKLDNNGradFunctor);
 DEFINE_ONEDNN_ACTIVATION_GRAD_KERNEL_DEPOUT(Relu, ReluMKLDNNGradFunctor);
 
 DEFINE_ONEDNN_ACT_GRAD_KERNEL_WITH_ONE_ATTRS_DEPX(LeakyRelu,
@@ -240,6 +245,7 @@ PD_REGISTER_KERNEL(relu_grad,
   PD_REGISTER_KERNEL(                                                       \
       name, OneDNN, ALL_LAYOUT, phi::func, float, phi::dtype::bfloat16) {}
 
+PD_REGISTER_ACTIVATION_GRAD_KERNEL(abs_grad, AbsGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(elu_grad, EluGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(exp_grad, ExpGradKernel)
 PD_REGISTER_ACTIVATION_GRAD_KERNEL(hard_swish_grad, HardSwishGradKernel)
diff --git a/paddle/phi/kernels/onednn/activation_kernel.cc b/paddle/phi/kernels/onednn/activation_kernel.cc
index fa0af71d39..4d1b17b7aa 100644
--- a/paddle/phi/kernels/onednn/activation_kernel.cc
+++ b/paddle/phi/kernels/onednn/activation_kernel.cc
@@ -19,7 +19,7 @@
 #include "paddle/phi/common/place.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/funcs/activation_functor.h"
-#include "paddle/phi/kernels/funcs/onednn/mkldnn_reuse.h"
+#include "paddle/phi/kernels/funcs/onednn/onednn_reuse.h"
 
 namespace phi {
 
@@ -88,10 +88,17 @@ struct MKLDNNActivationFunc : public funcs::BaseActivationFunctor<T> {
   }
 };
 
+template <typename T>
+using AbsMKLDNNFunctor = MKLDNNActivationFunc<T, dnnl::algorithm::eltwise_abs>;
+
 template <typename T>
 using ReluMKLDNNFunctor =
     MKLDNNActivationFunc<T, dnnl::algorithm::eltwise_relu>;
 
+template <typename T>
+using Relu6MKLDNNFunctor =
+    MKLDNNActivationFunc<T, dnnl::algorithm::eltwise_bounded_relu>;
+
 template <typename T>
 using SwishMKLDNNFunctor =
     MKLDNNActivationFunc<T, dnnl::algorithm::eltwise_swish>;
@@ -126,6 +133,7 @@ template <typename T>
 using RoundMKLDNNFunctor =
     MKLDNNActivationFunc<T, dnnl::algorithm::eltwise_round>;
 
+DEFINE_ONEDNN_ACTIVATION_KERNEL(Abs, AbsMKLDNNFunctor)
 DEFINE_ONEDNN_ACTIVATION_KERNEL(Relu, ReluMKLDNNFunctor)
 DEFINE_ONEDNN_ACTIVATION_KERNEL(Tanh, TanhMKLDNNFunctor)
 DEFINE_ONEDNN_ACTIVATION_KERNEL(Exp, ExpMKLDNNFunctor)
@@ -137,6 +145,7 @@ DEFINE_ONEDNN_ACTIVATION_KERNEL(Round, RoundMKLDNNFunctor)
 DEFINE_ONEDNN_ACT_KERNEL_WITH_ONE_ATTRS(LeakyRelu, ReluMKLDNNFunctor, alpha)
 DEFINE_ONEDNN_ACT_KERNEL_WITH_ONE_ATTRS(Mish, MishMKLDNNFunctor, threshold)
 DEFINE_ONEDNN_ACT_KERNEL_WITH_ONE_ATTRS(Elu, EluMKLDNNFunctor, alpha)
+DEFINE_ONEDNN_ACT_KERNEL_WITH_ONE_ATTRS(Relu6, Relu6MKLDNNFunctor, threshold)
 DEFINE_ONEDNN_ACT_KERNEL_WITH_ONE_ATTRS(Swish, SwishMKLDNNFunctor, beta)
 
 template <typename T, typename Context>
@@ -158,13 +167,15 @@ PD_REGISTER_KERNEL(round, OneDNN, ALL_LAYOUT, phi::RoundKernel, float) {}
   PD_REGISTER_KERNEL(                                                       \
       name, OneDNN, ALL_LAYOUT, phi::func, float, phi::dtype::bfloat16) {}
 
+PD_REGISTER_ACTIVATION_KERNEL(abs, AbsKernel)
 PD_REGISTER_ACTIVATION_KERNEL(elu, EluKernel)
 PD_REGISTER_ACTIVATION_KERNEL(exp, ExpKernel)
 PD_REGISTER_ACTIVATION_KERNEL(hard_swish, HardSwishKernel)
 PD_REGISTER_ACTIVATION_KERNEL(leaky_relu, LeakyReluKernel)
 PD_REGISTER_ACTIVATION_KERNEL(mish, MishKernel)
+PD_REGISTER_ACTIVATION_KERNEL(relu, ReluKernel)
+PD_REGISTER_ACTIVATION_KERNEL(relu6, Relu6Kernel)
 PD_REGISTER_ACTIVATION_KERNEL(sigmoid, SigmoidKernel)
 PD_REGISTER_ACTIVATION_KERNEL(sqrt, SqrtKernel)
 PD_REGISTER_ACTIVATION_KERNEL(swish, SwishKernel)
 PD_REGISTER_ACTIVATION_KERNEL(tanh, TanhKernel)
-PD_REGISTER_ACTIVATION_KERNEL(relu, ReluKernel)
diff --git a/paddle/phi/kernels/transfer_layout_kernel.cc b/paddle/phi/kernels/transfer_layout_kernel.cc
index 2110a06f16..0e8faa64ad 100644
--- a/paddle/phi/kernels/transfer_layout_kernel.cc
+++ b/paddle/phi/kernels/transfer_layout_kernel.cc
@@ -23,7 +23,7 @@ limitations under the License. */
 #include "paddle/phi/kernels/funcs/data_layout_transform.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
 #ifdef PADDLE_WITH_MKLDNN
-#include "paddle/phi/kernels/funcs/onednn/mkldnn_helper.h"
+#include "paddle/phi/kernels/funcs/onednn/onednn_helper.h"
 #endif
 
 namespace phi {
--
GitLab
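The migrated kernels above rest on the mapping between the functor aliases and oneDNN's eltwise primitive: `AbsMKLDNNFunctor` selects `dnnl::algorithm::eltwise_abs`, while `Relu6MKLDNNFunctor` selects `dnnl::algorithm::eltwise_bounded_relu` with the upper bound passed through the primitive's `alpha` parameter (the `threshold` attribute in the patch). A minimal, standalone sketch of that primitive-level behaviour follows, written against the oneDNN 2.x C++ API rather than Paddle itself; the helper name `run_eltwise`, the 1-D shape, and the sample values are illustrative and not part of the patch.

```cpp
#include <vector>
#include "dnnl.hpp"  // oneDNN 2.x C++ API

// Runs a oneDNN eltwise forward primitive over a small f32 buffer.
// `alg` picks the activation: eltwise_abs for abs, eltwise_bounded_relu with
// alpha = 6.0f for relu6, mirroring the functor aliases in the patch.
static std::vector<float> run_eltwise(dnnl::algorithm alg,
                                      std::vector<float> src_data,
                                      float alpha = 0.0f) {
  dnnl::engine eng(dnnl::engine::kind::cpu, 0);
  dnnl::stream strm(eng);

  dnnl::memory::dims dims = {static_cast<dnnl::memory::dim>(src_data.size())};
  auto md = dnnl::memory::desc(
      dims, dnnl::memory::data_type::f32, dnnl::memory::format_tag::a);
  auto src_mem = dnnl::memory(md, eng, src_data.data());
  std::vector<float> dst_data(src_data.size());
  auto dst_mem = dnnl::memory(md, eng, dst_data.data());

  // oneDNN 2.x style: op descriptor -> primitive descriptor -> primitive.
  auto desc = dnnl::eltwise_forward::desc(
      dnnl::prop_kind::forward_inference, alg, md, alpha, 0.0f);
  auto pd = dnnl::eltwise_forward::primitive_desc(desc, eng);
  dnnl::eltwise_forward(pd).execute(
      strm, {{DNNL_ARG_SRC, src_mem}, {DNNL_ARG_DST, dst_mem}});
  strm.wait();
  return dst_data;
}

int main() {
  // abs: |-2| = 2;  relu6: min(max(x, 0), 6), so 8 clamps to 6.
  auto abs_out = run_eltwise(dnnl::algorithm::eltwise_abs, {-2.0f, 3.0f});
  auto relu6_out =
      run_eltwise(dnnl::algorithm::eltwise_bounded_relu, {-1.0f, 8.0f}, 6.0f);
  return (abs_out[0] == 2.0f && relu6_out[1] == 6.0f) ? 0 : 1;
}
```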