未验证 提交 74c0bc1c 编写于 作者: J jakpiase 提交者: GitHub

added round fwd onednn kernel (#39653)

上级 b95cd3b7
......@@ -262,6 +262,10 @@ using EluMKLDNNFunctor = MKLDNNActivationFunc<T, dnnl::algorithm::eltwise_elu>;
// exp activation: forward functor mapped onto oneDNN's eltwise_exp algorithm.
template <typename T>
using ExpMKLDNNFunctor = MKLDNNActivationFunc<T, dnnl::algorithm::eltwise_exp>;
// round activation: forward functor mapped onto oneDNN's eltwise_round
// algorithm. Note there is no matching Grad functor in this file — round is
// registered forward-only (presumably because its gradient is zero almost
// everywhere; see the FWD_ONLY registration macro).
template <typename T>
using RoundMKLDNNFunctor =
MKLDNNActivationFunc<T, dnnl::algorithm::eltwise_round>;
// relu activation: backward (gradient) functor mapped onto oneDNN's
// eltwise_relu algorithm.
template <typename T>
using ReluMKLDNNGradFunctor =
MKLDNNActivationGradFunc<T, dnnl::algorithm::eltwise_relu>;
......@@ -330,6 +334,10 @@ namespace ops = paddle::operators;
ops::MKLDNNActivationGradKernel< \
ops::grad_functor<paddle::platform::bfloat16>>);
// Registers only the forward MKLDNN kernel for an activation op that has no
// gradient kernel (e.g. round). Registers the float instantiation only, in
// contrast to the fwd+bwd pair registered by REGISTER_ACTIVATION_MKLDNN_KERNEL.
#define REGISTER_ACTIVATION_MKLDNN_KERNEL_FWD_ONLY(act_type, functor) \
REGISTER_OP_KERNEL(act_type, MKLDNN, ::paddle::platform::CPUPlace, \
ops::MKLDNNActivationKernel<ops::functor<float>>);
#define FOR_EACH_MKLDNN_KERNEL_FUNCTOR(__macro) \
__macro(relu6, Relu6MKLDNNFunctor, Relu6MKLDNNGradFunctor); \
__macro(leaky_relu, ReluMKLDNNFunctor, ReluMKLDNNGradFunctor); \
......@@ -341,6 +349,8 @@ namespace ops = paddle::operators;
__macro(exp, ExpMKLDNNFunctor, ExpMKLDNNGradUseOutFunctor);
// Instantiate forward + backward float kernels for every functor pair listed
// in FOR_EACH_MKLDNN_KERNEL_FUNCTOR above.
FOR_EACH_MKLDNN_KERNEL_FUNCTOR(REGISTER_ACTIVATION_MKLDNN_KERNEL);
// round has no gradient functor, so only its forward kernel is registered.
REGISTER_ACTIVATION_MKLDNN_KERNEL_FWD_ONLY(round, RoundMKLDNNFunctor);
// relu additionally gets bfloat16 forward + backward kernel registrations.
REGISTER_ACTIVATION_MKLDNN_BF16_KERNEL(relu, ReluMKLDNNFunctor,
ReluMKLDNNGradFunctor);
REGISTER_ACTIVATION_MKLDNN_BF16_KERNEL(gelu, GeluMKLDNNFunctor,
......
......@@ -328,6 +328,18 @@ class TestMKLDNNMish(TestActivation):
self.attrs = {"use_mkldnn": True}
class TestMKLDNNRound(TestActivation):
    """oneDNN forward test for the `round` op on a 4-D float32 input.

    `round` has no MKLDNN gradient kernel, so only the forward pass is
    exercised; expected output is computed with np.round.
    """

    def setUp(self):
        self.op_type = "round"
        # Random positive data in (0.1, 1) so rounding is well-defined.
        data = np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(np.float32)
        self.inputs = {'X': data}
        self.outputs = {'Out': np.round(data)}
        self.attrs = {"use_mkldnn": True}
class TestMKLDNNSigmoidDim4(TestSigmoid):
def setUp(self):
super(TestMKLDNNSigmoidDim4, self).setUp()
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册