Unverified commit 1893cd6b, authored by Adam, committed by GitHub

Add oneDNN relu6 op (#26037)

* Add oneDNN relu6 op

* Lint fixes
Parent a7c52100
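
Note: the hunks below touch the Relu6 op definition (adds a `use_mkldnn` attribute), the oneDNN activation kernel (maps relu6 onto oneDNN's `eltwise_bounded_relu` with `alpha = threshold`), the Python `relu6` wrapper, and the oneDNN activation tests. relu6 clamps its input to `[0, threshold]` with the threshold defaulting to 6.0, which is exactly what bounded relu computes when `alpha` equals the threshold. A minimal NumPy sketch of that equivalence (illustration only, not part of the commit):

```python
import numpy as np

def relu6_reference(x, threshold=6.0):
    """relu6(x) = min(max(x, 0), threshold); matches oneDNN's
    eltwise_bounded_relu when alpha is set to the threshold."""
    return np.minimum(np.maximum(x, 0.0), threshold)

x = np.array([-2.0, 0.5, 5.0, 8.0], dtype=np.float32)
print(relu6_reference(x))  # [0.  0.5 5.  6. ]
```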
@@ -504,6 +504,9 @@ class Relu6OpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<float>("threshold",
                    "The threshold value of Relu6. Default is 6.0. ")
         .SetDefault(6.0f);
+    AddAttr<bool>("use_mkldnn",
+                  "(bool, default false) Only used in mkldnn kernel")
+        .SetDefault(false);
     AddComment(R"DOC(
 Relu6 Activation Operator.
......
@@ -76,6 +76,8 @@ void eltwise_forward(const framework::ExecutionContext &ctx,
   // paddle uses beta but mkldnn uses alpha for swish
   if (algorithm == mkldnn::algorithm::eltwise_swish) {
     std::swap(alpha, beta);
+  } else if (algorithm == dnnl::algorithm::eltwise_bounded_relu) {
+    alpha = ctx.Attr<T>("threshold");
   }
   PADDLE_ENFORCE(
@@ -119,6 +121,8 @@ void eltwise_grad(const framework::ExecutionContext &ctx,
   // paddle uses beta but mkldnn uses alpha for swish
   if (algorithm == mkldnn::algorithm::eltwise_swish) {
     std::swap(alpha, beta);
+  } else if (algorithm == dnnl::algorithm::eltwise_bounded_relu) {
+    alpha = ctx.Attr<T>("threshold");
   }
   auto diff_dst_tz = framework::vectorize<int64_t>(diff_y->dims());
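
In both the forward and the gradient path the only relu6-specific change is copying the op's `threshold` attribute into oneDNN's `alpha` parameter, since bounded relu clips at `alpha`. A hedged NumPy sketch of what the bounded-relu forward and backward compute (illustration only; the exact behaviour at the clip boundaries is up to oneDNN):

```python
import numpy as np

def bounded_relu_forward(x, alpha):
    # Bounded relu: clip the input to [0, alpha].
    return np.clip(x, 0.0, alpha)

def bounded_relu_backward(x, dy, alpha):
    # Gradient passes through only where 0 < x < alpha;
    # behaviour exactly at 0 or alpha is implementation-defined.
    return dy * ((x > 0.0) & (x < alpha)).astype(dy.dtype)

x = np.array([-1.0, 2.0, 6.0, 7.0], dtype=np.float32)
dy = np.ones_like(x)
print(bounded_relu_forward(x, 6.0))       # [0. 2. 6. 6.]
print(bounded_relu_backward(x, dy, 6.0))  # [0. 1. 0. 0.]
```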
@@ -192,6 +196,10 @@ template <typename T>
 using ReluMKLDNNFunctor =
     MKLDNNActivationFunc<T, mkldnn::algorithm::eltwise_relu>;
+template <typename T>
+using Relu6MKLDNNFunctor =
+    MKLDNNActivationFunc<T, mkldnn::algorithm::eltwise_bounded_relu>;
 template <typename T>
 using SwishMKLDNNFunctor =
     MKLDNNActivationFunc<T, mkldnn::algorithm::eltwise_swish>;
@@ -216,6 +224,10 @@ template <typename T>
 using ReluMKLDNNGradFunctor =
     MKLDNNActivationGradFunc<T, mkldnn::algorithm::eltwise_relu>;
+template <typename T>
+using Relu6MKLDNNGradFunctor =
+    MKLDNNActivationGradFunc<T, mkldnn::algorithm::eltwise_bounded_relu>;
 template <typename T>
 using SwishMKLDNNGradFunctor =
     MKLDNNActivationGradFunc<T, mkldnn::algorithm::eltwise_swish>;
@@ -249,6 +261,7 @@ namespace ops = paddle::operators;
 #define FOR_EACH_MKLDNN_KERNEL_FUNCTOR(__macro)                  \
   __macro(relu, ReluMKLDNNFunctor, ReluMKLDNNGradFunctor);       \
+  __macro(relu6, Relu6MKLDNNFunctor, Relu6MKLDNNGradFunctor);    \
   __macro(leaky_relu, ReluMKLDNNFunctor, ReluMKLDNNGradFunctor); \
   __macro(gelu, GeluMKLDNNFunctor, GeluMKLDNNGradFunctor);       \
   __macro(swish, SwishMKLDNNFunctor, SwishMKLDNNGradFunctor);    \
......
@@ -9375,7 +9375,10 @@ def relu6(x, threshold=6.0, name=None):
         type='relu6',
         inputs={'X': x},
         outputs={'Out': out},
-        attrs={'threshold': threshold})
+        attrs={
+            'threshold': threshold,
+            'use_mkldnn': core.globals()["FLAGS_use_mkldnn"]
+        })
     return out
......
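
With the wrapper above now forwarding `use_mkldnn` from the global `FLAGS_use_mkldnn` flag, existing user code picks up the oneDNN kernel automatically. A hedged usage sketch (assumes a CPU build of Paddle with oneDNN support; here the flag is enabled through the `FLAGS_use_mkldnn` environment variable):

```python
# Illustration only, not part of the commit.
import os
os.environ.setdefault("FLAGS_use_mkldnn", "1")  # set before importing paddle

import numpy as np
import paddle.fluid as fluid

x = fluid.data(name="x", shape=[-1, 4], dtype="float32")
y = fluid.layers.relu6(x, threshold=6.0)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
out, = exe.run(feed={"x": np.array([[-1.0, 0.5, 5.0, 8.0]], dtype="float32")},
               fetch_list=[y])
print(out)  # expected: [[0.  0.5 5.  6. ]]
```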
@@ -19,7 +19,7 @@ import numpy as np
 from scipy.special import expit
 import paddle.fluid.core as core
 from paddle.fluid.tests.unittests.op_test import OpTest
-from paddle.fluid.tests.unittests.test_activation_op import TestActivation, TestRelu, TestTanh, TestSqrt, TestAbs, TestLeakyRelu, TestSwish, TestSigmoid
+from paddle.fluid.tests.unittests.test_activation_op import TestActivation, TestRelu, TestTanh, TestSqrt, TestAbs, TestLeakyRelu, TestSwish, TestRelu6, TestSigmoid
 from paddle.fluid.tests.unittests.test_gelu_op import gelu
 from mkldnn_op_test import check_if_mkldnn_primitives_exist_in_bwd
@@ -34,6 +34,15 @@ class TestMKLDNNReluDim2(TestRelu):
         self.dtype = np.float32
+class TestMKLDNNRelu6Dim2(TestRelu6):
+    def setUp(self):
+        super(TestMKLDNNRelu6Dim2, self).setUp()
+        self.attrs.update({"use_mkldnn": True})
+    def init_dtype(self):
+        self.dtype = np.float32
 class TestMKLDNNLeakyReluDim2(TestLeakyRelu):
     def setUp(self):
         super(TestMKLDNNLeakyReluDim2, self).setUp()
......