Forked from PaddlePaddle / Paddle
Commit 1893cd6b (unverified)
Authored Aug 10, 2020 by Adam
Committed by GitHub on Aug 10, 2020
Add oneDNN relu6 op (#26037)
* Add oneDNN relu6 op
* Lint fixes
Parent commit: a7c52100
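For reference, relu6 clamps its input to the range [0, threshold] (threshold defaults to 6.0, as the op attribute below states), and oneDNN's eltwise_bounded_relu computes the same function when its alpha parameter is set to that threshold; that mapping is what this commit wires up. A minimal NumPy sketch of the forward math (illustrative only, not code from this commit):

import numpy as np

def relu6_reference(x, threshold=6.0):
    # relu6(x) = min(max(x, 0), threshold); oneDNN's eltwise_bounded_relu
    # computes the same thing, with alpha playing the role of threshold.
    return np.minimum(np.maximum(x, 0.0), threshold)

print(relu6_reference(np.array([-1.0, 0.5, 5.0, 7.0], dtype=np.float32)))
# [0.  0.5 5.  6. ]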
Showing 4 changed files with 30 additions and 2 deletions (+30, -2):
paddle/fluid/operators/activation_op.cc (+3, -0)
paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc (+13, -0)
python/paddle/fluid/layers/nn.py (+4, -1)
python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py (+10, -1)
paddle/fluid/operators/activation_op.cc

@@ -504,6 +504,9 @@ class Relu6OpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<float>("threshold",
                    "The threshold value of Relu6. Default is 6.0. ")
         .SetDefault(6.0f);
+    AddAttr<bool>("use_mkldnn",
+                  "(bool, default false) Only used in mkldnn kernel")
+        .SetDefault(false);
     AddComment(R"DOC(
 Relu6 Activation Operator.
paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc

@@ -76,6 +76,8 @@ void eltwise_forward(const framework::ExecutionContext &ctx,
   // paddle uses beta but mkldnn uses alpha for swish
   if (algorithm == mkldnn::algorithm::eltwise_swish) {
     std::swap(alpha, beta);
+  } else if (algorithm == dnnl::algorithm::eltwise_bounded_relu) {
+    alpha = ctx.Attr<T>("threshold");
   }

   PADDLE_ENFORCE(
@@ -119,6 +121,8 @@ void eltwise_grad(const framework::ExecutionContext &ctx,
   // paddle uses beta but mkldnn uses alpha for swish
   if (algorithm == mkldnn::algorithm::eltwise_swish) {
     std::swap(alpha, beta);
+  } else if (algorithm == dnnl::algorithm::eltwise_bounded_relu) {
+    alpha = ctx.Attr<T>("threshold");
   }

   auto diff_dst_tz = framework::vectorize<int64_t>(diff_y->dims());
@@ -192,6 +196,10 @@ template <typename T>
 using ReluMKLDNNFunctor =
     MKLDNNActivationFunc<T, mkldnn::algorithm::eltwise_relu>;

+template <typename T>
+using Relu6MKLDNNFunctor =
+    MKLDNNActivationFunc<T, mkldnn::algorithm::eltwise_bounded_relu>;
+
 template <typename T>
 using SwishMKLDNNFunctor =
     MKLDNNActivationFunc<T, mkldnn::algorithm::eltwise_swish>;
@@ -216,6 +224,10 @@ template <typename T>
 using ReluMKLDNNGradFunctor =
     MKLDNNActivationGradFunc<T, mkldnn::algorithm::eltwise_relu>;

+template <typename T>
+using Relu6MKLDNNGradFunctor =
+    MKLDNNActivationGradFunc<T, mkldnn::algorithm::eltwise_bounded_relu>;
+
 template <typename T>
 using SwishMKLDNNGradFunctor =
     MKLDNNActivationGradFunc<T, mkldnn::algorithm::eltwise_swish>;
@@ -249,6 +261,7 @@ namespace ops = paddle::operators;
 #define FOR_EACH_MKLDNN_KERNEL_FUNCTOR(__macro)                  \
   __macro(relu, ReluMKLDNNFunctor, ReluMKLDNNGradFunctor);       \
+  __macro(relu6, Relu6MKLDNNFunctor, Relu6MKLDNNGradFunctor);    \
   __macro(leaky_relu, ReluMKLDNNFunctor, ReluMKLDNNGradFunctor); \
   __macro(gelu, GeluMKLDNNFunctor, GeluMKLDNNGradFunctor);       \
   __macro(swish, SwishMKLDNNFunctor, SwishMKLDNNGradFunctor);    \
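Both eltwise_forward and eltwise_grad above copy the op's threshold attribute into oneDNN's alpha, since eltwise_bounded_relu takes its upper clamp through alpha. For intuition about what the new Relu6MKLDNNGradFunctor computes, the relu6 gradient passes the upstream gradient only where the input lies strictly inside (0, threshold); a NumPy sketch (illustrative, not code from this commit):

import numpy as np

def relu6_grad_reference(x, dout, threshold=6.0):
    # Gradient of min(max(x, 0), threshold): 1 inside (0, threshold), 0 elsewhere.
    return dout * ((x > 0) & (x < threshold)).astype(dout.dtype)

x = np.array([-1.0, 0.5, 5.0, 7.0], dtype=np.float32)
print(relu6_grad_reference(x, np.ones_like(x)))  # [0. 1. 1. 0.]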
python/paddle/fluid/layers/nn.py

@@ -9375,7 +9375,10 @@ def relu6(x, threshold=6.0, name=None):
         type='relu6',
         inputs={'X': x},
         outputs={'Out': out},
-        attrs={'threshold': threshold})
+        attrs={
+            'threshold': threshold,
+            'use_mkldnn': core.globals()["FLAGS_use_mkldnn"]
+        })
     return out
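With the change above, fluid.layers.relu6 forwards the global FLAGS_use_mkldnn flag to the op, so the oneDNN relu6 kernel can be selected on CPU. A minimal usage sketch against the 2020-era fluid static-graph API; the environment-variable setup and input values here are illustrative assumptions, not part of the diff:

import os
os.environ["FLAGS_use_mkldnn"] = "1"  # assumption: flag is set before paddle.fluid is imported

import numpy as np
import paddle.fluid as fluid

x = fluid.data(name="x", shape=[-1, 4], dtype="float32")
out = fluid.layers.relu6(x, threshold=6.0)  # now also passes use_mkldnn from FLAGS_use_mkldnn

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
res, = exe.run(feed={"x": np.array([[-1.0, 0.5, 5.0, 7.0]], dtype="float32")},
               fetch_list=[out])
print(res)  # expected: [[0.  0.5 5.  6. ]]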
python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py

@@ -19,7 +19,7 @@ import numpy as np
 from scipy.special import expit
 import paddle.fluid.core as core
 from paddle.fluid.tests.unittests.op_test import OpTest
-from paddle.fluid.tests.unittests.test_activation_op import TestActivation, TestRelu, TestTanh, TestSqrt, TestAbs, TestLeakyRelu, TestSwish, TestSigmoid
+from paddle.fluid.tests.unittests.test_activation_op import TestActivation, TestRelu, TestTanh, TestSqrt, TestAbs, TestLeakyRelu, TestSwish, TestRelu6, TestSigmoid
 from paddle.fluid.tests.unittests.test_gelu_op import gelu
 from mkldnn_op_test import check_if_mkldnn_primitives_exist_in_bwd
@@ -34,6 +34,15 @@ class TestMKLDNNReluDim2(TestRelu):
         self.dtype = np.float32


+class TestMKLDNNRelu6Dim2(TestRelu6):
+    def setUp(self):
+        super(TestMKLDNNRelu6Dim2, self).setUp()
+
+        self.attrs.update({"use_mkldnn": True})
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+
 class TestMKLDNNLeakyReluDim2(TestLeakyRelu):
     def setUp(self):
         super(TestMKLDNNLeakyReluDim2, self).setUp()
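To exercise just the new test case, the class added above can be run directly with unittest; a sketch, assuming the mkldnn and unittests test directories are importable (as this file's own imports already require):

import unittest

# Assumes test_activation_mkldnn_op.py (the file changed above) is importable from the current path.
from test_activation_mkldnn_op import TestMKLDNNRelu6Dim2

suite = unittest.TestLoader().loadTestsFromTestCase(TestMKLDNNRelu6Dim2)
unittest.TextTestRunner(verbosity=2).run(suite)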