Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
Crayon鑫
Paddle
提交
2f116534
P
Paddle
项目概览
Crayon鑫
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
2f116534
编写于
2月 25, 2021
作者:
J
jakpiase
提交者:
GitHub
2月 25, 2021
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
OneDNN hardswish integration (#30211)
上级
912022fa
变更
14
隐藏空白更改
内联
并排
Showing
14 changed file
with
151 addition
and
17 deletion
+151
-17
paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass.cc
...d/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass.cc
+8
-0
paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass.h
...id/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass.h
+7
-0
paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass_tester.cc
...work/ir/mkldnn/conv_activation_mkldnn_fuse_pass_tester.cc
+3
-0
paddle/fluid/framework/ir/mkldnn/fc_act_mkldnn_fuse_pass.cc
paddle/fluid/framework/ir/mkldnn/fc_act_mkldnn_fuse_pass.cc
+3
-1
paddle/fluid/framework/ir/mkldnn/fc_act_mkldnn_fuse_pass.h
paddle/fluid/framework/ir/mkldnn/fc_act_mkldnn_fuse_pass.h
+2
-2
paddle/fluid/framework/ir/mkldnn/fc_act_mkldnn_fuse_pass_tester.cc
...uid/framework/ir/mkldnn/fc_act_mkldnn_fuse_pass_tester.cc
+31
-0
paddle/fluid/inference/api/paddle_pass_builder.cc
paddle/fluid/inference/api/paddle_pass_builder.cc
+3
-2
paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc
paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc
+17
-8
paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc
paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc
+4
-0
paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc
paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc
+6
-0
python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_activation_fuse_pass.py
...sts/ir/inference/test_mkldnn_conv_activation_fuse_pass.py
+3
-3
python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_fc_act_fuse_pass.py
...ts/unittests/ir/inference/test_mkldnn_fc_act_fuse_pass.py
+22
-0
python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py
...fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py
+37
-1
python/paddle/fluid/tests/unittests/test_activation_op.py
python/paddle/fluid/tests/unittests/test_activation_op.py
+5
-0
未找到文件。
paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass.cc
浏览文件 @
2f116534
...
...
@@ -135,3 +135,11 @@ REGISTER_PASS_CAPABILITY(conv_swish_mkldnn_fuse_pass)
paddle
::
framework
::
compatible
::
OpVersionComparatorCombination
()
.
LE
(
"conv2d"
,
1
)
.
EQ
(
"swish"
,
0
));
REGISTER_PASS
(
conv_hard_swish_mkldnn_fuse_pass
,
paddle
::
framework
::
ir
::
Conv2DHardSwishFusePass
);
REGISTER_PASS_CAPABILITY
(
conv_hard_swish_mkldnn_fuse_pass
)
.
AddCombination
(
paddle
::
framework
::
compatible
::
OpVersionComparatorCombination
()
.
LE
(
"conv2d"
,
1
)
.
EQ
(
"hard_swish"
,
0
));
paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass.h
浏览文件 @
2f116534
...
...
@@ -60,6 +60,13 @@ class Conv2DSwishFusePass : public ConvActivationFusePass {
public:
std
::
string
activation_type
()
const
{
return
"swish"
;
}
};
/*
* Fuse Conv and HardSwish class
*/
class
Conv2DHardSwishFusePass
:
public
ConvActivationFusePass
{
public:
std
::
string
activation_type
()
const
{
return
"hard_swish"
;
}
};
}
// namespace ir
}
// namespace framework
}
// namespace paddle
paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass_tester.cc
浏览文件 @
2f116534
...
...
@@ -136,6 +136,9 @@ TEST(ConvActivationFusePass, conv_leaky_relu_fuse_pass) {
}
TEST
(
ConvActivationFusePass
,
conv_relu6_fuse_pass
)
{
MainTest
(
"relu6"
);
}
TEST
(
ConvActivationFusePass
,
conv_swish_fuse_pass
)
{
MainTest
(
"swish"
);
}
TEST
(
ConvActivationFusePass
,
conv_hard_swish_fuse_pass
)
{
MainTest
(
"hard_swish"
);
}
}
// namespace ir
}
// namespace framework
...
...
paddle/fluid/framework/ir/mkldnn/fc_act_mkldnn_fuse_pass.cc
浏览文件 @
2f116534
...
...
@@ -25,7 +25,8 @@ namespace ir {
using
string
::
PrettyLogDetail
;
void
FuseFCActOneDNNPass
::
ApplyImpl
(
Graph
*
graph
)
const
{
std
::
vector
<
std
::
string
>
act_types
=
{
"gelu"
,
"tanh"
,
"sigmoid"
};
std
::
vector
<
std
::
string
>
act_types
=
{
"gelu"
,
"tanh"
,
"sigmoid"
,
"hard_swish"
};
for
(
std
::
string
act_type
:
act_types
)
FuseFCAct
(
graph
,
act_type
);
}
...
...
@@ -97,4 +98,5 @@ REGISTER_PASS_CAPABILITY(fc_act_mkldnn_fuse_pass)
.
LE
(
"fc"
,
0
)
.
LE
(
"gelu"
,
0
)
.
LE
(
"sigmoid"
,
0
)
.
LE
(
"hard_swish"
,
0
)
.
LE
(
"tanh"
,
0
));
paddle/fluid/framework/ir/mkldnn/fc_act_mkldnn_fuse_pass.h
浏览文件 @
2f116534
...
...
@@ -27,8 +27,8 @@ namespace ir {
* \brief Fuse the FC and activation operators into single OneDNN's
* FC with post-op.
*
* \note Currently only GeLU,
sigmoid and tanh are supported as an activatio
n
* function.
* \note Currently only GeLU,
hardswish, sigmoid and tanh are supported as a
n
*
activation
function.
*/
class
FuseFCActOneDNNPass
:
public
FusePassBase
{
public:
...
...
paddle/fluid/framework/ir/mkldnn/fc_act_mkldnn_fuse_pass_tester.cc
浏览文件 @
2f116534
...
...
@@ -201,6 +201,37 @@ TEST(FuseFCActOneDNNPass, FuseWithSigmoid) {
}
}
TEST
(
FuseFCActOneDNNPass
,
FuseWithHardSwish
)
{
auto
prog
=
test
::
BuildProgramDesc
({
"x"
,
"fc_y"
,
"act_y"
},
{
"weights"
,
"bias"
});
test
::
CreateOp
(
&
prog
,
"fc"
,
{
{
"Input"
,
"x"
},
{
"Weights"
,
"weights"
},
{
"Bias"
,
"bias"
},
},
{{
"Out"
,
"fc_y"
}});
test
::
CreateOp
(
&
prog
,
"hard_swish"
,
{{
"Input"
,
"fc_y"
}},
{{
"Out"
,
"act_y"
}},
false
);
Graph
graph
(
prog
);
constexpr
int
removed_nodes_count
=
2
;
EXPECT_TRUE
(
test
::
RunPassAndAssert
(
&
graph
,
"fc_act_mkldnn_fuse_pass"
,
"x"
,
"act_y"
,
removed_nodes_count
));
EXPECT_TRUE
(
test
::
AssertOpsCount
(
graph
,
{{
"fc"
,
1
},
{
"hard_swish"
,
0
}}));
for
(
const
auto
*
node
:
graph
.
Nodes
())
{
if
(
node
->
IsOp
()
&&
node
->
Op
()
->
Type
()
==
"fc"
)
{
const
auto
*
op
=
node
->
Op
();
ASSERT_TRUE
(
op
->
HasAttr
(
"use_mkldnn"
));
EXPECT_TRUE
(
BOOST_GET_CONST
(
bool
,
op
->
GetAttr
(
"use_mkldnn"
)));
ASSERT_TRUE
(
op
->
HasAttr
(
"activation_type"
));
auto
act_type
=
BOOST_GET_CONST
(
std
::
string
,
op
->
GetAttr
(
"activation_type"
));
EXPECT_EQ
(
act_type
.
compare
(
"hard_swish"
),
0
);
}
}
}
TEST
(
FuseFCActOneDNNPass
,
pass_op_version_check
)
{
ASSERT_TRUE
(
paddle
::
framework
::
compatible
::
PassVersionCheckerRegistrar
::
GetInstance
()
...
...
paddle/fluid/inference/api/paddle_pass_builder.cc
浏览文件 @
2f116534
...
...
@@ -230,12 +230,13 @@ void CpuPassStrategy::EnableMKLDNN() {
"conv_leaky_relu_mkldnn_fuse_pass"
,
//
"conv_relu6_mkldnn_fuse_pass"
,
//
"conv_swish_mkldnn_fuse_pass"
,
//
"conv_hard_swish_mkldnn_fuse_pass"
,
//
"scale_matmul_fuse_pass"
,
//
"reshape_transpose_matmul_mkldnn_fuse_pass"
,
//
"matmul_transpose_reshape_fuse_pass"
,
//
// Disabled due to topology-dependent speed-up
//
"fc_mkldnn_pass",
//
"fc_act_mkldnn_fuse_pass",
//"fc_mkldnn_pass",
//"fc_act_mkldnn_fuse_pass",
"batch_norm_act_fuse_pass"
,
// TODO(intel): Please fix the bug on windows.
// https://github.com/PaddlePaddle/Paddle/issues/29710
...
...
paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc
浏览文件 @
2f116534
...
...
@@ -219,6 +219,10 @@ template <typename T>
using
SwishMKLDNNFunctor
=
MKLDNNActivationFunc
<
T
,
mkldnn
::
algorithm
::
eltwise_swish
>
;
template
<
typename
T
>
using
HardSwishMKLDNNFunctor
=
MKLDNNActivationFunc
<
T
,
mkldnn
::
algorithm
::
eltwise_hardswish
>
;
template
<
typename
T
>
using
SigmoidMKLDNNFunctor
=
MKLDNNActivationFunc
<
T
,
mkldnn
::
algorithm
::
eltwise_logistic
>
;
...
...
@@ -247,6 +251,10 @@ template <typename T>
using
SwishMKLDNNGradFunctor
=
MKLDNNActivationGradFunc
<
T
,
mkldnn
::
algorithm
::
eltwise_swish
>
;
template
<
typename
T
>
using
HardSwishMKLDNNGradFunctor
=
MKLDNNActivationGradFunc
<
T
,
mkldnn
::
algorithm
::
eltwise_hardswish
>
;
template
<
typename
T
>
using
SigmoidMKLDNNGradFunctor
=
MKLDNNActivationGradFunc
<
T
,
mkldnn
::
algorithm
::
eltwise_logistic
>
;
...
...
@@ -284,14 +292,15 @@ namespace ops = paddle::operators;
act_type##_grad, MKLDNN, ::paddle::platform::CPUPlace, \
ops::MKLDNNActivationGradKernel<ops::grad_functor<float>>);
#define FOR_EACH_MKLDNN_KERNEL_FUNCTOR(__macro) \
__macro(relu, ReluMKLDNNFunctor, ReluMKLDNNGradFunctor); \
__macro(relu6, Relu6MKLDNNFunctor, Relu6MKLDNNGradFunctor); \
__macro(leaky_relu, ReluMKLDNNFunctor, ReluMKLDNNGradFunctor); \
__macro(swish, SwishMKLDNNFunctor, SwishMKLDNNGradFunctor); \
__macro(sigmoid, SigmoidMKLDNNFunctor, SigmoidMKLDNNGradFunctor); \
__macro(tanh, TanhMKLDNNFunctor, TanhMKLDNNGradFunctor); \
__macro(sqrt, SqrtMKLDNNFunctor, SqrtMKLDNNGradFunctor); \
#define FOR_EACH_MKLDNN_KERNEL_FUNCTOR(__macro) \
__macro(relu, ReluMKLDNNFunctor, ReluMKLDNNGradFunctor); \
__macro(relu6, Relu6MKLDNNFunctor, Relu6MKLDNNGradFunctor); \
__macro(leaky_relu, ReluMKLDNNFunctor, ReluMKLDNNGradFunctor); \
__macro(swish, SwishMKLDNNFunctor, SwishMKLDNNGradFunctor); \
__macro(hardswish, HardSwishMKLDNNFunctor, HardSwishMKLDNNGradFunctor); \
__macro(sigmoid, SigmoidMKLDNNFunctor, SigmoidMKLDNNGradFunctor); \
__macro(tanh, TanhMKLDNNFunctor, TanhMKLDNNGradFunctor); \
__macro(sqrt, SqrtMKLDNNFunctor, SqrtMKLDNNGradFunctor); \
__macro(abs, AbsMKLDNNFunctor, AbsMKLDNNGradFunctor);
FOR_EACH_MKLDNN_KERNEL_FUNCTOR
(
REGISTER_ACTIVATION_MKLDNN_KERNEL
);
...
...
paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc
浏览文件 @
2f116534
...
...
@@ -271,6 +271,10 @@ class ConvMKLDNNHandlerT
constexpr
float
scale
=
1.0
f
;
post_operations
.
append_eltwise
(
scale
,
mkldnn
::
algorithm
::
eltwise_swish
,
fuse_alpha
,
fuse_beta
);
}
else
if
(
fuse_activation
==
"hard_swish"
)
{
constexpr
float
scale
=
1.0
f
;
post_operations
.
append_eltwise
(
scale
,
mkldnn
::
algorithm
::
eltwise_hardswish
,
fuse_alpha
,
fuse_beta
);
}
conv_attr
.
set_post_ops
(
post_operations
);
return
conv_attr
;
...
...
paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc
浏览文件 @
2f116534
...
...
@@ -489,6 +489,12 @@ class FCPrimitiveFactory {
constexpr
float
beta
=
0.0
f
;
post_operations
.
append_eltwise
(
scale
,
mkldnn
::
algorithm
::
eltwise_logistic
,
alpha
,
beta
);
}
else
if
(
ctx
.
Attr
<
std
::
string
>
(
"activation_type"
)
==
"hard_swish"
)
{
constexpr
float
scale
=
1.0
f
;
constexpr
float
alpha
=
0.0
f
;
constexpr
float
beta
=
0.0
f
;
post_operations
.
append_eltwise
(
scale
,
mkldnn
::
algorithm
::
eltwise_hardswish
,
alpha
,
beta
);
}
attributes
.
set_post_ops
(
post_operations
);
...
...
python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_activation_fuse_pass.py
浏览文件 @
2f116534
...
...
@@ -93,13 +93,13 @@ class ConvActivationMkldnnFusePassTest_4(ConvActivationMkldnnFusePassTest):
self
.
pass_name
=
'conv_relu6_mkldnn_fuse_pass'
class
ConvActivationMkldnnFusePassTest_
4
(
ConvActivationMkldnnFusePassTest
):
class
ConvActivationMkldnnFusePassTest_
5
(
ConvActivationMkldnnFusePassTest
):
def
set_params
(
self
):
self
.
conv_num_filters
=
5
self
.
conv_filter_size
=
5
self
.
conv_bias_attr
=
True
self
.
act
=
"swish"
self
.
pass_name
=
'conv_swish_mkldnn_fuse_pass'
self
.
act
=
"
hard_
swish"
self
.
pass_name
=
'conv_
hard_
swish_mkldnn_fuse_pass'
if
__name__
==
"__main__"
:
...
...
python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_fc_act_fuse_pass.py
浏览文件 @
2f116534
...
...
@@ -112,5 +112,27 @@ class FCSigmoidOneDnnFusePassTest(InferencePassTest):
self
.
assertTrue
(
PassVersionChecker
.
IsCompatible
(
self
.
pass_name
))
class
FCHardSwishOneDnnFusePassTest
(
InferencePassTest
):
def
setUp
(
self
):
self
.
set_params
()
with
fluid
.
program_guard
(
self
.
main_program
,
self
.
startup_program
):
data
=
fluid
.
data
(
name
=
"data"
,
shape
=
[
-
1
,
128
,
768
],
dtype
=
"float32"
)
fc_out
=
fluid
.
layers
.
fc
(
input
=
data
,
size
=
3072
,
num_flatten_dims
=
2
)
hardswish_out
=
fluid
.
layers
.
hard_swish
(
fc_out
)
self
.
feeds
=
{
"data"
:
np
.
random
.
random
((
1
,
128
,
768
)).
astype
(
"float32"
)}
self
.
fetch_list
=
[
hardswish_out
]
self
.
enable_mkldnn
=
True
def
set_params
(
self
):
self
.
pass_name
=
"fc_act_mkldnn_fuse_pass"
def
test_check_output
(
self
):
self
.
check_output
()
self
.
assertTrue
(
PassVersionChecker
.
IsCompatible
(
self
.
pass_name
))
if
__name__
==
"__main__"
:
unittest
.
main
()
python/paddle/fluid/tests/unittests/mkldnn/test_activation_mkldnn_op.py
浏览文件 @
2f116534
...
...
@@ -19,7 +19,7 @@ import numpy as np
from
scipy.special
import
expit
import
paddle.fluid.core
as
core
from
paddle.fluid.tests.unittests.op_test
import
OpTest
,
convert_float_to_uint16
from
paddle.fluid.tests.unittests.test_activation_op
import
TestActivation
,
TestRelu
,
TestTanh
,
TestSqrt
,
TestAbs
,
TestLeakyRelu
,
TestSwish
,
TestRelu6
,
TestSigmoid
from
paddle.fluid.tests.unittests.test_activation_op
import
TestActivation
,
TestRelu
,
TestTanh
,
TestSqrt
,
TestAbs
,
TestLeakyRelu
,
TestSwish
,
Test
HardSwish
,
Test
Relu6
,
TestSigmoid
from
paddle.fluid.tests.unittests.test_gelu_op
import
gelu
from
mkldnn_op_test
import
check_if_mkldnn_primitives_exist_in_bwd
...
...
@@ -163,6 +163,16 @@ class TestMKLDNNSwishDim2(TestSwish):
self
.
dtype
=
np
.
float32
class
TestMKLDNNHardSwishDim2
(
TestHardSwish
):
def
setUp
(
self
):
super
(
TestMKLDNNHardSwishDim2
,
self
).
setUp
()
self
.
attrs
[
"use_mkldnn"
]
=
True
def
init_dtype
(
self
):
self
.
dtype
=
np
.
float32
class
TestMKLDNNSigmoidDim2
(
TestSigmoid
):
def
setUp
(
self
):
super
(
TestMKLDNNSigmoidDim2
,
self
).
setUp
()
...
...
@@ -324,6 +334,32 @@ class TestMKLDNNSwishDim4(TestSwish):
self
.
dtype
=
np
.
float32
def
ref_hardswish
(
x
,
threshold
=
6.0
,
scale
=
6.0
,
offset
=
3.0
):
return
(
x
*
np
.
minimum
(
np
.
maximum
(
x
+
offset
,
0.
),
threshold
)
/
scale
).
astype
(
x
.
dtype
)
class
TestMKLDNNHardSwishDim4
(
TestHardSwish
):
def
setUp
(
self
):
super
(
TestMKLDNNHardSwishDim4
,
self
).
setUp
()
x
=
np
.
random
.
uniform
(
0.1
,
1
,
[
2
,
4
,
3
,
5
]).
astype
(
self
.
dtype
)
threshold
=
6.0
scale
=
6.0
offset
=
3.0
x
[
np
.
abs
(
x
+
offset
)
<
0.005
]
=
0.02
x
[
np
.
abs
(
x
-
threshold
+
offset
)
<
0.005
]
=
threshold
-
offset
+
0.02
out
=
ref_hardswish
(
x
,
threshold
,
scale
,
offset
)
self
.
inputs
=
{
'X'
:
OpTest
.
np_dtype_to_fluid_dtype
(
x
)}
self
.
outputs
=
{
'Out'
:
out
}
self
.
attrs
=
{
"use_mkldnn"
:
True
}
def
init_dtype
(
self
):
self
.
dtype
=
np
.
float32
class
TestMKLDNNSigmoidDim4
(
TestSigmoid
):
def
setUp
(
self
):
super
(
TestMKLDNNSigmoidDim4
,
self
).
setUp
()
...
...
python/paddle/fluid/tests/unittests/test_activation_op.py
浏览文件 @
2f116534
...
...
@@ -1478,6 +1478,9 @@ class TestHardSwish(TestActivation):
self
.
op_type
=
'hard_swish'
self
.
init_dtype
()
from
op_test
import
skip_check_grad_ci
skip_check_grad_ci
(
reason
=
"not implemented yet"
)
np
.
random
.
seed
(
1024
)
x
=
np
.
random
.
uniform
(
-
6
,
6
,
[
10
,
12
]).
astype
(
self
.
dtype
)
threshold
=
6.0
...
...
@@ -1495,6 +1498,8 @@ class TestHardSwish(TestActivation):
def
test_check_grad
(
self
):
if
self
.
dtype
==
np
.
float16
:
return
return
# not implemented yet
self
.
check_grad
([
'X'
],
'Out'
)
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录