BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit 33fbb66e (unverified)
Authored by zhenlin on Dec 13, 2021; committed via GitHub on Dec 13, 2021

update 3 tests (#37922)

* update 3 tests
* fix typo error

Parent: e7f5d325

Showing 6 changed files with 485 additions and 259 deletions (+485 −259):
  paddle/fluid/framework/ir/conv_affine_channel_fuse_pass.cc                                          +15  −11
  paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc                                          +6   −3
  python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt                                      +1   −0
  python/paddle/fluid/tests/unittests/ir/inference/test_conv_affine_channel_fuse_pass.py               +142 −210
  python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_fuse_pass.py              +138 −35
  python/paddle/fluid/tests/unittests/ir/inference/test_conv_eltwiseadd_affine_channel_fuse_pass.py    +183 −0
paddle/fluid/framework/ir/conv_affine_channel_fuse_pass.cc

-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
...
@@ -130,7 +130,7 @@ ConvAffineChannelFusePass::ConvAffineChannelFusePass() {
       .IsType<std::vector<int>>()
       .End()
       .AddAttr("data_format")
-      .IsStringIn({"NCHW", "NHWC", "AnyLayout"})
+      .IsStringIn({"NCHW" /*, "NHWC", "AnyLayout"*/})
       .End();
   AddOpCompat(OpCompat("affine_channel"))
...
@@ -148,7 +148,7 @@ ConvAffineChannelFusePass::ConvAffineChannelFusePass() {
       .IsTensor()
       .End()
       .AddAttr("data_layout")
-      .IsStringIn({"NCHW", "NHWC", "AnyLayout"})
+      .IsStringIn({"NCHW" /*, "NHWC", "AnyLayout"*/})
       .End();
   AddOpCompat(OpCompat("elementwise_add"))
...
@@ -197,19 +197,23 @@ void ConvAffineChannelFusePass::ApplyImpl(ir::Graph* graph) const {
     GET_CONV_BN_NODES(conv_ac_pattern);

-    // Get affine_channel bias for resizing eltwise_y!
-    auto* ac_bias_tensor =
-        scope->FindVar(ac_bias->Name())->GetMutable<LoDTensor>();
-
     // Create eltwise_y (conv bias) variable
     VarDesc eltwise_y_in_desc(
         patterns::PDNodeName(name_scope_, "eltwise_y_in"));
-    // Set shape && datatype manually
-    eltwise_y_in_desc.SetShape(framework::vectorize(ac_bias_tensor->dims()));
-    eltwise_y_in_desc.SetDataType(ac_bias_tensor->type());
-    eltwise_y_in_desc.SetLoDLevel(ac_bias->Var()->GetLoDLevel());
     eltwise_y_in_desc.SetPersistable(true);
-    // Initialize eltwise_y
     auto* eltwise_y_in_node = g->CreateVarNode(&eltwise_y_in_desc);
     auto* eltwise_y_in_tensor =
         scope->Var(eltwise_y_in_node->Name())->GetMutable<LoDTensor>();
+
+    // Get affine_channel bias
+    auto* ac_bias_tensor =
+        scope->FindVar(ac_bias->Name())->GetMutable<LoDTensor>();
+
+    // Initialize eltwise_y
     eltwise_y_in_tensor->Resize(ac_bias_tensor->dims());
     std::fill_n(eltwise_y_in_tensor->mutable_data<float>(platform::CPUPlace()),
                 eltwise_y_in_tensor->numel(), 0.0f);
...
@@ -278,7 +282,7 @@ ConvEltwiseAddAffineChannelFusePass::ConvEltwiseAddAffineChannelFusePass() {
       .IsType<std::vector<int>>()
       .End()
       .AddAttr("data_format")
-      .IsStringIn({"NCHW", "NHWC", "AnyLayout"})
+      .IsStringIn({"NCHW" /*, "NHWC", "AnyLayout"*/})
       .End();
   AddOpCompat(OpCompat("affine_channel"))
       .AddInput("X")
...
@@ -295,7 +299,7 @@ ConvEltwiseAddAffineChannelFusePass::ConvEltwiseAddAffineChannelFusePass() {
       .IsTensor()
       .End()
       .AddAttr("data_layout")
-      .IsStringIn({"NCHW", "NHWC", "AnyLayout"})
+      .IsStringIn({"NCHW" /*, "NHWC", "AnyLayout"*/})
       .End();
   AddOpCompat(OpCompat("elementwise_add"))
       .AddInput("X")
...
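A quick aside on what this pass computes, as a hedged NumPy sketch rather than the pass itself: affine_channel applies a per-output-channel Scale and Bias to the conv result, so the pass can fold Scale into the conv filter and hand Bias to an elementwise_add via the zero-initialized eltwise_y tensor created above. The conv2d_nchw helper below is purely illustrative (stride 1, no padding, no groups):

import numpy as np

def conv2d_nchw(x, w):
    # x: [N, Cin, H, W], w: [Cout, Cin, kh, kw] -> [N, Cout, H-kh+1, W-kw+1]
    n, cin, h, width = x.shape
    cout, _, kh, kw = w.shape
    out = np.zeros((n, cout, h - kh + 1, width - kw + 1), dtype=x.dtype)
    for i in range(out.shape[2]):
        for j in range(out.shape[3]):
            patch = x[:, :, i:i + kh, j:j + kw]  # [N, Cin, kh, kw]
            out[:, :, i, j] = np.tensordot(patch, w, axes=([1, 2, 3], [1, 2, 3]))
    return out

rng = np.random.default_rng(0)
x = rng.standard_normal((1, 3, 8, 8)).astype(np.float32)
w = rng.standard_normal((4, 3, 3, 3)).astype(np.float32)
scale = rng.standard_normal(4).astype(np.float32)  # affine_channel Scale
bias = rng.standard_normal(4).astype(np.float32)   # affine_channel Bias

# Unfused graph: conv2d followed by a per-channel affine on axis 1 (NCHW).
unfused = conv2d_nchw(x, w) * scale[None, :, None, None] + bias[None, :, None, None]

# Fused graph: Scale folded into the filter (conv is linear in its weights);
# Bias becomes the elementwise_add input that eltwise_y is resized to hold.
fused = conv2d_nchw(x, w * scale[:, None, None, None]) + bias[None, :, None, None]

assert np.allclose(unfused, fused, atol=1e-4)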
paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc

-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
...
@@ -57,7 +57,7 @@ ConvElementwiseAddFusePass::ConvElementwiseAddFusePass() {
       .AddAttr("dilations")
       .End()
       .AddAttr("data_format")
-      .IsStringIn({"NCHW", "NHWC", "AnyLayout"})
+      .IsStringIn({"NCHW" /*, "NHWC", "AnyLayout"*/})
       .End();
   AddOpCompat(OpCompat("elementwise_add"))
...
@@ -87,7 +87,7 @@ void ConvElementwiseAddFusePass::ApplyImpl(ir::Graph* graph) const {
   patterns::ConvElementwiseadd pattern(gpd.mutable_pattern(), pattern_name);
   pattern(x);

+  int found_conv_eltwise_count = 0;
   auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
                      Graph* g) {
     if (!IsCompat(subgraph, g)) {
...
@@ -135,9 +135,12 @@ void ConvElementwiseAddFusePass::ApplyImpl(ir::Graph* graph) const {
     // Delete the unneeded nodes.
     GraphSafeRemoveNodes(graph, {conv_op, conv_out, elementwise_add_op});
+    found_conv_eltwise_count++;
   };

   gpd(graph, handler);
+  // check if detect conv2d_fusion subgraph!
+  AddStatis(found_conv_eltwise_count);
 }

 }  // namespace ir
...
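For readers unfamiliar with the elementwise_add broadcast rule this fused pattern relies on, here is a hedged NumPy sketch (my own illustration, not Paddle code): with axis=1, a [Cout] bias Y is aligned to dimension 1 of the NCHW conv output X and broadcast over N, H and W:

import numpy as np

def elementwise_add_axis(x, y, axis):
    # NumPy rendering of fluid's elementwise_add(X, Y, axis): align Y's dims
    # to X starting at `axis`, pad the rest with 1s, then broadcast-add.
    shape = [1] * x.ndim
    shape[axis:axis + y.ndim] = y.shape
    return x + y.reshape(shape)

conv_out = np.random.rand(2, 8, 5, 5).astype(np.float32)  # NCHW conv output
bias = np.random.rand(8).astype(np.float32)               # one value per channel

out = elementwise_add_axis(conv_out, bias, axis=1)
assert out.shape == (2, 8, 5, 5)
assert np.allclose(out[:, 3], conv_out[:, 3] + bias[3])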
python/paddle/fluid/tests/unittests/ir/inference/CMakeLists.txt

@@ -80,6 +80,7 @@ if (WITH_MKLDNN AND TENSORRT_FOUND AND WITH_GPU)
         set_tests_properties(test_fc_fuse_pass PROPERTIES TIMEOUT 240)
         set_tests_properties(test_simplify_with_basic_ops_pass_autoscan PROPERTIES TIMEOUT 60)
         set_tests_properties(test_adaptive_pool2d_convert_global_pass_autoscan PROPERTIES TIMEOUT 60)
+        set_tests_properties(test_conv_eltwiseadd_affine_channel_fuse_pass PROPERTIES TIMEOUT 100)
 endif()

 if (WITH_MKLDNN)
...
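The TIMEOUT 100 property added above is enforced by ctest, which kills the new test after 100 seconds. A hedged sketch of invoking just this test from a configured build tree (the "build" path is illustrative):

import subprocess

subprocess.run(
    ["ctest", "-R", "test_conv_eltwiseadd_affine_channel_fuse_pass", "-V"],
    cwd="build",  # illustrative: your CMake build directory
    check=True)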
python/paddle/fluid/tests/unittests/ir/inference/test_conv_affine_channel_fuse_pass.py

-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
...
@@ -12,216 +12,148 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import unittest
-import numpy as np
-from inference_pass_test import InferencePassTest
-import paddle.fluid as fluid
-import paddle.fluid.core as core
-from paddle.fluid.core import PassVersionChecker
-
-
-class ConvAffineChannelFusePassExplicitPaddingTest(InferencePassTest):
-    def setUp(self):
-        with fluid.program_guard(self.main_program, self.startup_program):
-            data = fluid.data(
-                name="data", shape=[-1, 3, 64, 64], dtype="float32")
-            conv_out = fluid.layers.conv2d(
-                input=data,
-                num_filters=3,
-                filter_size=3,
-                groups=3,
-                padding=[1, 1, 1, 1],
-                bias_attr=False,
-                act=None)
-            input_scale = fluid.layers.create_parameter(
-                shape=[3], dtype="float32")
-            input_bias = fluid.layers.create_parameter(
-                shape=[3], dtype="float32")
-            ac_out = fluid.layers.affine_channel(
-                x=conv_out, scale=input_scale, bias=input_bias)
-
-        self.feeds = {
-            "data": np.random.random([1, 3, 64, 64]).astype("float32"),
-        }
-        self.fetch_list = [ac_out]
-
-    def test_check_output(self):
-        self.check_output()
-        self.assertTrue(
-            PassVersionChecker.IsCompatible('conv_affine_channel_fuse_pass'))
-
-
-class ConvAffineChannelFusePassValidPaddingTest(InferencePassTest):
-    def setUp(self):
-        with fluid.program_guard(self.main_program, self.startup_program):
-            data = fluid.data(
-                name="data", shape=[-1, 3, 64, 64], dtype="float32")
-            conv_out = fluid.layers.conv2d(
-                input=data,
-                num_filters=3,
-                filter_size=3,
-                groups=3,
-                padding='VALID',
-                bias_attr=False,
-                act=None)
-            input_scale = fluid.layers.create_parameter(
-                shape=[3], dtype="float32")
-            input_bias = fluid.layers.create_parameter(
-                shape=[3], dtype="float32")
-            ac_out = fluid.layers.affine_channel(
-                x=conv_out, scale=input_scale, bias=input_bias)
-
-        self.feeds = {
-            "data": np.random.random([1, 3, 64, 64]).astype("float32"),
-        }
-        self.fetch_list = [ac_out]
-
-    def test_check_output(self):
-        self.check_output()
-        self.assertTrue(
-            PassVersionChecker.IsCompatible('conv_affine_channel_fuse_pass'))
-
-
-class ConvAffineChannelFusePassSamePaddingTest(InferencePassTest):
-    def setUp(self):
-        with fluid.program_guard(self.main_program, self.startup_program):
-            data = fluid.data(
-                name="data", shape=[-1, 3, 64, 64], dtype="float32")
-            conv_out = fluid.layers.conv2d(
-                input=data,
-                num_filters=3,
-                filter_size=3,
-                groups=3,
-                padding='SAME',
-                bias_attr=False,
-                act=None)
-            input_scale = fluid.layers.create_parameter(
-                shape=[3], dtype="float32")
-            input_bias = fluid.layers.create_parameter(
-                shape=[3], dtype="float32")
-            ac_out = fluid.layers.affine_channel(
-                x=conv_out, scale=input_scale, bias=input_bias)
-
-        self.feeds = {
-            "data": np.random.random([1, 3, 64, 64]).astype("float32"),
-        }
-        self.fetch_list = [ac_out]
-
-    def test_check_output(self):
-        self.check_output()
-        self.assertTrue(
-            PassVersionChecker.IsCompatible('conv_affine_channel_fuse_pass'))
-
-
-class ConvEltwiseAddAffineChannelFusePassExplicitPaddingTest(InferencePassTest):
-    def setUp(self):
-        with fluid.program_guard(self.main_program, self.startup_program):
-            data = fluid.data(
-                name="data", shape=[-1, 3, 64, 64], dtype="float32")
-            param_attr = fluid.ParamAttr(
-                initializer=fluid.initializer.Xavier(uniform=False),
-                learning_rate=0.001)
-            conv_out = fluid.layers.conv2d(
-                input=data,
-                num_filters=3,
-                filter_size=3,
-                groups=3,
-                padding=[1, 1, 1, 1],
-                bias_attr=param_attr,
-                act=None)
-            input_scale = fluid.layers.create_parameter(
-                shape=[3], dtype="float32")
-            input_bias = fluid.layers.create_parameter(
-                shape=[3], dtype="float32")
-            ac_out = fluid.layers.affine_channel(
-                x=conv_out, scale=input_scale, bias=input_bias)
-
-        self.feeds = {
-            "data": np.random.random([1, 3, 64, 64]).astype("float32"),
-        }
-        self.fetch_list = [ac_out]
-
-    def test_check_output(self):
-        self.check_output()
-        self.assertTrue(
-            PassVersionChecker.IsCompatible(
-                'conv_eltwiseadd_affine_channel_fuse_pass'))
-
-
-class ConvEltwiseAddAffineChannelFusePassValidPaddingTest(InferencePassTest):
-    def setUp(self):
-        with fluid.program_guard(self.main_program, self.startup_program):
-            data = fluid.data(
-                name="data", shape=[-1, 3, 64, 64], dtype="float32")
-            param_attr = fluid.ParamAttr(
-                initializer=fluid.initializer.Xavier(uniform=False),
-                learning_rate=0.001)
-            conv_out = fluid.layers.conv2d(
-                input=data,
-                num_filters=3,
-                filter_size=3,
-                groups=3,
-                padding='VALID',
-                bias_attr=param_attr,
-                act=None)
-            input_scale = fluid.layers.create_parameter(
-                shape=[3], dtype="float32")
-            input_bias = fluid.layers.create_parameter(
-                shape=[3], dtype="float32")
-            ac_out = fluid.layers.affine_channel(
-                x=conv_out, scale=input_scale, bias=input_bias)
-
-        self.feeds = {
-            "data": np.random.random([1, 3, 64, 64]).astype("float32"),
-        }
-        self.fetch_list = [ac_out]
-
-    def test_check_output(self):
-        self.check_output()
-        self.assertTrue(
-            PassVersionChecker.IsCompatible(
-                'conv_eltwiseadd_affine_channel_fuse_pass'))
-
-
-class ConvEltwiseAddAffineChannelFusePassSamePaddingTest(InferencePassTest):
-    def setUp(self):
-        with fluid.program_guard(self.main_program, self.startup_program):
-            data = fluid.data(
-                name="data", shape=[-1, 3, 64, 64], dtype="float32")
-            param_attr = fluid.ParamAttr(
-                initializer=fluid.initializer.Xavier(uniform=False),
-                learning_rate=0.001)
-            conv_out = fluid.layers.conv2d(
-                input=data,
-                num_filters=3,
-                filter_size=3,
-                groups=3,
-                padding='Same',
-                bias_attr=param_attr,
-                act=None)
-            input_scale = fluid.layers.create_parameter(
-                shape=[3], dtype="float32")
-            input_bias = fluid.layers.create_parameter(
-                shape=[3], dtype="float32")
-            ac_out = fluid.layers.affine_channel(
-                x=conv_out, scale=input_scale, bias=input_bias)
-
-        self.feeds = {
-            "data": np.random.random([1, 3, 64, 64]).astype("float32"),
-        }
-        self.fetch_list = [ac_out]
-
-    def test_check_output(self):
-        self.check_output()
-        self.assertTrue(
-            PassVersionChecker.IsCompatible(
-                'conv_eltwiseadd_affine_channel_fuse_pass'))
+from auto_scan_test import PassAutoScanTest, IgnoreReasons
+from program_config import TensorConfig, ProgramConfig, OpConfig
+import numpy as np
+import paddle.inference as paddle_infer
+from functools import partial
+from typing import Optional, List, Callable, Dict, Any, Set
+import unittest
+
+import hypothesis
+from hypothesis import given, settings, seed, example, assume, reproduce_failure
+import hypothesis.strategies as st
+
+
+class TestConvAffineChannelFusePass(PassAutoScanTest):
+    def is_program_valid(self, program_config: ProgramConfig) -> bool:
+        return True
+
+    def sample_program_config(self, draw):
+        padding_algorithm = draw(st.sampled_from(["EXPLICIT", "SAME", "VALID"]))
+        groups = draw(st.integers(min_value=1, max_value=3))
+        data_format = draw(st.sampled_from(["NCHW", "NHWC"]))
+        axis = draw(st.sampled_from([1]))
+        filter_channel = draw(st.integers(min_value=1, max_value=16)) * 4
+        filter_size = draw(st.integers(min_value=1, max_value=4))
+        in_channel = groups * filter_channel
+        out_channel_factor = draw(st.integers(min_value=1, max_value=16)) * 4
+        out_channel = groups * out_channel_factor
+        batch_size = draw(st.integers(min_value=1, max_value=4))
+        dilations = draw(
+            st.lists(
+                st.integers(
+                    min_value=1, max_value=2), min_size=2, max_size=2))
+        paddings = draw(
+            st.lists(
+                st.integers(
+                    min_value=0, max_value=2), min_size=2, max_size=2))
+        strides = draw(
+            st.lists(
+                st.integers(
+                    min_value=1, max_value=2), min_size=2, max_size=2))
+        has_bias = draw(st.booleans())
+
+        x_shape = [
+            batch_size, in_channel, 64, 64
+        ] if data_format == "NCHW" else [batch_size, 64, 64, in_channel]
+        w_shape = [out_channel, filter_channel, filter_size, filter_size]
+        scale_shape = [out_channel]
+        bias_shape = [out_channel]
+
+        def generate_input():
+            return np.random.random(x_shape).astype(np.float32)
+
+        def generate_weight():
+            return np.random.random(w_shape).astype(np.float32)
+
+        def generate_bias():
+            return np.random.random(bias_shape).astype(np.float32)
+
+        def generate_scale_bias():
+            return np.random.random(bias_shape).astype(np.float32)
+
+        conv2d_op = OpConfig(
+            "conv2d",
+            inputs={
+                "Input": ["input_data"],
+                "Filter": ["conv2d_weight"],
+            },
+            outputs={"Output": ["conv_output"]},
+            data_format=data_format,
+            dilations=dilations,
+            padding_algorithm=padding_algorithm,
+            groups=groups,
+            paddings=paddings,
+            strides=strides,
+            has_bias=has_bias,
+            is_test=True)
+        ac_op = OpConfig(
+            "affine_channel",
+            inputs={
+                "X": ["conv_output"],
+                "Scale": ["affine_channel_scale"],
+                "Bias": ["affine_channel_bias"]
+            },
+            outputs={"Out": ["affine_channel_ouput"]},
+            data_layout=data_format)
+        if has_bias == True:
+            conv2d_op.inputs["Bias"] = ["conv2d_bias"]
+        ops = [conv2d_op, ac_op]
+
+        program_config = ProgramConfig(
+            ops=ops,
+            inputs={
+                "input_data": TensorConfig(data_gen=partial(generate_input)),
+            },
+            weights={
+                "conv2d_weight":
+                TensorConfig(data_gen=partial(generate_weight)),
+                "affine_channel_scale":
+                TensorConfig(data_gen=partial(generate_scale_bias)),
+                "affine_channel_bias":
+                TensorConfig(data_gen=partial(generate_scale_bias)),
+            },
+            outputs=["affine_channel_ouput"])
+        if has_bias == True:
+            program_config.weights["conv2d_bias"] = TensorConfig(
+                data_gen=partial(generate_bias))
+        return program_config
+
+    def sample_predictor_configs(self, program_config):
+        config = self.create_inference_config(use_gpu=True)
+        yield config, ['conv2d', 'elementwise_add'], (1e-4, 1e-4)
+
+        config = self.create_inference_config(use_mkldnn=True)
+        yield config, ['conv2d', 'elementwise_add'], (1e-4, 1e-4)
+
+    def add_ignore_pass_case(self):
+        # If the problem has been fixed, the judgment
+        # in is_program_valid needs to be deleted!!!
+        def teller1(program_config, predictor_config):
+            if program_config.ops[0].attrs['data_format'] == "NHWC":
+                return True
+            return False
+
+        # mkldnn Output has diff with bias!
+        def teller2(program_config, predictor_config):
+            return predictor_config.mkldnn_enabled() and program_config.ops[
+                0].attrs['has_bias'] == True
+
+        self.add_ignore_check_case(
+            teller1, IgnoreReasons.PASS_ACCURACY_ERROR,
+            "The output format of conv2d is wrong when data_format attribute is NHWC, \
+            because currently its fused op (Conv2DFusion) only supports data format of channel first (NCHW)."
+        )
+
+        self.add_ignore_check_case(
+            teller2, IgnoreReasons.PASS_ACCURACY_ERROR,
+            "Currently mkldnn Output has diff with bias!")
+
+    def test(self):
+        self.run_and_statis(
+            quant=False,
+            passes=["conv_affine_channel_fuse_pass"], )

 if __name__ == "__main__":
...
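The sample_program_config(self, draw) style above mirrors hypothesis's @st.composite strategies: draw pulls values from the listed strategies, and constrained quantities (such as in_channel = groups * filter_channel, which keeps the input channel count divisible by groups for a grouped conv) are derived rather than drawn. A hedged standalone sketch of the same idiom, assuming only the hypothesis package:

import hypothesis.strategies as st
from hypothesis import given, settings

@st.composite
def conv_input_shapes(draw):
    # Same pattern as sample_program_config: draw the free parameters...
    groups = draw(st.integers(min_value=1, max_value=3))
    filter_channel = draw(st.integers(min_value=1, max_value=16)) * 4
    batch_size = draw(st.integers(min_value=1, max_value=4))
    # ...and derive the constrained one (grouped conv needs Cin % groups == 0).
    return [batch_size, groups * filter_channel, 64, 64]

@given(x_shape=conv_input_shapes())
@settings(max_examples=20, deadline=None)
def check_shape_invariants(x_shape):
    batch, cin = x_shape[0], x_shape[1]
    assert 1 <= batch <= 4 and cin % 4 == 0

check_shape_invariants()  # hypothesis drives 20 sampled shapes through the check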
python/paddle/fluid/tests/unittests/ir/inference/test_conv_elementwise_add_fuse_pass.py

-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
...
@@ -12,41 +12,144 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from __future__ import print_function
-
-import unittest
-import numpy as np
-from inference_pass_test import InferencePassTest
-import paddle.fluid as fluid
-import paddle.fluid.core as core
-from paddle.fluid.core import PassVersionChecker
-from paddle.fluid.core import AnalysisConfig
-
-
-class ConvElementwiseAddFusePassTest(InferencePassTest):
-    def setUp(self):
-        with fluid.program_guard(self.main_program, self.startup_program):
-            data = fluid.data(
-                name="data", shape=[-1, 3, 100, 100], dtype="float32")
-            param_attr = fluid.ParamAttr(
-                initializer=fluid.initializer.Xavier(uniform=False),
-                learning_rate=0.001)
-            conv_out = fluid.layers.conv2d(
-                input=data, num_filters=3, filter_size=3, bias_attr=param_attr)
-
-        self.feeds = {
-            "data": np.random.random((1, 3, 100, 100)).astype("float32")
-        }
-        self.fetch_list = [conv_out]
-        self.enable_mkldnn = False
-
-    def test_check_output(self):
-        if core.is_compiled_with_cuda():
-            use_gpu = True
-            self.check_output_with_option(use_gpu)
-            self.assertTrue(
-                PassVersionChecker.IsCompatible(
-                    'conv_elementwise_add_fuse_pass'))
+from auto_scan_test import PassAutoScanTest, IgnoreReasons
+from program_config import TensorConfig, ProgramConfig, OpConfig
+import numpy as np
+import paddle.inference as paddle_infer
+from functools import partial
+from typing import Optional, List, Callable, Dict, Any, Set
+import unittest
+"""Test for fusion of conv and elementwise_add."""
+
+import hypothesis
+from hypothesis import given, settings, seed, example, assume
+import hypothesis.strategies as st
+
+
+class TestConvEltwiseAddFusePass(PassAutoScanTest):
+    def is_program_valid(self, program_config: ProgramConfig) -> bool:
+        attrs = [
+            program_config.ops[i].attrs
+            for i in range(len(program_config.ops))
+        ]
+        if attrs[0]['data_format'] == "NHWC" and attrs[1]['axis'] != 3:
+            return False
+        return True
+
+    def sample_program_config(self, draw):
+        padding_algorithm = draw(st.sampled_from(["EXPLICIT", "SAME", "VALID"]))
+        groups = draw(st.integers(min_value=1, max_value=3))
+        data_format = draw(st.sampled_from(["NCHW", "NHWC"]))
+        axis = draw(st.sampled_from([1]))
+        filter_channel = draw(st.integers(min_value=1, max_value=16)) * 4
+        filter_size = draw(st.integers(min_value=1, max_value=4))
+        in_channel = groups * filter_channel
+        out_channel_factor = draw(st.integers(min_value=1, max_value=16)) * 4
+        out_channel = groups * out_channel_factor
+        batch_size = draw(st.integers(min_value=1, max_value=4))
+        dilations = draw(
+            st.lists(
+                st.integers(
+                    min_value=1, max_value=2), min_size=2, max_size=2))
+        paddings = draw(
+            st.lists(
+                st.integers(
+                    min_value=0, max_value=2), min_size=2, max_size=2))
+        strides = draw(
+            st.lists(
+                st.integers(
+                    min_value=1, max_value=2), min_size=2, max_size=2))
+
+        x_shape = [
+            batch_size, in_channel, 64, 64
+        ] if data_format == "NCHW" else [batch_size, 64, 64, in_channel]
+        w_shape = [out_channel, filter_channel, filter_size, filter_size]
+        scale_shape = [out_channel]
+        bias_shape = [out_channel]
+
+        def generate_input():
+            return np.random.random(x_shape).astype(np.float32)
+
+        def generate_weight():
+            return np.random.random(w_shape).astype(np.float32)
+
+        def generate_bias():
+            return np.random.random(bias_shape).astype(np.float32)
+
+        def generate_scale_bias():
+            return np.random.random(bias_shape).astype(np.float32)
+
+        conv2d_op = OpConfig(
+            "conv2d",
+            inputs={
+                "Input": ["input_data"],
+                "Filter": ["conv2d_weight"],
+            },
+            outputs={"Output": ["conv_output"]},
+            data_format=data_format,
+            dilations=dilations,
+            padding_algorithm=padding_algorithm,
+            groups=groups,
+            paddings=paddings,
+            strides=strides,
+            is_test=True)
+        eltwise_op = OpConfig(
+            "elementwise_add",
+            inputs={"X": ["conv_output"],
+                    "Y": ["conv2d_bias"]},
+            outputs={"Out": ["elementwise_output"]},
+            axis=axis)
+        ops = [conv2d_op, eltwise_op]
+
+        program_config = ProgramConfig(
+            ops=ops,
+            inputs={
+                "input_data": TensorConfig(data_gen=partial(generate_input)),
+            },
+            weights={
+                "conv2d_weight":
+                TensorConfig(data_gen=partial(generate_weight)),
+                "conv2d_bias":
+                TensorConfig(data_gen=partial(generate_scale_bias)),
+            },
+            outputs=["elementwise_output"])
+        return program_config
+
+    def sample_predictor_configs(self, program_config):
+        config = self.create_inference_config(use_gpu=True)
+        yield config, ['conv2d_fusion'], (1e-4, 1e-4)
+
+        # # TRT
+        config = self.create_trt_inference_config()
+        config.enable_tensorrt_engine(
+            workspace_size=1 << 20,
+            max_batch_size=4,
+            min_subgraph_size=1,
+            precision_mode=paddle_infer.PrecisionType.Float32,
+            use_static=False,
+            use_calib_mode=False)
+        yield config, ['conv2d_fusion'], (1e-4, 1e-4)
+
+    def add_ignore_pass_case(self):
+        # If the problem has been fixed, the judgment
+        # in is_program_valid needs to be deleted!!!
+        def teller1(program_config, predictor_config):
+            if program_config.ops[0].attrs['data_format'] == "NHWC":
+                return True
+            return False
+
+        self.add_ignore_check_case(
+            teller1, IgnoreReasons.PASS_ACCURACY_ERROR,
+            "The output format of conv2d is wrong when data_format attribute is NHWC, \
+            it will trigger Broadcast dimension mismatch bug \
+            when data_format attribute is NHWC and axis of eltwise op is 1 for this pass."
+        )
+
+    def test(self):
+        self.run_and_statis(
+            quant=False,
+            passes=["conv_elementwise_add_fuse_pass"], )

 if __name__ == "__main__":
...
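The NHWC restriction that is_program_valid and teller1 encode is a broadcasting issue: the [Cout] bias only lines up with the channel dimension when that dimension is where axis says it is. A hedged NumPy sketch of the mismatch, reusing the alignment rule sketched earlier:

import numpy as np

def elementwise_add_axis(x, y, axis):
    # Align Y's dims to X starting at `axis`, then broadcast-add.
    shape = [1] * x.ndim
    shape[axis:axis + y.ndim] = y.shape
    return x + y.reshape(shape)

bias = np.random.rand(8).astype(np.float32)
nchw = np.random.rand(2, 8, 5, 5).astype(np.float32)  # channels at axis 1
nhwc = np.random.rand(2, 5, 5, 8).astype(np.float32)  # channels at axis 3

elementwise_add_axis(nchw, bias, axis=1)  # fine: 8 aligns with the C dim
elementwise_add_axis(nhwc, bias, axis=3)  # fine: axis=3 is C in NHWC
try:
    elementwise_add_axis(nhwc, bias, axis=1)  # the case the test filters out
except ValueError:
    pass  # broadcast dimension mismatch: the 8 lands on the H dimension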
python/paddle/fluid/tests/unittests/ir/inference/test_conv_eltwiseadd_affine_channel_fuse_pass.py
(new file, 0 → 100644)

# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from auto_scan_test import PassAutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest

import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st


class TestConvEltwiseAddAffineChannelFusePass(PassAutoScanTest):
    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        attrs = [
            program_config.ops[i].attrs
            for i in range(len(program_config.ops))
        ]
        if attrs[0]['data_format'] == "NHWC" and attrs[1]['axis'] != 3:
            return False
        return True

    def sample_program_config(self, draw):
        padding_algorithm = draw(st.sampled_from(["EXPLICIT", "SAME", "VALID"]))
        groups = draw(st.integers(min_value=1, max_value=3))
        data_format = draw(st.sampled_from(["NCHW", "NHWC"]))
        axis = draw(st.sampled_from([1]))
        filter_channel = draw(st.integers(min_value=1, max_value=16)) * 4
        filter_size = draw(st.integers(min_value=1, max_value=4))
        in_channel = groups * filter_channel
        out_channel_factor = draw(st.integers(min_value=1, max_value=16)) * 4
        out_channel = groups * out_channel_factor
        batch_size = draw(st.integers(min_value=1, max_value=4))
        dilations = draw(
            st.lists(
                st.integers(
                    min_value=1, max_value=2), min_size=2, max_size=2))
        paddings = draw(
            st.lists(
                st.integers(
                    min_value=0, max_value=2), min_size=2, max_size=2))
        strides = draw(
            st.lists(
                st.integers(
                    min_value=1, max_value=2), min_size=2, max_size=2))
        has_bias = draw(st.booleans())

        x_shape = [
            batch_size, in_channel, 64, 64
        ] if data_format == "NCHW" else [batch_size, 64, 64, in_channel]
        w_shape = [out_channel, filter_channel, filter_size, filter_size]
        scale_shape = [out_channel]
        bias_shape = [out_channel]

        def generate_input():
            return np.random.random(x_shape).astype(np.float32)

        def generate_weight():
            return np.random.random(w_shape).astype(np.float32)

        def generate_bias():
            return np.random.random(bias_shape).astype(np.float32)

        def generate_scale_bias():
            return np.random.random(bias_shape).astype(np.float32)

        conv2d_op = OpConfig(
            "conv2d",
            inputs={
                "Input": ["input_data"],
                "Filter": ["conv2d_weight"],
            },
            outputs={"Output": ["conv_output"]},
            data_format=data_format,
            dilations=dilations,
            padding_algorithm=padding_algorithm,
            groups=groups,
            paddings=paddings,
            strides=strides,
            has_bias=has_bias,
            is_test=True)
        eltwise_op = OpConfig(
            "elementwise_add",
            inputs={"X": ["conv_output"],
                    "Y": ["conv2d_bias"]},
            outputs={"Out": ["elementwise_output"]},
            axis=axis)
        ac_op = OpConfig(
            "affine_channel",
            inputs={
                "X": ["elementwise_output"],
                "Scale": ["affine_channel_scale"],
                "Bias": ["affine_channel_bias"]
            },
            outputs={"Out": ["affine_channel_ouput"]},
            data_layout=data_format)
        if has_bias == True:
            conv2d_op.inputs["Bias"] = ["conv2d_bias"]
        ops = [conv2d_op, eltwise_op, ac_op]

        program_config = ProgramConfig(
            ops=ops,
            inputs={
                "input_data": TensorConfig(data_gen=partial(generate_input)),
            },
            weights={
                "conv2d_weight":
                TensorConfig(data_gen=partial(generate_weight)),
                "conv2d_bias": TensorConfig(data_gen=partial(generate_bias)),
                "affine_channel_scale":
                TensorConfig(data_gen=partial(generate_scale_bias)),
                "affine_channel_bias":
                TensorConfig(data_gen=partial(generate_scale_bias)),
            },
            outputs=["affine_channel_ouput"])
        return program_config

    def sample_predictor_configs(self, program_config):
        config = self.create_inference_config(use_gpu=True)
        yield config, ['conv2d', 'elementwise_add'], (1e-4, 1e-4)

        config = self.create_inference_config(use_mkldnn=True)
        yield config, ['conv2d', 'elementwise_add'], (1e-4, 1e-4)

        # TRT
        config = self.create_trt_inference_config()
        config.enable_tensorrt_engine(
            workspace_size=1 << 20,
            max_batch_size=4,
            min_subgraph_size=1,
            precision_mode=paddle_infer.PrecisionType.Float32,
            use_static=False,
            use_calib_mode=False)
        yield config, ['conv2d', 'elementwise_add'], (1e-4, 1e-4)

    def add_ignore_pass_case(self):
        # If the problem has been fixed, the judgment
        # in is_program_valid needs to be deleted!!!
        def teller1(program_config, predictor_config):
            if program_config.ops[0].attrs['data_format'] == "NHWC":
                return True
            return False

        # mkldnn Output has diff with bias!
        def teller2(program_config, predictor_config):
            return predictor_config.mkldnn_enabled() and program_config.ops[
                0].attrs['has_bias'] == True

        self.add_ignore_check_case(
            teller1, IgnoreReasons.PASS_ACCURACY_ERROR,
            "The output format of conv2d is wrong when data_format attribute is NHWC, \
            it will trigger Broadcast dimension mismatch bug \
            when data_format attribute is NHWC and axis of eltwise op is 1 for this pass."
        )
        self.add_ignore_check_case(
            teller2, IgnoreReasons.PASS_ACCURACY_ERROR,
            "Currently mkldnn Output has diff with bias!")

    def test(self):
        self.run_and_statis(
            quant=False,
            passes=["conv_eltwiseadd_affine_channel_fuse_pass"], )


if __name__ == "__main__":
    unittest.main()
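Why this three-op pattern can fuse at all comes down to per-channel algebra: affine_channel(conv + b) = scale * conv + (scale * b + ac_bias), so the scale folds into the conv weights and the two biases combine into a single elementwise_add. A hedged NumPy sketch, using a stand-in tensor for the conv output:

import numpy as np

C = 8
conv_out = np.random.rand(2, C, 5, 5).astype(np.float32)  # stand-in for conv2d(x, W)
b = np.random.rand(C).astype(np.float32)                  # elementwise_add bias
scale = np.random.rand(C).astype(np.float32)              # affine_channel Scale
ac_bias = np.random.rand(C).astype(np.float32)            # affine_channel Bias

per_c = lambda v: v[None, :, None, None]  # broadcast a [C] vector along NCHW axis 1

# Unfused: conv2d -> elementwise_add -> affine_channel
unfused = (conv_out + per_c(b)) * per_c(scale) + per_c(ac_bias)

# Fused: scale folds into the conv (here, into conv_out, since conv is linear
# in its weights) and the two biases merge into one elementwise_add.
fused = conv_out * per_c(scale) + per_c(scale * b + ac_bias)

assert np.allclose(unfused, fused, atol=1e-5)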