Commit ee87cb9c
Authored Jan 04, 2023 by Megvii Engine Team
fix(gopt): fix channel padding pass of channel wise conv
GitOrigin-RevId: e98568d0bd3fd2c51c2b033b691557c4f91f5ccf
Parent: 3e0bb22c
Showing 2 changed files with 75 additions and 31 deletions (+75 −31)
src/gopt/impl/padding_channel.cpp  +3 −0
src/gopt/test/padding_channel.cpp  +72 −31
src/gopt/impl/padding_channel.cpp
@@ -316,6 +316,9 @@ OperatorNodeBase* PaddingChannelPass::padding_channel_wise_conv_policy(
     size_t pad_channels_1 = new_in_channels - group;
     if (pad_channels_1) {
         inps[1] = pad_in_channels(new_inp[1], pad_channels_1);
+        if (inps.size() >= 3) {
+            inps[2] = pad_in_channels(new_inp[2], pad_channels_1);
+        }
         m_padding_oprs.insert(opr);
     }
 }
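In the hunk above, `pad_channels_1 = new_in_channels - group` is the number of channels the pass has to add to a channel-wise convolution, and the weight input `inps[1]` was already being padded; the new lines also pad the bias input `inps[2]` when the operator carries one (`inps.size() >= 3`), so the bias channel count keeps matching the padded weight. The following is a minimal standalone sketch of that shape bookkeeping, not MegEngine's `pad_in_channels` implementation; tensors are reduced to bare shape vectors and `pad_axis` is a hypothetical helper.

#include <cassert>
#include <cstddef>
#include <vector>

// Illustrative stand-in for a tensor: only its shape matters here.
using Shape = std::vector<size_t>;

// Hypothetical analogue of pad_in_channels: grow the channel-like axis by
// pad_channels (the real pass also fills the new channels with zeros so the
// extra outputs stay inert).
Shape pad_axis(Shape shp, size_t axis, size_t pad_channels) {
    shp[axis] += pad_channels;
    return shp;
}

int main() {
    size_t group = 3;            // channel-wise conv: one group per channel
    size_t new_in_channels = 4;  // channel count after the input was padded
    size_t pad_channels_1 = new_in_channels - group;

    // Channel-wise weight {group, 1, 1, FH, FW} and bias {1, group, 1, 1}.
    Shape weight{group, 1, 1, 3, 3};
    Shape bias{1, group, 1, 1};

    if (pad_channels_1) {
        weight = pad_axis(weight, 0, pad_channels_1);  // inps[1], as before
        bias = pad_axis(bias, 1, pad_channels_1);      // inps[2], the fix
    }
    assert(weight[0] == new_in_channels);
    assert(bias[1] == new_in_channels);  // bias matches the padded channels
    return 0;
}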
src/gopt/test/padding_channel.cpp
@@ -60,7 +60,11 @@ T* find_opr(SymbolVar endpoint, const std::string& node_name) {
 }
 }  // namespace
 
-TEST(TestGoptInference, ChannelPaddingNCHW44) {
+template <opr::Convolution::Param::Format T_format>
+void check_channel_padding_conv() {
+    size_t scalar = 1;
+    if (T_format == opr::Convolution::Param::Format::NCHW88)
+        scalar = 2;
     HostTensorGenerator<> gen;
     auto cn = CompNode::load("cpu0");
     auto graph = ComputingGraph::make();
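The hunk above replaces the NCHW44-only TEST body with a function template over `opr::Convolution::Param::Format`; the per-format multiplier `scalar` (1 for NCHW44, 2 for NCHW88) then scales every channel count used in the test graph. A minimal standalone sketch of that pattern, using a toy `Format` enum rather than the MegEngine types:

#include <cstddef>
#include <cstdio>

enum class Format { NCHW44, NCHW88 };

// One test body serves both layouts: the non-type template parameter selects
// the layout and `scalar` scales every channel count accordingly.
template <Format T_format>
void check_channel_padding_conv_sketch() {
    size_t scalar = 1;
    if (T_format == Format::NCHW88)
        scalar = 2;
    std::printf("input channels: %zu\n", 3 * scalar);
}

int main() {
    check_channel_padding_conv_sketch<Format::NCHW44>();  // prints 3
    check_channel_padding_conv_sketch<Format::NCHW88>();  // prints 6
    return 0;
}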
@@ -69,74 +73,103 @@ TEST(TestGoptInference, ChannelPaddingNCHW44) {
         return opr::SharedDeviceTensor::make(*graph, *gen(shp, cn)).rename(name);
     };
-    auto host_x = gen({1, 3, 8, 8}, cn);
+    auto host_x = gen({1, 3 * scalar, 8, 8}, cn);
     auto x = opr::Host2DeviceCopy::make(*graph, host_x);
 
     //! Hybrid nchw44 mode
     opr::ConvBias::Param param_conv;
     param_conv.pad_h = param_conv.pad_w = 1;
-    auto w1 = mkcvar("w1", {8, 3, 3, 3}), b1 = mkcvar("w1", {1, 8, 1, 1}),
+    auto w1 = mkcvar("w1", {8 * scalar, 3 * scalar, 3, 3}),
+         b1 = mkcvar("b1", {1, 8 * scalar, 1, 1}),
          conv1 = opr::ConvBias::make(x, w1, b1, param_conv, {}, OperatorNodeConfig("conv1"));
-    auto w2 = mkcvar("w2", {6, 8, 3, 3}), b2 = mkcvar("b2", {1, 6, 1, 1}),
+    auto w2 = mkcvar("w2", {6 * scalar, 8 * scalar, 3, 3}),
+         b2 = mkcvar("b2", {1, 6 * scalar, 1, 1}),
          conv2 = opr::ConvBias::make(conv1, w2, b2, param_conv, {}, OperatorNodeConfig("conv2"));
-    auto w3 = mkcvar("w3", {3, 6, 3, 3}), b3 = mkcvar("b3", {1, 3, 1, 1}),
+    auto w3 = mkcvar("w3", {3 * scalar, 6 * scalar, 3, 3}),
+         b3 = mkcvar("b3", {1, 3 * scalar, 1, 1}),
          conv3 = opr::ConvBias::make(conv2, w3, b3, param_conv, {}, OperatorNodeConfig("conv3"));
+    //! channel wise conv bias
+    opr::ConvBias::Param param_channel_conv_bias;
+    param_channel_conv_bias.sparse = opr::ConvBias::Param::Sparse::GROUP;
+    auto w4 = mkcvar("w4", {3 * scalar, 1, 1, 1, 1}),
+         b4 = mkcvar("b4", {1, 3 * scalar, 1, 1}),
+         conv4 = opr::ConvBias::make(conv3, w4, b4, param_channel_conv_bias, {}, OperatorNodeConfig("conv4"));
 
     opr::Convolution::Param param_convolution;
     param_convolution.sparse = opr::Convolution::Param::Sparse::GROUP;
     //! channel wise convolution
-    auto w4 = mkcvar("w4", {3, 1, 1, 1, 1}),
-         conv4 = opr::Convolution::make(conv3, w4, param_convolution, {}, OperatorNodeConfig("conv4"));
-    param_convolution.sparse = opr::Convolution::Param::Sparse::DENSE;
-    auto w5 = mkcvar("w5", {6, 3, 1, 1}),
+    auto w5 = mkcvar("w5", {3 * scalar, 1, 1, 1, 1}),
          conv5 = opr::Convolution::make(conv4, w5, param_convolution, {}, OperatorNodeConfig("conv5"));
-    //! group convolution
-    param_convolution.sparse = opr::Convolution::Param::Sparse::GROUP;
-    auto w6 = mkcvar("w6", {2, 4, 3, 1, 1}),
+    param_convolution.sparse = opr::Convolution::Param::Sparse::DENSE;
+    auto w6 = mkcvar("w6", {6 * scalar, 3 * scalar, 1, 1}),
          conv6 = opr::Convolution::make(conv5, w6, param_convolution, {}, OperatorNodeConfig("conv6"));
+    //! group convolution
+    param_convolution.sparse = opr::Convolution::Param::Sparse::GROUP;
+    auto w7 = mkcvar("w7", {2 * scalar, 4, 3, 1, 1}),
+         conv7 = opr::Convolution::make(conv6, w7, param_convolution, {}, OperatorNodeConfig("conv7"));
     param_convolution.sparse = opr::Convolution::Param::Sparse::DENSE;
-    auto w7 = mkcvar("w7", {3, 8, 1, 1}),
-         y = opr::Convolution::make(conv6, w7, param_convolution, {}, OperatorNodeConfig("conv7"));
+    auto w8 = mkcvar("w8", {3 * scalar, 8 * scalar, 1, 1}),
+         y = opr::Convolution::make(conv7, w8, param_convolution, {}, OperatorNodeConfig("conv8"));
 
     SymbolVar y_opt;
     auto options = gopt::OptimizeForInferenceOptions{};
     options.enable_fuse_conv_bias_nonlinearity();
-    options.enable_nchw44();
+    if (T_format == opr::Convolution::Param::Format::NCHW44)
+        options.enable_nchw44();
+    else if (T_format == opr::Convolution::Param::Format::NCHW88)
+        options.enable_nchw88();
     unpack_vector(gopt::optimize_for_inference({y}, options), y_opt);
 
     auto conv1_opt = find_opr<opr::ConvBias>(y_opt, "conv1");
     auto conv2_opt = find_opr<opr::ConvBias>(y_opt, "conv2");
     auto conv3_opt = find_opr<opr::ConvBias>(y_opt, "conv3");
-    auto conv4_opt = find_opr<opr::Convolution>(y_opt, "conv4");
-    auto conv6_opt = find_opr<opr::Convolution>(y_opt, "conv6");
+    auto conv4_opt = find_opr<opr::ConvBias>(y_opt, "conv4");
+    auto conv5_opt = find_opr<opr::Convolution>(y_opt, "conv5");
+    auto conv7_opt = find_opr<opr::Convolution>(y_opt, "conv7");
     //! do not padding input tensor
-    ASSERT_EQ(conv1_opt->input(0)->shape()[1], 3);
-    ASSERT_EQ(opr::Convolution::Param::Format::NCHW44, conv1_opt->param().format);
+    ASSERT_EQ(conv1_opt->input(0)->shape()[1], 3 * scalar);
+    ASSERT_EQ(T_format, conv1_opt->param().format);
     //! output tensor padding input tensor
     ASSERT_EQ(conv2_opt->input(1)->shape()[0], 2);
-    ASSERT_EQ(opr::Convolution::Param::Format::NCHW44, conv2_opt->param().format);
+    ASSERT_EQ(conv2_opt->input(2)->shape()[1], 2);
+    ASSERT_EQ(T_format, conv2_opt->param().format);
     ASSERT_EQ(conv3_opt->input(1)->shape()[0], 1);
-    ASSERT_EQ(opr::Convolution::Param::Format::NCHW44, conv3_opt->param().format);
+    ASSERT_EQ(conv3_opt->input(2)->shape()[1], 1);
+    ASSERT_EQ(T_format, conv3_opt->param().format);
     ASSERT_EQ(conv4_opt->input(1)->shape()[0], 1);
-    ASSERT_EQ(opr::Convolution::Param::Format::NCHW44, conv4_opt->param().format);
-    ASSERT_EQ(conv6_opt->input(0)->shape()[1], 6);
-    ASSERT_EQ(opr::Convolution::Param::Format::NCHW, conv6_opt->param().format);
+    ASSERT_EQ(conv4_opt->input(2)->shape()[1], 1);
+    ASSERT_EQ(T_format, conv4_opt->param().format);
+    ASSERT_EQ(conv5_opt->input(1)->shape()[0], 1);
+    ASSERT_EQ(T_format, conv5_opt->param().format);
+    ASSERT_EQ(conv7_opt->input(0)->shape()[1], 6 * scalar);
+    ASSERT_EQ(opr::Convolution::Param::Format::NCHW, conv7_opt->param().format);
     //! the dst tensor channel must stay unchange
-    ASSERT_EQ(y_opt.node()->shape()[1], 3);
-    graph->compile({{y_opt, {}}})
-            ->to_json()
-            ->writeto_fpath(output_file("TestGoptInference.ChannelPaddingNCHW44.json"));
+    ASSERT_EQ(y_opt.node()->shape()[1], 3 * scalar);
+    if (T_format == opr::Convolution::Param::Format::NCHW44)
+        graph->compile({{y_opt, {}}})
+                ->to_json()
+                ->writeto_fpath(output_file("TestGoptInference.ChannelPaddingNCHW44.json"));
+    else if (T_format == opr::Convolution::Param::Format::NCHW88)
+        graph->compile({{y_opt, {}}})
+                ->to_json()
+                ->writeto_fpath(output_file("TestGoptInference.ChannelPaddingNCHW88.json"));
 
     HostTensorND host_y_opt, host_y;
     auto func = graph->compile(
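A detail worth noting in the rewritten assertions: expected block counts such as `conv2_opt->input(1)->shape()[0] == 2` stay the same for both formats because every channel count is scaled by `scalar` while the packing block size grows by the same factor (4 channels per block for NCHW44, 8 for NCHW88). A quick worked check, assuming the pass pads channels up to a multiple of the block size:

#include <cassert>
#include <cstddef>
#include <initializer_list>

// Round a channel count up to the packing block size.
constexpr size_t round_up(size_t x, size_t block) {
    return (x + block - 1) / block * block;
}

int main() {
    for (size_t scalar : {1, 2}) {  // 1 -> NCHW44, 2 -> NCHW88
        size_t block = 4 * scalar;  // channels packed per block
        size_t oc2 = 6 * scalar;    // conv2 output channels before padding
        // Padded to 8 (NCHW44) or 16 (NCHW88); divided by the block size the
        // leading weight/bias dimension is 2 in both cases.
        assert(round_up(oc2, block) / block == 2);
    }
    return 0;
}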
@@ -145,11 +178,19 @@ TEST(TestGoptInference, ChannelPaddingNCHW44) {
     MGB_ASSERT_TENSOR_NEAR(host_y, host_y_opt, 1e-2);
 
     //! test change the input shape
-    *host_x = *gen({2, 3, 32, 32}, cn);
+    *host_x = *gen({2, 3 * scalar, 32, 32}, cn);
     func->execute();
     MGB_ASSERT_TENSOR_NEAR(host_y, host_y_opt, 1e-2);
 }
 
+TEST(TestGoptInference, ChannelPaddingNCHW44) {
+    check_channel_padding_conv<opr::Convolution::Param::Format::NCHW44>();
+}
+
+TEST(TestGoptInference, ChannelPaddingNCHW88) {
+    check_channel_padding_conv<opr::Convolution::Param::Format::NCHW88>();
+}
+
 TEST(TestGoptInference, ChannelPaddingSubtensor) {
     HostTensorGenerator<> gen;
     auto cn = CompNode::load("cpu0");