Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
MegEngine 天元
MegEngine
提交
5f15f759
MegEngine
项目概览
MegEngine 天元
/
MegEngine
1 年多 前同步成功
通知
403
Star
4705
Fork
582
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
MegEngine
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
提交
5f15f759
编写于
10月 20, 2021
作者:
M
Megvii Engine Team
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
test(mgb/gopt): add a testcase for SubGraphExtractor with multiple outputs
GitOrigin-RevId: 7785bdc8c090467cf75c864cb4056a0cb2059199
上级
a6230ba9
变更
3
显示空白变更内容
内联
并排
Showing
3 changed files
with
66 additions
and
38 deletions
+66
-38
src/gopt/impl/global_layout_transform/layout_transform_context.cpp
...impl/global_layout_transform/layout_transform_context.cpp
+13
-12
src/gopt/impl/global_layout_transform/opr_tensor_formats_config.cpp
...mpl/global_layout_transform/opr_tensor_formats_config.cpp
+15
-26
src/gopt/test/subgraph_extractor.cpp
src/gopt/test/subgraph_extractor.cpp
+38
-0
未找到文件。
src/gopt/impl/global_layout_transform/layout_transform_context.cpp
浏览文件 @
5f15f759
...
...
@@ -103,20 +103,21 @@ std::unique_ptr<LayoutTransformContext> make_arm_ctx(
DNN_INC_FLOAT16
(
TensorFormats
::
NCHWc8
)};
Attribute
attribute
=
{
base_opr_format
,
base_tensor_format
,
Target
::
ARM
};
auto
ctx
=
std
::
make_unique
<
LayoutTransformContext
>
(
std
::
move
(
opr_list
),
std
::
move
(
available_tensor_formats
),
attribute
);
std
::
move
(
opr_list
),
std
::
move
(
available_tensor_formats
),
attribute
);
ctx
->
add_opr_config
(
opr
::
ConvBiasForward
::
typeinfo
(),
{
OprFormat
::
NCHW
,
OprFormat
::
NCHW44
,
DNN_INC_FLOAT16
(
OprFormat
::
NCHW88
),
OprFormat
::
NCHW44_DOT
})
{
OprFormat
::
NCHW
,
OprFormat
::
NCHW44
,
DNN_INC_FLOAT16
(
OprFormat
::
NCHW88
),
OprFormat
::
NCHW44_DOT
})
.
add_opr_config
(
opr
::
ConvolutionForward
::
typeinfo
(),
{
OprFormat
::
NCHW
,
OprFormat
::
NCHW44
,
DNN_INC_FLOAT16
(
OprFormat
::
NCHW88
),
OprFormat
::
NCHW44_DOT
})
.
add_opr_config
(
opr
::
PoolingForward
::
typeinfo
(),
.
add_opr_config
(
opr
::
PoolingForward
::
typeinfo
(),
{
OprFormat
::
NCHW
,
OprFormat
::
NCHW44
,
DNN_INC_FLOAT16
(
OprFormat
::
NCHW88
)})
.
add_opr_config
(
opr
::
ResizeForward
::
typeinfo
(),
.
add_opr_config
(
opr
::
ResizeForward
::
typeinfo
(),
{
OprFormat
::
NCHW
,
OprFormat
::
NCHW44
,
DNN_INC_FLOAT16
(
OprFormat
::
NCHW88
)});
return
ctx
;
...
...
src/gopt/impl/global_layout_transform/opr_tensor_formats_config.cpp
浏览文件 @
5f15f759
...
...
@@ -80,8 +80,7 @@ struct OprSingleInOutTensorFormatsDispatcherImpl<OprFormat::NCHW> {
template
<
>
struct
OprSingleInOutTensorFormatsDispatcherImpl
<
OprFormat
::
NCHW44
>
{
static
Maybe
<
OprTensorFormatsConfiguration
>
dispatch
(
const
OperatorNodeBase
*
opr
)
{
static
Maybe
<
OprTensorFormatsConfiguration
>
dispatch
(
const
OperatorNodeBase
*
opr
)
{
OprTensorFormatsConfiguration
config
;
config
.
typeinfo
=
opr
->
dyn_typeinfo
();
config
.
opr_format
=
OprFormat
::
NCHW44
;
...
...
@@ -101,8 +100,7 @@ struct OprSingleInOutTensorFormatsDispatcherImpl<OprFormat::NCHW44> {
#if !MEGDNN_DISABLE_FLOAT16
template
<
>
struct
OprSingleInOutTensorFormatsDispatcherImpl
<
OprFormat
::
NCHW88
>
{
static
Maybe
<
OprTensorFormatsConfiguration
>
dispatch
(
const
OperatorNodeBase
*
opr
)
{
static
Maybe
<
OprTensorFormatsConfiguration
>
dispatch
(
const
OperatorNodeBase
*
opr
)
{
OprTensorFormatsConfiguration
config
;
config
.
typeinfo
=
opr
->
dyn_typeinfo
();
config
.
opr_format
=
OprFormat
::
NCHW88
;
...
...
@@ -440,8 +438,7 @@ struct ConvTensorFormatsDispatcherImpl<Opr, OprFormat::CHWN4> {
template
<
typename
Opr
>
struct
ConvTensorFormatsDispatcherImpl
<
Opr
,
OprFormat
::
NCHW44
>
{
static
Maybe
<
OprTensorFormatsConfiguration
>
dispatch
(
const
OperatorNodeBase
*
opr
)
{
static
Maybe
<
OprTensorFormatsConfiguration
>
dispatch
(
const
OperatorNodeBase
*
opr
)
{
const
auto
&
conv
=
opr
->
cast_final_safe
<
Opr
>
();
OprTensorFormatsConfiguration
config
;
config
.
typeinfo
=
opr
->
dyn_typeinfo
();
...
...
@@ -451,8 +448,7 @@ struct ConvTensorFormatsDispatcherImpl<Opr, OprFormat::NCHW44> {
for
(
size_t
i
=
0
;
i
<
opr
->
input
().
size
();
++
i
)
{
available
&=
opr
->
input
(
i
)
->
dtype
().
enumv
()
==
DTypeEnum
::
Float32
;
config
.
input_dtypes
.
emplace_back
(
opr
->
input
(
i
)
->
dtype
().
enumv
());
TensorType
tensor_type
=
i
==
1
?
TensorType
::
WEIGHT
:
TensorType
::
FEATURE
;
TensorType
tensor_type
=
i
==
1
?
TensorType
::
WEIGHT
:
TensorType
::
FEATURE
;
config
.
input_tensor_types
.
emplace_back
(
tensor_type
);
}
available
&=
opr
->
output
(
0
)
->
dtype
().
enumv
()
==
DTypeEnum
::
Float32
;
...
...
@@ -484,8 +480,7 @@ struct ConvTensorFormatsDispatcherImpl<Opr, OprFormat::NCHW44> {
#if !MEGDNN_DISABLE_FLOAT16
template
<
typename
Opr
>
struct
ConvTensorFormatsDispatcherImpl
<
Opr
,
OprFormat
::
NCHW88
>
{
static
Maybe
<
OprTensorFormatsConfiguration
>
dispatch
(
const
OperatorNodeBase
*
opr
)
{
static
Maybe
<
OprTensorFormatsConfiguration
>
dispatch
(
const
OperatorNodeBase
*
opr
)
{
const
auto
&
conv
=
opr
->
cast_final_safe
<
Opr
>
();
OprTensorFormatsConfiguration
config
;
config
.
typeinfo
=
opr
->
dyn_typeinfo
();
...
...
@@ -495,8 +490,7 @@ struct ConvTensorFormatsDispatcherImpl<Opr, OprFormat::NCHW88> {
for
(
size_t
i
=
0
;
i
<
opr
->
input
().
size
();
++
i
)
{
available
&=
opr
->
input
(
i
)
->
dtype
().
enumv
()
==
DTypeEnum
::
Float16
;
config
.
input_dtypes
.
emplace_back
(
opr
->
input
(
i
)
->
dtype
().
enumv
());
TensorType
tensor_type
=
i
==
1
?
TensorType
::
WEIGHT
:
TensorType
::
FEATURE
;
TensorType
tensor_type
=
i
==
1
?
TensorType
::
WEIGHT
:
TensorType
::
FEATURE
;
config
.
input_tensor_types
.
emplace_back
(
tensor_type
);
}
available
&=
opr
->
output
(
0
)
->
dtype
().
enumv
()
==
DTypeEnum
::
Float16
;
...
...
@@ -528,8 +522,7 @@ struct ConvTensorFormatsDispatcherImpl<Opr, OprFormat::NCHW88> {
template
<
typename
Opr
>
struct
ConvTensorFormatsDispatcherImpl
<
Opr
,
OprFormat
::
NCHW44_DOT
>
{
static
Maybe
<
OprTensorFormatsConfiguration
>
dispatch
(
const
OperatorNodeBase
*
opr
)
{
static
Maybe
<
OprTensorFormatsConfiguration
>
dispatch
(
const
OperatorNodeBase
*
opr
)
{
const
auto
&
conv
=
opr
->
cast_final_safe
<
Opr
>
();
OprTensorFormatsConfiguration
config
;
config
.
typeinfo
=
opr
->
dyn_typeinfo
();
...
...
@@ -538,21 +531,17 @@ struct ConvTensorFormatsDispatcherImpl<Opr, OprFormat::NCHW44_DOT> {
// setup dtypes
for
(
size_t
i
=
0
;
i
<
opr
->
input
().
size
();
++
i
)
{
if
(
i
==
2
)
{
available
&=
opr
->
input
(
i
)
->
dtype
().
enumv
()
==
DTypeEnum
::
QuantizedS32
;
available
&=
opr
->
input
(
i
)
->
dtype
().
enumv
()
==
DTypeEnum
::
QuantizedS32
;
}
else
{
available
&=
opr
->
input
(
i
)
->
dtype
().
enumv
()
==
DTypeEnum
::
QuantizedS8
||
opr
->
input
(
i
)
->
dtype
().
enumv
()
==
DTypeEnum
::
Quantized8Asymm
;
available
&=
opr
->
input
(
i
)
->
dtype
().
enumv
()
==
DTypeEnum
::
QuantizedS8
||
opr
->
input
(
i
)
->
dtype
().
enumv
()
==
DTypeEnum
::
Quantized8Asymm
;
}
config
.
input_dtypes
.
emplace_back
(
opr
->
input
(
i
)
->
dtype
().
enumv
());
TensorType
tensor_type
=
i
==
1
?
TensorType
::
WEIGHT
:
TensorType
::
FEATURE
;
TensorType
tensor_type
=
i
==
1
?
TensorType
::
WEIGHT
:
TensorType
::
FEATURE
;
config
.
input_tensor_types
.
emplace_back
(
tensor_type
);
}
available
&=
opr
->
output
(
0
)
->
dtype
().
enumv
()
==
DTypeEnum
::
QuantizedS8
||
available
&=
opr
->
output
(
0
)
->
dtype
().
enumv
()
==
DTypeEnum
::
QuantizedS8
||
opr
->
output
(
0
)
->
dtype
().
enumv
()
==
DTypeEnum
::
Quantized8Asymm
;
config
.
output_dtypes
.
emplace_back
(
opr
->
output
(
0
)
->
dtype
().
enumv
());
// setup tensor formats
...
...
src/gopt/test/subgraph_extractor.cpp
浏览文件 @
5f15f759
...
...
@@ -264,4 +264,42 @@ TEST(TestSubGraphExtractor, Complicated) {
output_file
(
ssprintf
(
"%s.json"
,
prefix
).
c_str
()));
}
TEST(TestSubGraphExtractor, SubGraphWithMultipleOutputs) {
    // A conv result consumed twice (directly and through a negation) should
    // produce a single partition that exposes BOTH vars as outputs, because
    // the downstream Concat opr is absent from the extractable opr list.
    HostTensorGenerator<> gen;
    auto graph = ComputingGraph::make();
    graph->options().graph_opt_level = 0;  // keep the graph exactly as built

    // Host-backed variable input.
    auto make_input = [&](const char* name, const TensorShape& shape) {
        return opr::Host2DeviceCopy::make(*graph, gen(shape)).rename(name);
    };
    // Device-resident constant tensor (used here as the conv weight).
    auto make_const = [&](const char* name, const TensorShape& shape) {
        return opr::SharedDeviceTensor::make(*graph, *gen(shape)).rename(name);
    };

    auto x = make_input("x", {8, 8, 8, 8});
    auto w = make_const("w", {4, 8, 3, 3});
    opr::Convolution::Param conv_param;
    conv_param.pad_h = conv_param.pad_w = 1;
    auto c = opr::Convolution::make(x, w, conv_param);
    auto neg_c = -c;
    auto z = opr::Concat::make({c, neg_c}, 1);

    using OprList = SubGraphExtractor::OprList;
    static const OprList opr_list = {
            opr::ConvolutionForward::typeinfo(),
            opr::Elemwise::typeinfo(),
    };
    SubGraphExtractor extractor(opr_list);
    auto partitions = extractor.extract({z});

    ASSERT_EQ(partitions.size(), 1u);
    auto& partition = partitions[0];
    // Both the conv output and its negation escape the partition.
    ASSERT_EQ(partition.output().size(), 2u);
    ASSERT_TRUE(partition.output().count(c.node()) > 0);
    ASSERT_TRUE(partition.output().count(neg_c.node()) > 0);
    // Two external inputs — presumably the feature map x and the weight w;
    // only x is asserted explicitly.
    ASSERT_EQ(partition.input().size(), 2u);
    ASSERT_TRUE(partition.input().count(x.node()) > 0);
    partition.to_json()->writeto_fpath(
            output_file("TestSubGraphExtractor.SubGraphMultipleOuputs.json"));
}
// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录