MegEngine 天元 / MegEngine
Commit 7a023c05
Authored on Nov 26, 2021 by Megvii Engine Team

feat(mge/traced_module): add optimization api

GitOrigin-RevId: eaa74026404a8b3fa7a8d33826612e859a445b0c

Parent: 6c692b26
Showing 4 changed files with 197 additions and 0 deletions (+197 -0)
imperative/python/megengine/traced_module/__init__.py              +9   -0
imperative/python/megengine/traced_module/_passes/__init__.py      +12  -0
imperative/python/megengine/traced_module/_passes/optimization.py  +70  -0
imperative/python/test/unit/traced_module/test_passes.py           +106 -0
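Taken together, these changes expose a new `optimize` entry point on traced modules, exported as `megengine.traced_module.optimize`. A minimal usage sketch, mirroring the new tests below (the `Sequential` model here is only an illustrative stand-in, not part of the commit):

import numpy as np

import megengine as mge
import megengine.module as M
import megengine.traced_module as tm

# Hypothetical stand-in model: a conv followed by a BN layer, the pattern FuseConvBn folds.
model = M.Sequential(M.Conv2d(3, 3, 1, 1, 0), M.BatchNorm2d(3))
model.eval()

inp = mge.Tensor(np.random.random((1, 3, 32, 32)).astype("float32"))
traced = tm.trace_module(model, inp)                          # build a TracedModule
traced = traced.flatten()                                     # flatten nested call graphs
optimized = tm.optimize(traced, enabled_pass=["FuseConvBn"])  # run the new API
out = optimized(inp)                                          # numerically close to model(inp)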
imperative/python/megengine/traced_module/__init__.py
@@ -8,6 +8,7 @@
from ..core._imperative_rt.core2 import set_cpp_apply_module_trace
from . import compat
from ._passes import optimize
from .traced_module import (
    TracedModule,
    _register_all_builtin_module,
...
@@ -19,3 +20,11 @@ from .traced_module import (
_register_all_builtin_module()
set_cpp_apply_module_trace(cpp_apply_module_trace)

__all__ = {
    "register_as_builtin",
    "trace_module",
    "wrap",
    "TracedModule",
    "optimize",
}
imperative/python/megengine/traced_module/_passes/__init__.py (new file, mode 100644)
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from . import const_pass, fold_scale_pass, fuse_pass
from .optimization import optimize

__all__ = ["optimize"]
imperative/python/megengine/traced_module/_passes/optimization.py (new file, mode 100644)
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from copy import deepcopy
from typing import List, Set

from ...logger import get_logger
from ..traced_module import TracedModule
from .pass_base import get_default_pass_context, get_registered_pass

logger = get_logger(__name__)


def optimize(
    module: TracedModule, enabled_pass: List[str] = ["FuseConvBn"],
) -> TracedModule:
    r"""Performs a set of optimization passes to optimize a `TracedModule` for inference.

    The following passes are currently supported:

        * FuseConvBn: fuse BN layers into conv2d
        * FuseAddMul: fold adjacent const add or mul binary operations
        * BackwardFoldScale: backward fold const scaling into weights of conv2d

    Args:
        module: the :class:`TracedModule` to be optimized.
        enabled_pass: optimization passes to be enabled during optimization.
            Default: ["FuseConvBn"]

    Returns:
        the optimized :class:`TracedModule`.
    """
    defalut_passes_list = [
        "FuseConvBn",
        "FuseAddMul",
    ]

    if isinstance(enabled_pass, str):
        enabled_pass = [enabled_pass]

    if "BackwardFoldScale" in enabled_pass:
        if "FuseConvBn" not in enabled_pass:
            logger.warning(
                "Since BackwardFoldScale requires FuseConvBn"
                ", FuseConvBn will be enabled."
            )
            enabled_pass.append("FuseConvBn")
        defalut_passes_list.extend(
            ["BackwardFoldScale", "FuseAddMul",]
        )

    pass_ctx = get_default_pass_context()

    def run_pass(mod: TracedModule):
        for pass_name in defalut_passes_list:
            if pass_name in enabled_pass:
                pass_func = get_registered_pass(pass_name)()
                mod = pass_func(mod, pass_ctx)
        return mod

    module = deepcopy(module)
    module = run_pass(module)

    return module
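A note on the pass-selection logic above: `enabled_pass` accepts either a single pass name or a list of names, and requesting `BackwardFoldScale` without `FuseConvBn` makes `optimize` enable the latter automatically. A small sketch of the call variants, assuming `traced_net` is a flattened `TracedModule` as produced in the usage sketch after the change summary:

import megengine.traced_module as tm

# A bare string is normalized to a one-element list inside optimize.
net_a = tm.optimize(traced_net, "FuseConvBn")
net_b = tm.optimize(traced_net, enabled_pass=["FuseConvBn", "FuseAddMul"])

# BackwardFoldScale requires FuseConvBn: if it is missing from enabled_pass,
# optimize logs a warning, appends it, and runs FuseConvBn before BackwardFoldScale.
net_c = tm.optimize(traced_net, enabled_pass=["BackwardFoldScale"])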
imperative/python/test/unit/traced_module/test_passes.py (new file, mode 100644)
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import types

import numpy as np
import pytest

import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.traced_module as tm


class myconv(M.Conv2d):
    pass


class mybn(M.BatchNorm2d):
    pass


class MyBlock(M.Module):
    def __init__(self, conv_cls, bn_cls):
        super().__init__()
        self.conv = conv_cls(3, 3, 1, 1, 0)
        self.bn = bn_cls(3)
        self.conv2 = conv_cls(3, 3, 1, 1, 0)
        self.bn2 = bn_cls(3)
        self.scale = mge.Tensor([3, 4])

    def forward(self, x):
        x1 = self.conv(x)
        x1 = self.bn(x1)
        x1 = F.relu(x1)
        x1 = x1 * self.scale[0]
        x2 = self.conv2(x)
        x2 = self.bn2(x2)
        x2 = F.relu(x2)
        x2 = x2 * self.scale[1]
        y = x1 + x2
        y = y + 4
        y = self.scale[0] + y
        y = F.relu(y) * 3
        return y


class MyModule(M.Module):
    def __init__(self, conv_cls, bn_cls):
        super().__init__()
        self.block_0 = MyBlock(conv_cls, bn_cls)
        self.block_1 = MyBlock(conv_cls, bn_cls)

    def forward(self, x):
        x1 = self.block_0(x)
        x2 = self.block_1(x)
        y = x1 + x2
        y = F.reshape(y, (-1))
        y = y * 3
        return y


@pytest.mark.parametrize("conv_cls", [M.Conv2d, myconv])
@pytest.mark.parametrize("bn_cls", [M.BatchNorm2d, mybn])
def test_backward_fold_scale(conv_cls, bn_cls):
    module = MyModule(conv_cls, bn_cls)
    module.eval()
    inp = mge.Tensor(np.random.random((1, 3, 32, 32)))
    desired = module(inp)
    traced_net = tm.trace_module(module, inp)
    traced_net = traced_net.flatten()
    optimized_net = tm.optimize(traced_net, "BackwardFoldScale")
    actual = optimized_net(inp)
    np.testing.assert_allclose(desired=desired, actual=actual, atol=1e-4)
    # all const muls should have been folded into conv
    mul_list = optimized_net.graph.get_method_by_type("__mul__").as_list()
    assert len(mul_list) == 0


@pytest.mark.parametrize("conv_cls", [M.Conv2d, myconv])
@pytest.mark.parametrize("bn_cls", [M.BatchNorm2d, mybn])
def test_fuse_bn(conv_cls, bn_cls):
    module = MyModule(conv_cls, bn_cls)
    module.eval()
    inp = mge.Tensor(np.random.random((1, 3, 32, 32)))
    desired = module(inp)
    traced_net = tm.trace_module(module, inp)
    traced_net = traced_net.flatten()
    optimized_net = tm.optimize(traced_net, "FuseConvBn")
    actual = optimized_net(inp)
    np.testing.assert_allclose(desired=desired, actual=actual, atol=1e-4)
    # all BN layers should have been fused into conv
    bn_list = optimized_net.graph.get_function_by_type(F.batch_norm).as_list()
    assert len(bn_list) == 0
    bn_list = optimized_net.graph.get_module_by_type(M.BatchNorm2d).as_list()
    assert len(bn_list) == 0