Commit 8d1f3025
Authored Oct 12, 2020 by Megvii Engine Team
fix(mge): fix batch norm dump
GitOrigin-RevId: eb739437ef48fc6e8ddf55a9ebc54e8979b55cbd
Parent: 40e778fb
Showing 3 changed files with 27 additions and 9 deletions (+27 −9):

imperative/python/test/integration/test_trace_dump.py    +4  −0
src/opr/impl/dnn/batch_norm.cpp                          +21 −9
src/opr/include/megbrain/opr/dnn/batch_norm.h            +2  −0
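As the commit title and the new test below suggest, the failure showed up when dumping a traced network that contains BatchNorm layers. The following is a minimal sketch of that scenario, assuming MegEngine's v1.x `megengine.jit.trace` API; the network structure and file name are illustrative, not taken from the commit:

    import numpy as np
    import megengine as mge
    import megengine.functional as F
    import megengine.module as M
    from megengine.jit import trace

    class Net(M.Module):
        def __init__(self):
            super().__init__()
            self.fc = M.Linear(4, 8, bias=True)
            self.bn = M.BatchNorm1d(8)

        def forward(self, x):
            return F.tanh(self.bn(self.fc(x)))

    net = Net()
    net.eval()  # inference mode: BatchNorm reads its running mean/variance

    @trace(symbolic=True, capture_as_const=True)
    def fun(data):
        return net(data)

    # run once to record the graph, then serialize it
    fun(mge.tensor(np.random.randn(2, 4).astype("float32")))
    fun.dump("net.mge")  # the path this commit fixes for BatchNorm oprs

In inference mode the running mean/variance are read-only inputs to the operator, so a graph being dumped must not promise to update them in place; the C++ changes below gate that promise on the new `need_stats()` predicate.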
imperative/python/test/integration/test_trace_dump.py

@@ -47,13 +47,17 @@ class XORNet(M.Module):
         self.num_class = 2
         super().__init__()
         self.fc0 = M.Linear(self.num_class, self.mid_dim, bias=True)
+        self.bn0 = M.BatchNorm1d(self.mid_dim)
         self.fc1 = M.Linear(self.mid_dim, self.mid_dim, bias=True)
+        self.bn1 = M.BatchNorm1d(self.mid_dim)
         self.fc2 = M.Linear(self.mid_dim, self.num_class, bias=True)

     def forward(self, x):
         x = self.fc0(x)
+        x = self.bn0(x)
         x = F.tanh(x)
         x = self.fc1(x)
+        x = self.bn1(x)
         x = F.tanh(x)
         x = self.fc2(x)
         return x
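With `bn0` and `bn1` inserted between the linear layers, the existing trace-and-dump integration test now routes data through BatchNorm, exercising the operator paths that this commit touches.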
src/opr/impl/dnn/batch_norm.cpp

@@ -44,7 +44,7 @@ BatchNormForward::BatchNormForward(VarNode *x,
         m_force_inplace = false;
     }

-    if (m_force_inplace) {
+    if (m_force_inplace && param.fwd_mode == Param::FwdMode::TRAINING) {
         auto check_dest = [&](VarNode* dest) {
             auto dest_opr = dest->owner_opr();
             mgb_throw_if(!(dest_opr->same_type<SharedDeviceTensor>() ||
@@ -62,7 +62,14 @@ BatchNormForward::BatchNormForward(VarNode *x,
     add_input({x, scale, bias, mean, variance});

-    if (m_force_inplace) {
+    if (param.fwd_mode == Param::FwdMode::INFERENCE) {
+        auto mark_empty_var = [&](VarNode *var) {
+            var->add_flag(VarNode::Flag::ALLOW_EMPTY_SHAPE)
+                    .add_flag(VarNode::Flag::VOLATILE_CONTENT);
+        };
+        mark_empty_var(output(0));
+        mark_empty_var(output(1));
+    } else if (m_force_inplace) {
         output(0)->set_fwd_in2out_writable_force(input(3)).
                 add_flag(VarNode::Flag::NO_MEM_RECLAIM);
@@ -129,7 +136,7 @@ SymbolVarArray BatchNormForward::make(SymbolVar x,
 cg::OperatorNodeBase::NodeProp* BatchNormForward::do_make_node_prop() const {
     auto ret = Super::do_make_node_prop();
-    if (input().size() == 5) {
+    if (need_stats()) {
         ret->add_flag(NodeProp::Flag::FORCE_UPDATE_INPUT_VAR);
     }
     return ret;
@@ -140,7 +147,7 @@ void BatchNormForward::scn_do_execute() {
     auto &&y = output(4)->dev_tensor();
     mgb_assert(x.layout().is_contiguous() &&
                y.layout().is_contiguous());
-    if (input().size() == 5) {    // need running mean/variance
+    if (need_stats()) {
         auto &&o0 = output(0)->dev_tensor(),
              &&o1 = output(1)->dev_tensor(),
              &&i0 = input(3)->dev_tensor(),
@@ -164,8 +171,14 @@ void BatchNormForward::scn_do_execute() {
     }
     auto scale = input(1)->dev_tensor().as_megdnn();
     auto bias = input(2)->dev_tensor().as_megdnn();
-    auto mean = output(0)->dev_tensor().as_megdnn();
-    auto variance = output(1)->dev_tensor().as_megdnn();
+    megdnn::TensorND mean, variance;
+    if (param().fwd_mode == Param::FwdMode::INFERENCE) {
+        mean = input(3)->dev_tensor().as_megdnn();
+        variance = input(4)->dev_tensor().as_megdnn();
+    } else {
+        mean = output(0)->dev_tensor().as_megdnn();
+        variance = output(1)->dev_tensor().as_megdnn();
+    }
     auto save_mean = output(2)->dev_tensor().as_megdnn();
     auto save_variance = output(3)->dev_tensor().as_megdnn();
     auto workspace = intl::get_megdnn_workspace_from_var(output().back());
@@ -180,12 +193,11 @@ void BatchNormForward::add_input_layout_constraint() {
 void BatchNormForward::get_output_var_shape(
         const TensorShapeArray &inp_shape, TensorShapeArray &out_shape) const {
-    size_t nr_inp = input().size();
     out_shape[4] = inp_shape[0];
     for (size_t i = 0; i < 4; ++i) {
         out_shape[i] = inp_shape[1];
     }
-    if (nr_inp == 3) {
+    if (!need_stats()) {
         out_shape[0] = out_shape[1] = {0};
     }
@@ -221,7 +233,7 @@ void BatchNormForward::init_output_dtype() {
 }

 void BatchNormForward::mem_plan_fwd_in2out_writable() {
-    if (!m_force_inplace && input().size() == 5) {
+    if (need_stats() && !m_force_inplace) {
         // TODO: testing
         output(0)->set_fwd_in2out_writable(input(3));
         output(1)->set_fwd_in2out_writable(input(4));
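The `scn_do_execute` hunk above now feeds megdnn the running `mean`/`variance` from `input(3)`/`input(4)` when `fwd_mode` is `INFERENCE`, instead of unconditionally pointing at `output(0)`/`output(1)`. For reference, a minimal NumPy sketch of what inference-mode batch norm computes with those running statistics (the `eps` value is illustrative, not taken from the commit):

    import numpy as np

    def bn_inference(x, scale, bias, running_mean, running_var, eps=1e-5):
        # y = scale * (x - running_mean) / sqrt(running_var + eps) + bias
        return scale * (x - running_mean) / np.sqrt(running_var + eps) + bias

Since nothing is written back to the running statistics in this mode, outputs 0 and 1 carry no data, which is presumably why the constructor now marks them `ALLOW_EMPTY_SHAPE` and `VOLATILE_CONTENT`.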
src/opr/include/megbrain/opr/dnn/batch_norm.h

@@ -79,6 +79,8 @@ MGB_DEFINE_OPR_CLASS(BatchNormForward,
     // if set to True, running mean/variance will be updated inplace
     bool m_force_inplace = true;
+    // need running mean/variance
+    bool need_stats() const { return input().size() == 5 && param().fwd_mode == Param::FwdMode::TRAINING; }
 };

 using BatchNorm = BatchNormForward;
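The new `need_stats()` helper centralizes the condition that the .cpp previously spelled as `input().size() == 5` at each call site, additionally requiring `FwdMode::TRAINING`; every site that updates, forwards, or allocates running statistics now consults it, so inference-mode graphs never claim to modify their inputs.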