Commit c51d90d8

Repository: magicwindyyd/mindspore (forked from MindSpore/mindspore)
Authored on Jun 05, 2020 by mindspore-ci-bot; committed via Gitee on Jun 05, 2020.

!1767 Move LayerNormGrad split pass ahead of kernel select

Merge pull request !1767 from huanghui/LayerNormGrad-split-pass

Parents: bd3e8da6, cf87218f

Showing 4 changed files with 4 additions and 80 deletions (+4, -80)
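The pass managers touched below run their passes strictly in the order AddPass is called, so the place where LayerNormGradSplit is registered decides whether it runs before or after the kernel-selection stage; registering it in the IR-fusion pass managers means the later kernel-select stage handles the nodes the pass creates, so the pass no longer needs its own KernelSelect helper. A minimal, standalone sketch of that ordering behaviour (illustrative names only, not MindSpore code):

// Sketch: a PassManager-style pipeline executes passes in registration order,
// so a pass added before "kernel_select" observes a graph with no kernels chosen.
#include <functional>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

struct Graph {
  bool kernels_selected = false;  // flipped once the "kernel select" stage has run
};

class PassManager {
 public:
  void AddPass(const std::string &name, std::function<void(Graph &)> pass) {
    passes_.emplace_back(name, std::move(pass));
  }
  void Run(Graph &g) const {
    // Passes execute strictly in the order AddPass was called.
    for (const auto &entry : passes_) {
      std::cout << "running pass: " << entry.first << "\n";
      entry.second(g);
    }
  }

 private:
  std::vector<std::pair<std::string, std::function<void(Graph &)>>> passes_;
};

int main() {
  Graph g;
  PassManager pm;
  // Registered ahead of kernel selection: any nodes this pass creates are still
  // covered by the later kernel-select stage, so the pass itself does not need
  // to carry a KernelSelect helper.
  pm.AddPass("layer_norm_grad_split", [](Graph &graph) {
    std::cout << "  kernels selected yet? " << std::boolalpha << graph.kernels_selected << "\n";
  });
  pm.AddPass("kernel_select", [](Graph &graph) { graph.kernels_selected = true; });
  pm.Run(g);
  return 0;
}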
mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc   +2 -2
mindspore/ccsrc/pre_activate/ascend/ir_fission/layer_norm_grad_split.cc   +1 -5
mindspore/ccsrc/pre_activate/ascend/ir_fission/layer_norm_grad_split.h   +1 -3
tests/ut/cpp/pre_activate/ascend/ir_fission/layer_norm_grad_split_test.cc   +0 -70
mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc

@@ -145,7 +145,6 @@ void RunOpAscendDataLayout(const std::shared_ptr<session::KernelGraph> &kernel_g
   MS_EXCEPTION_IF_NULL(kernel_graph);
   auto optimizer = std::make_shared<GraphOptimizer>();
   auto data_layout_pm = std::make_shared<PassManager>("pynative_transop_pm");
-  data_layout_pm->AddPass(std::make_shared<LayerNormGradSplit>());
   data_layout_pm->AddPass(std::make_shared<RunOpInsertTransData>());
   data_layout_pm->AddPass(std::make_shared<GetitemTuple>());
   data_layout_pm->AddPass(std::make_shared<CommonSubexpressionElimination>());

@@ -182,7 +181,6 @@ void AscendDataLayout(const std::shared_ptr<session::KernelGraph> &kernel_graph)
   MS_EXCEPTION_IF_NULL(kernel_graph);
   auto optimizer = std::make_shared<GraphOptimizer>();
   auto data_layout_pm = std::make_shared<PassManager>("transop_pm");
-  data_layout_pm->AddPass(std::make_shared<LayerNormGradSplit>());
   data_layout_pm->AddPass(std::make_shared<InsertTransOp>());
   data_layout_pm->AddPass(std::make_shared<GetitemTuple>());
   data_layout_pm->AddPass(std::make_shared<CommonSubexpressionElimination>());

@@ -238,6 +236,7 @@ void AscendBackendIRFusionOptimization(const std::shared_ptr<session::KernelGrap
     ir_fusion_pm->AddPass(std::make_shared<BnGradSplit>());
   } else {
     ir_fusion_pm->AddPass(std::make_shared<BatchNormGradSplit>());
+    ir_fusion_pm->AddPass(std::make_shared<LayerNormGradSplit>());
     ir_fusion_pm->AddPass(std::make_shared<FusedBatchNormFusion>());
     ir_fusion_pm->AddPass(std::make_shared<FusedBatchNormMixPrecisionFusion0>());
     ir_fusion_pm->AddPass(std::make_shared<FusedBatchNormMixPrecisionFusion1>());

@@ -282,6 +281,7 @@ void RunOpAscendBackendIRFusionOptimization(const std::shared_ptr<session::Kerne
   auto optimizer = std::make_shared<GraphOptimizer>();
   auto ir_fusion_pm = std::make_shared<PassManager>("ir_fusion_pm");
   ir_fusion_pm->AddPass(std::make_shared<BnSplit>());
+  ir_fusion_pm->AddPass(std::make_shared<LayerNormGradSplit>());
   ir_fusion_pm->AddPass(std::make_shared<TopKSplit>());
   ir_fusion_pm->AddPass(std::make_shared<AddnFission>());
   ir_fusion_pm->AddPass(std::make_shared<InsertPadForNMSWithMask>());
mindspore/ccsrc/pre_activate/ascend/ir_fission/layer_norm_grad_split.cc

@@ -32,7 +32,6 @@ void LayerNormGradSplit::CreateOutputsOfLayerNormXBackprop(
     std::vector<AnfNodePtr> *layer_norm_x_backprop_outputs) const {
   MS_EXCEPTION_IF_NULL(graph);
   MS_EXCEPTION_IF_NULL(layer_norm_grad);
-  MS_EXCEPTION_IF_NULL(kernel_select_);
   auto prim = std::make_shared<Primitive>(kLayerNormXBackpropOpName);
   std::vector<AnfNodePtr> layer_norm_x_backprop_inputs = {NewValueNode(prim)};
   for (size_t i = 1; i < layer_norm_grad->inputs().size(); ++i) {

@@ -46,7 +45,6 @@ void LayerNormGradSplit::CreateOutputsOfLayerNormXBackprop(
   auto shapes = {AnfAlgo::GetOutputInferShape(layer_norm_grad, 0)};
   AnfAlgo::SetOutputInferTypeAndShape(types, shapes, layer_norm_x_backprop.get());
-  kernel_select_->SelectKernel(layer_norm_x_backprop);
   (*layer_norm_x_backprop_outputs).push_back(layer_norm_x_backprop);
 }

@@ -55,7 +53,6 @@ void LayerNormGradSplit::CreateOutputsOfLayerNormBetaGammaBackprop(
     std::vector<AnfNodePtr> *layer_norm_beta_gamma_backprop_outputs) const {
   MS_EXCEPTION_IF_NULL(graph);
   MS_EXCEPTION_IF_NULL(layer_norm_grad);
-  MS_EXCEPTION_IF_NULL(kernel_select_);
   auto prim = std::make_shared<Primitive>(kLayerNormBetaGammaBackpropOpName);
   std::vector<AnfNodePtr> layer_norm_beta_gamma_backprop_inputs = {NewValueNode(prim)};
   for (size_t i = 1; i < layer_norm_grad->inputs().size() - 1; ++i) {

@@ -73,10 +70,9 @@ void LayerNormGradSplit::CreateOutputsOfLayerNormBetaGammaBackprop(
   AnfAlgo::SetOutputInferTypeAndShape(types, shapes, layer_norm_beta_gamma_backprop.get());
   // get device shape of LayerNormGrad's 5th Input, and convert it to attr
-  std::vector<size_t> shape_gamma = AnfAlgo::GetInputDeviceShape(layer_norm_grad, 4);
+  std::vector<size_t> shape_gamma = AnfAlgo::GetPrevNodeOutputInferShape(layer_norm_grad, 4);
   AnfAlgo::SetNodeAttr(kAttrShapeGamma, MakeValue(opt::Convert2Int(shape_gamma)), layer_norm_beta_gamma_backprop);
-  kernel_select_->SelectKernel(layer_norm_beta_gamma_backprop);
   CreateMultipleOutputsOfAnfNode(graph, layer_norm_beta_gamma_backprop, kLayerNormBetaGammaBackpropOutputNum,
                                  layer_norm_beta_gamma_backprop_outputs);
 }
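The swap from GetInputDeviceShape to GetPrevNodeOutputInferShape in the last hunk follows from the pass's new position: before kernel selection no device format has been assigned to the gamma input, so only its inferred shape exists. A standalone sketch of why the two differ (not MindSpore's AnfAlgo; it only uses the usual Ascend NC1HWC0 convention with C0 = 16):

// Sketch: a device shape such as NC1HWC0 is derivable from the inferred NCHW
// shape only after a format has been chosen during kernel selection.
#include <cstddef>
#include <iostream>
#include <vector>

constexpr size_t kCubeSize = 16;  // C0 block size used by the NC1HWC0 layout

std::vector<size_t> InferShapeToNc1hwc0(const std::vector<size_t> &nchw) {
  // Assumes a 4-D NCHW inferred shape; the channel dim is padded up to a
  // multiple of 16 and split into C1 x C0.
  size_t c1 = (nchw[1] + kCubeSize - 1) / kCubeSize;
  return {nchw[0], c1, nchw[2], nchw[3], kCubeSize};
}

int main() {
  std::vector<size_t> inferred = {32, 20, 7, 7};  // hypothetical NCHW inferred shape
  for (size_t dim : InferShapeToNc1hwc0(inferred)) {
    std::cout << dim << " ";                      // prints: 32 2 7 7 16
  }
  std::cout << "\n";
  return 0;
}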
mindspore/ccsrc/pre_activate/ascend/ir_fission/layer_norm_grad_split.h

@@ -26,8 +26,7 @@ namespace mindspore {
 namespace opt {
 class LayerNormGradSplit : public PatternProcessPass {
  public:
-  explicit LayerNormGradSplit(bool multigraph = true)
-      : PatternProcessPass("layer_norm_grad_split", multigraph), kernel_select_(std::make_shared<KernelSelect>()) {}
+  explicit LayerNormGradSplit(bool multigraph = true) : PatternProcessPass("layer_norm_grad_split", multigraph) {}
   ~LayerNormGradSplit() override = default;
   const BaseRef DefinePattern() const override;
   const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override;

@@ -37,7 +36,6 @@ class LayerNormGradSplit : public PatternProcessPass {
                                          std::vector<AnfNodePtr> *layer_norm_grad_outputs) const;
   void CreateOutputsOfLayerNormBetaGammaBackprop(const FuncGraphPtr &graph, const CNodePtr &layer_norm_grad,
                                                  std::vector<AnfNodePtr> *layer_norm_beta_gamma_outputs) const;
-  KernelSelectPtr kernel_select_;
 };
 }  // namespace opt
 }  // namespace mindspore
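For context on what the two helpers declared in this header produce: LayerNormGradSplit is a fission pass that replaces a single LayerNormGrad node with a LayerNormXBackprop node (roughly, the gradient w.r.t. the input x) and a LayerNormBetaGammaBackprop node (the gradients w.r.t. gamma and beta), as the op names in the .cc diff above show. A toy, self-contained sketch of that one-node-to-two-nodes step, using plain strings instead of ANF nodes (not the actual pass):

// Toy illustration of the fission performed by LayerNormGradSplit: every
// "LayerNormGrad" entry in a flat op list is replaced by the two backprop ops
// it decomposes into. Real ANF-graph rewriting is far richer; this only shows
// the shape of the transformation.
#include <iostream>
#include <string>
#include <vector>

std::vector<std::string> SplitLayerNormGrad(const std::vector<std::string> &ops) {
  std::vector<std::string> result;
  for (const auto &op : ops) {
    if (op == "LayerNormGrad") {
      result.push_back("LayerNormXBackprop");          // gradient w.r.t. the input x
      result.push_back("LayerNormBetaGammaBackprop");  // gradients w.r.t. gamma and beta
    } else {
      result.push_back(op);
    }
  }
  return result;
}

int main() {
  std::vector<std::string> graph = {"MatMul", "LayerNormGrad", "ReluGrad"};
  for (const auto &op : SplitLayerNormGrad(graph)) {
    std::cout << op << "\n";
  }
  return 0;
}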
tests/ut/cpp/pre_activate/ascend/ir_fission/layer_norm_grad_split_test.cc

@@ -39,36 +39,6 @@ class TestHWLayerNormGradSplit : public BackendCommon {
   UT::PyFuncGraphFetcher get_py_fun_;
 };

-class MockLayerNormGradSplitKernelSelect : public KernelSelect {
- public:
-  MockLayerNormGradSplitKernelSelect() = default;
-  ~MockLayerNormGradSplitKernelSelect() override = default;
-  void SelectKernel(const CNodePtr &cnode) override {
-    auto name = AnfAlgo::GetCNodeName(cnode);
-    if (name == kLayerNormXBackpropOpName) {
-      kernel::KernelBuildInfo::KernelBuildInfoBuilder builder;
-      builder.SetInputsFormat(
-        {kOpFormat_NC1HWC0, kOpFormat_NC1HWC0, kOpFormat_NC1HWC0, kOpFormat_NC1HWC0, kOpFormat_NC1HWC0});
-      builder.SetInputsDeviceType(
-        {kNumberTypeFloat16, kNumberTypeFloat16, kNumberTypeFloat16, kNumberTypeFloat16, kNumberTypeFloat16});
-      builder.SetOutputsFormat({kOpFormat_NC1HWC0});
-      builder.SetOutputsDeviceType({kNumberTypeFloat16});
-      AnfAlgo::SetSelectKernelBuildInfo(builder.Build(), cnode.get());
-      return;
-    }
-    if (name == kLayerNormBetaGammaBackpropOpName) {
-      kernel::KernelBuildInfo::KernelBuildInfoBuilder builder;
-      builder.SetInputsFormat({kOpFormat_NC1HWC0, kOpFormat_NC1HWC0, kOpFormat_NC1HWC0, kOpFormat_NC1HWC0});
-      builder.SetInputsDeviceType({kNumberTypeFloat16, kNumberTypeFloat16, kNumberTypeFloat16, kNumberTypeFloat16});
-      builder.SetOutputsFormat({kOpFormat_NC1HWC0, kOpFormat_NC1HWC0});
-      builder.SetOutputsDeviceType({kNumberTypeFloat16, kNumberTypeFloat16});
-      AnfAlgo::SetSelectKernelBuildInfo(builder.Build(), cnode.get());
-      return;
-    }
-  }
-};  // namespace opt

 TEST_F(TestHWLayerNormGradSplit, test_layer_norm_grad_split) {
   get_py_fun_.SetDoResolve(true);
   FuncGraphPtr g = get_py_fun_.CallAndParseRet("test_layer_norm_grad_split", "before");

@@ -81,49 +51,9 @@ TEST_F(TestHWLayerNormGradSplit, test_layer_norm_grad_split) {
   auto kernel_graph = GetKernelGraph(g, args_spec_list);
   EXPECT_NE(kernel_graph, nullptr);

-  // get LayerNormGrad
-  CNodePtr ret = kernel_graph->get_return();
-  EXPECT_NE(ret, nullptr);
-  EXPECT_NE(ret->input(1), nullptr);
-  EXPECT_TRUE(ret->input(1)->isa<CNode>());
-  auto make_tuple1 = ret->input(1)->cast<CNodePtr>();
-  EXPECT_NE(make_tuple1->input(1), nullptr);
-  EXPECT_TRUE(make_tuple1->input(1)->isa<CNode>());
-  auto make_tuple2 = make_tuple1->input(1)->cast<CNodePtr>();
-  EXPECT_NE(make_tuple2->input(1), nullptr);
-  EXPECT_TRUE(make_tuple2->input(1)->isa<CNode>());
-  auto tuple_getitem = make_tuple2->input(1)->cast<CNodePtr>();
-  EXPECT_NE(tuple_getitem->input(1), nullptr);
-  EXPECT_TRUE(tuple_getitem->input(1)->isa<CNode>());
-  auto layer_norm_grad = tuple_getitem->input(1)->cast<CNodePtr>();
-  // set kernel for LayerNormGrad
-  kernel::KernelBuildInfo::KernelBuildInfoBuilder builder1;
-  builder1.SetInputsFormat(
-    {kOpFormat_NC1HWC0, kOpFormat_NC1HWC0, kOpFormat_NC1HWC0, kOpFormat_NC1HWC0, kOpFormat_NC1HWC0});
-  builder1.SetOutputsFormat({kOpFormat_NC1HWC0, kOpFormat_NC1HWC0, kOpFormat_NC1HWC0});
-  builder1.SetInputsDeviceType(
-    {kNumberTypeFloat16, kNumberTypeFloat16, kNumberTypeFloat16, kNumberTypeFloat16, kNumberTypeFloat16});
-  builder1.SetOutputsDeviceType({kNumberTypeFloat16, kNumberTypeFloat16, kNumberTypeFloat16});
-  builder1.SetKernelType(TBE_KERNEL);
-  AnfAlgo::SetSelectKernelBuildInfo(builder1.Build(), layer_norm_grad.get());
-  // get param5
-  EXPECT_NE(layer_norm_grad->input(5), nullptr);
-  auto param = layer_norm_grad->input(5);
-  // set kernel for param5
-  kernel::KernelBuildInfo::KernelBuildInfoBuilder builder2;
-  builder2.SetOutputsFormat({kOpFormat_NC1HWC0});
-  builder2.SetOutputsDeviceType({kNumberTypeFloat16});
-  AnfAlgo::SetSelectKernelBuildInfo(builder2.Build(), param.get());
   // do layer_norm_grad_split pass
   auto optimizer = std::make_shared<opt::GraphOptimizer>();
   auto pm = std::make_shared<opt::PassManager>();
   auto pass = std::make_shared<opt::LayerNormGradSplit>();
-  auto kernel_select = std::make_shared<MockLayerNormGradSplitKernelSelect>();
-  pass->kernel_select_ = kernel_select;
   pm->AddPass(pass);
   optimizer->AddPassManager(pm);
   auto new_graph = optimizer->Optimize(kernel_graph);