magicwindyyd / mindspore (forked from MindSpore / mindspore; in sync with the fork source)
Commit b3c6da90, authored on Jun 02, 2020 by mindspore-ci-bot; committed via Gitee on Jun 02, 2020.
!1714 Add 2 patterns for SoftmaxGradExt fusion pass
Merge pull request !1714 from huanghui/SoftmaxGradExt-fussion-pass
Parents: 2aa1020d, d1cec14a
Showing 5 changed files with 123 additions and 3 deletions:
mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc (+2, -0)
mindspore/ccsrc/pre_activate/ascend/ir_fusion/softmax_grad_ext_fusion.cc (+19, -1)
mindspore/ccsrc/pre_activate/ascend/ir_fusion/softmax_grad_ext_fusion.h (+20, -2)
tests/ut/cpp/pre_activate/ascend/ir_fusion/softmax_grad_ext_fusion_test.cc (+42, -0)
tests/ut/cpp/python_input/gtest_input/pre_activate/softmax_grad_ext_fusion.py (+40, -0)
mindspore/ccsrc/pre_activate/ascend/ascend_backend_optimization.cc

@@ -100,6 +100,8 @@ void AddAscendBackendOptionalIRFusion(PassManager *ir_fusion_pm) {
   ir_fusion_pm->AddPass(std::make_shared<ClipByNormNoDivSquareSumFusion>());
   ir_fusion_pm->AddPass(std::make_shared<LambUpdateWithLRRuleFusion>());
   ir_fusion_pm->AddPass(std::make_shared<SoftmaxGradExtFusion>());
+  ir_fusion_pm->AddPass(std::make_shared<SoftmaxGradExtFusionV2>());
+  ir_fusion_pm->AddPass(std::make_shared<SoftmaxGradExtFusionV3>());
   ir_fusion_pm->AddPass(std::make_shared<ConfusionMulGradFusion>());
   ir_fusion_pm->AddPass(std::make_shared<ConfusionSoftmaxGradRule>());
   ir_fusion_pm->AddPass(std::make_shared<LambNextMVWithDecayRuleCond1>());
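Both registrations are needed because the pattern matcher compares graphs structurally: multiplication is commutative, but Mul(input2, mul1) and Mul(mul1, input2) are distinct trees to the matcher, so each operand ordering gets its own pass. In the notation of the Python test inputs at the end of this commit, the only difference between the V2 and V3 patterns is the final Mul:

mul_grad = Mul(input2, mul1)  # ordering matched by SoftmaxGradExtFusionV2
mul_grad = Mul(mul1, input2)  # ordering matched by SoftmaxGradExtFusionV3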
mindspore/ccsrc/pre_activate/ascend/ir_fusion/softmax_grad_ext_fusion.cc

@@ -31,6 +31,24 @@ const BaseRef SoftmaxGradExtFusion::DefinePattern() const {
   return mul_grad;
 }
 
+const BaseRef SoftmaxGradExtFusionV2::DefinePattern() const {
+  VectorRef mul({prim::kPrimMul, input1_, input0_});
+  VectorRef sum({sum_var_, mul});
+  VectorRef sub({prim::kPrimSub, input0_, sum});
+  VectorRef mul1({prim::kPrimMul, input1_, sub});
+  VectorRef mul_grad({prim::kPrimMul, input2_, mul1});
+  return mul_grad;
+}
+
+const BaseRef SoftmaxGradExtFusionV3::DefinePattern() const {
+  VectorRef mul({prim::kPrimMul, input1_, input0_});
+  VectorRef sum({sum_var_, mul});
+  VectorRef sub({prim::kPrimSub, input0_, sum});
+  VectorRef mul1({prim::kPrimMul, input1_, sub});
+  VectorRef mul_grad({prim::kPrimMul, mul1, input2_});
+  return mul_grad;
+}
+
 const AnfNodePtr SoftmaxGradExtFusion::Process(const FuncGraphPtr &graph, const AnfNodePtr &node,
                                                const EquivPtr &equiv) const {
   MS_EXCEPTION_IF_NULL(graph);
@@ -46,7 +64,7 @@ const AnfNodePtr SoftmaxGradExtFusion::Process(const FuncGraphPtr &graph, const
   MS_EXCEPTION_IF_NULL(fusion_node);
   fusion_node->set_scope(node->scope());
   fusion_node->set_abstract(node->abstract());
-  AnfAlgo::CopyNodeAttr(kAttrKeepDims, sum, fusion_node);
+  AnfAlgo::CopyNodeAttr(kAttrKeepDims, "keepdims", sum, fusion_node);
   AnfAlgo::CopyNodeAttr(kAttrAxis, sum, fusion_node);
   return fusion_node;
 }
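Besides the two new DefinePattern overrides, the one functional change in Process is the CopyNodeAttr call: the four-argument overload copies the ReduceSum's kAttrKeepDims attribute onto the fused node under the key "keepdims" rather than under its original key (presumably the spelling the fused SoftmaxGradExt kernel reads). Keeping the reduced dimensions is what lets the ReduceSum output broadcast against input0 inside the Sub node; a small NumPy illustration of the shape difference (again an illustration, not MindSpore code):

import numpy as np

x = np.ones((2, 3), dtype=np.float32)
s = np.sum(x, axis=1)                      # shape (2,): rank reduced; x - s raises a broadcast error
s_kept = np.sum(x, axis=1, keepdims=True)  # shape (2, 1): x - s_kept broadcasts cleanly
print((x - s_kept).shape)                  # (2, 3)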
mindspore/ccsrc/pre_activate/ascend/ir_fusion/softmax_grad_ext_fusion.h

@@ -17,13 +17,15 @@
 #define MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FUSION_SOFTMAX_GRAD_EXT_FUSION_H_
 
 #include <memory>
+#include <string>
 #include "pre_activate/common/optimizer.h"
 
 namespace mindspore {
 namespace opt {
 class SoftmaxGradExtFusion : public PatternProcessPass {
  public:
-  explicit SoftmaxGradExtFusion(bool multigraph = true) : PatternProcessPass("softmax_grad_ext_fusion", multigraph) {
+  explicit SoftmaxGradExtFusion(const std::string &name = "softmax_grad_ext_fusion", bool multigraph = true)
+      : PatternProcessPass(name, multigraph) {
     input0_ = std::make_shared<Var>();
     input1_ = std::make_shared<Var>();
     input2_ = std::make_shared<Var>();
@@ -33,12 +35,28 @@ class SoftmaxGradExtFusion : public PatternProcessPass {
   const BaseRef DefinePattern() const override;
   const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override;
 
- private:
+ protected:
   VarPtr input0_;
   VarPtr input1_;
   VarPtr input2_;
   VarPtr sum_var_;
 };
+
+class SoftmaxGradExtFusionV2 : public SoftmaxGradExtFusion {
+ public:
+  explicit SoftmaxGradExtFusionV2(bool multigraph = true)
+      : SoftmaxGradExtFusion("softmax_grad_ext_fusion_v2", multigraph) {}
+  ~SoftmaxGradExtFusionV2() override = default;
+  const BaseRef DefinePattern() const override;
+};
+
+class SoftmaxGradExtFusionV3 : public SoftmaxGradExtFusion {
+ public:
+  explicit SoftmaxGradExtFusionV3(bool multigraph = true)
+      : SoftmaxGradExtFusion("softmax_grad_ext_fusion_v3", multigraph) {}
+  ~SoftmaxGradExtFusionV3() override = default;
+  const BaseRef DefinePattern() const override;
+};
 }  // namespace opt
 }  // namespace mindspore
 #endif  // MINDSPORE_CCSRC_PRE_ACTIVATE_ASCEND_IR_FUSION_SOFTMAX_GRAD_EXT_FUSION_H_
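Design note on this refactor: parameterizing the base-class constructor on the pass name lets SoftmaxGradExtFusionV2 and SoftmaxGradExtFusionV3 register under their own names while reusing the base class's Process implementation unchanged, and relaxing private: to protected: is what allows their DefinePattern overrides to reference the shared pattern variables input0_, input1_, input2_, and sum_var_.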
tests/ut/cpp/pre_activate/ascend/ir_fusion/softmax_grad_ext_fusion_test.cc

@@ -49,5 +49,47 @@ TEST_F(TestHWOptSoftmaxGradExtFusion, test_fusion) {
   FuncGraphPtr g_after = get_py_fun_.CallAndParseRet("test_softmax_grad_ext_fusion", "after");
   EXPECT_TRUE(CheckEqualGraph(g_after, new_graph));
 }
+
+TEST_F(TestHWOptSoftmaxGradExtFusion, test_fusion_v2) {
+  FuncGraphPtr g = get_py_fun_.CallAndParseRet("test_softmax_grad_ext_fusion_v2", "before");
+  EXPECT_NE(g, nullptr);
+  std::vector<int> shp{1, 1, 1, 1};
+  auto x_abstract = std::make_shared<abstract::AbstractTensor>(kFloat32, shp);
+  AbstractBasePtrList args_spec_list;
+  for (size_t i = 0; i < 3; ++i) {
+    args_spec_list.push_back(x_abstract);
+  }
+  auto fg = GetKernelGraph(g, args_spec_list);
+
+  auto optimizer = std::make_shared<opt::GraphOptimizer>();
+  auto pm = std::make_shared<opt::PassManager>();
+  pm->AddPass(std::make_shared<opt::SoftmaxGradExtFusionV2>());
+  optimizer->AddPassManager(pm);
+  FuncGraphPtr new_graph = optimizer->Optimize(fg);
+
+  FuncGraphPtr g_after = get_py_fun_.CallAndParseRet("test_softmax_grad_ext_fusion_v2", "after");
+  EXPECT_TRUE(CheckEqualGraph(g_after, new_graph));
+}
+
+TEST_F(TestHWOptSoftmaxGradExtFusion, test_fusion_v3) {
+  FuncGraphPtr g = get_py_fun_.CallAndParseRet("test_softmax_grad_ext_fusion_v3", "before");
+  EXPECT_NE(g, nullptr);
+  std::vector<int> shp{1, 1, 1, 1};
+  auto x_abstract = std::make_shared<abstract::AbstractTensor>(kFloat32, shp);
+  AbstractBasePtrList args_spec_list;
+  for (size_t i = 0; i < 3; ++i) {
+    args_spec_list.push_back(x_abstract);
+  }
+  auto fg = GetKernelGraph(g, args_spec_list);
+
+  auto optimizer = std::make_shared<opt::GraphOptimizer>();
+  auto pm = std::make_shared<opt::PassManager>();
+  pm->AddPass(std::make_shared<opt::SoftmaxGradExtFusionV3>());
+  optimizer->AddPassManager(pm);
+  FuncGraphPtr new_graph = optimizer->Optimize(fg);
+
+  FuncGraphPtr g_after = get_py_fun_.CallAndParseRet("test_softmax_grad_ext_fusion_v3", "after");
+  EXPECT_TRUE(CheckEqualGraph(g_after, new_graph));
+}
 }  // namespace opt
 }  // namespace mindspore
tests/ut/cpp/python_input/gtest_input/pre_activate/softmax_grad_ext_fusion.py

@@ -54,3 +54,43 @@ def test_softmax_grad_ext_fusion(tag):
         return MakeTuple(res)
 
     return fns[tag]
+
+
+def test_softmax_grad_ext_fusion_v2(tag):
+    fns = FnDict()
+
+    @fns
+    def before(input0, input1, input2):
+        mul = Mul(input1, input0)
+        reduce_sum = ReduceSum(mul, axes)
+        sub = Sub(input0, reduce_sum)
+        mul1 = Mul(input1, sub)
+        mul_grad = Mul(input2, mul1)
+        return mul_grad
+
+    @fns
+    def after(input0, input1, input2):
+        res = SoftmaxGradExt(input0, input1, input2)
+        return MakeTuple(res)
+
+    return fns[tag]
+
+
+def test_softmax_grad_ext_fusion_v3(tag):
+    fns = FnDict()
+
+    @fns
+    def before(input0, input1, input2):
+        mul = Mul(input1, input0)
+        reduce_sum = ReduceSum(mul, axes)
+        sub = Sub(input0, reduce_sum)
+        mul1 = Mul(input1, sub)
+        mul_grad = Mul(mul1, input2)
+        return mul_grad
+
+    @fns
+    def after(input0, input1, input2):
+        res = SoftmaxGradExt(input0, input1, input2)
+        return MakeTuple(res)
+
+    return fns[tag]
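For readers unfamiliar with these gtest inputs: each test_* function returns either the "before" graph (the unfused pattern) or the expected "after" graph depending on the tag passed in from the C++ side via CallAndParseRet; Mul, Sub, ReduceSum, SoftmaxGradExt, MakeTuple, and the axes constant are presumably bound earlier in this file, outside the hunk shown. FnDict is a small decorator-based registry from the test utilities; the following sketch is an assumption-level reimplementation of how such a helper behaves, not the actual source:

class FnDict:
    # Collects functions by name when used as a decorator, then serves them by tag.
    def __init__(self):
        self.fn_dict = {}

    def __call__(self, fn):       # used as  @fns  above each before/after
        self.fn_dict[fn.__name__] = fn
        return fn

    def __getitem__(self, name):  # fns[tag] -> the registered function
        return self.fn_dict[name]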