Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
magicwindyyd
mindspore
提交
daa423a2
M
mindspore
项目概览
magicwindyyd
/
mindspore
与 Fork 源项目一致
Fork自
MindSpore / mindspore
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
M
mindspore
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
daa423a2
编写于
4年前
作者:
Z
zhengjun10
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
add mul add fusion pass
上级
ac239b65
master
无相关合并请求
变更
5
隐藏空白更改
内联
并排
Showing
5 changed files
with
215 additions
and
0 deletions
+215
-0
mindspore/lite/src/runtime/kernel/arm/fp32/scale.cc
mindspore/lite/src/runtime/kernel/arm/fp32/scale.cc
+3
-0
mindspore/lite/tools/converter/graphdef_transform.cc
mindspore/lite/tools/converter/graphdef_transform.cc
+12
-0
mindspore/lite/tools/converter/legacy_optimizer/fusion/CMakeLists.txt
...te/tools/converter/legacy_optimizer/fusion/CMakeLists.txt
+1
-0
mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.cc
.../converter/legacy_optimizer/fusion/mul_add_fusion_pass.cc
+147
-0
mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.h
...s/converter/legacy_optimizer/fusion/mul_add_fusion_pass.h
+52
-0
未找到文件。
mindspore/lite/src/runtime/kernel/arm/fp32/scale.cc
浏览文件 @
daa423a2
...
...
@@ -81,6 +81,9 @@ int ScaleCPUKernel::InitParameter() {
auto
scale_tensor
=
in_tensors_
.
at
(
1
);
auto
scale_shape
=
scale_tensor
->
shape
();
if
(
scale_param_
->
axis_
<
0
)
{
scale_param_
->
axis_
=
scale_param_
->
axis_
+
in_shape
.
size
();
}
if
(
scale_shape
.
size
()
+
scale_param_
->
axis_
>
in_shape
.
size
())
{
MS_LOG
(
ERROR
)
<<
"Scale tensor shape is incorrect."
;
return
RET_ERROR
;
...
...
This diff is collapsed.
Click to expand it.
mindspore/lite/tools/converter/graphdef_transform.cc
浏览文件 @
daa423a2
...
...
@@ -28,6 +28,7 @@
#include "tools/converter/legacy_optimizer/fusion/format_trans_transpose_fusion_pass.h"
#include "tools/converter/legacy_optimizer/fusion/quant_cast_fusion_pass.h"
#include "tools/converter/legacy_optimizer/fusion/batchnorm_convert_scale_pass.h"
#include "tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.h"
#include "tools/converter/legacy_optimizer/graph/weight_format_hardcode_pass.h"
#include "tools/converter/legacy_optimizer/graph/weight_format_transform_pass.h"
#include "tools/converter/legacy_optimizer/graph/format_trans_pass.h"
...
...
@@ -172,6 +173,17 @@ int GraphDefTransform::Transform(const converter::Flags &ctx) {
}
}
{
Optimizer
fusionOptimizer
;
fusionOptimizer
.
AddPass
(
new
(
std
::
nothrow
)
MulAddFusionPass
());
fusionOptimizer
.
AddPass
(
new
(
std
::
nothrow
)
IsolatedNodeRemovePass
());
status
=
fusionOptimizer
.
Run
(
graphDefT
);
if
(
status
!=
RET_OK
&&
status
!=
RET_NO_CHANGE
)
{
MS_LOG
(
ERROR
)
<<
"Run fusionOptimizer graphPasses Failed"
;
return
status
;
}
}
// do quantization
if
(
fbQuantizer
!=
nullptr
)
{
status
=
fbQuantizer
->
DoQuantize
();
...
...
This diff is collapsed.
Click to expand it.
mindspore/lite/tools/converter/legacy_optimizer/fusion/CMakeLists.txt
浏览文件 @
daa423a2
...
...
@@ -2,6 +2,7 @@ add_library(fusion_mid OBJECT
${
CMAKE_CURRENT_SOURCE_DIR
}
/fusion_pattern.cc
${
CMAKE_CURRENT_SOURCE_DIR
}
/fusion_pass.cc
${
CMAKE_CURRENT_SOURCE_DIR
}
/matmul_biasadd_fusion_pass.cc
${
CMAKE_CURRENT_SOURCE_DIR
}
/mul_add_fusion_pass.cc
${
CMAKE_CURRENT_SOURCE_DIR
}
/quant_cast_fusion_pass.cc
${
CMAKE_CURRENT_SOURCE_DIR
}
/batchnorm_fold_fusion_pass.cc
${
CMAKE_CURRENT_SOURCE_DIR
}
/format_trans_fusion_pass.cc
...
...
This diff is collapsed.
Click to expand it.
mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.cc
0 → 100644
浏览文件 @
daa423a2
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <string>
#include <unordered_map>
#include <vector>
#include <utility>
#include <memory>
#include "tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.h"
#include "utils/log_adapter.h"
#include "securec/include/securec.h"
// #include "utils/log_adapter.h"
#include "tools/common/graph_util.h"
#include "include/errorcode.h"
#include "schema/inner/model_generated.h"
#include "src/common/op_utils.h"
namespace
mindspore
{
namespace
lite
{
#define MUL_ADD_MATCH_PATH_LEN 2
#define ADD_OP_BIAS_INDEX 1
#define MUL_OP_BIAS_INDEX 1
#define MUL_OP_INPUT_NUM 2
#define ADD_OP_INPUT_NUM 2
// Entry point of the pass: delegates to the generic FusionPass driver, which
// walks the graph and calls DoFusion() for every match of the pattern built
// in DefinePattern().
STATUS MulAddFusionPass::Run(MetaGraphT *graph) { return FusionPass::Run(graph); }
// Registers the single pattern this pass looks for: an Add node whose left
// input is produced by a Mul node.
STATUS MulAddFusionPass::DefinePattern() {
  auto mulOp = std::make_shared<PatternOp>();
  auto addOp = std::make_shared<PatternOp>();
  mulOp->id = MUL_NAME;
  mulOp->types = {schema::PrimitiveType_Mul};
  addOp->id = ADD_NAME;
  addOp->types = {schema::PrimitiveType_Add};
  // the Add's left operand must come from the Mul
  addOp->left = mulOp;
  std::unique_ptr<FusionPattern> pattern(new (std::nothrow) FusionPattern("MulAddFusion"));
  if (pattern == nullptr) {
    MS_LOG(ERROR) << "new fusionPattern failed";
    return RET_ERROR;
  }
  pattern->AddPatternOp(mulOp);
  pattern->AddPatternOp(addOp);
  pattern->Finish();
  // ownership transfers to the patterns container of the base class
  this->patterns.emplace_back(pattern.release());
  return RET_OK;
}
/**
 * Handles one Mul->Add match found by the pattern matcher: when both nodes'
 * second inputs are constant tensors, rewrites the pair into a single Scale
 * node (via AddNewScaleNode); otherwise leaves the graph untouched.
 *
 * @param graph       graph being transformed; must not be null
 * @param patternName name of the matched pattern (unused here)
 * @param matchedPath map from pattern-op id (MUL_NAME / ADD_NAME) to the
 *                    matched node path; must contain exactly two entries
 * @return RET_OK on success or when fusion is skipped; error status otherwise
 */
STATUS MulAddFusionPass::DoFusion(MetaGraphT *graph, const std::string &patternName,
                                  std::unordered_map<std::string, std::shared_ptr<Path>> &matchedPath) {
  MS_ASSERT(graph != nullptr);
  if (matchedPath.size() != MUL_ADD_MATCH_PATH_LEN) {
    MS_LOG(ERROR) << "Mul-Add-Fusion should have two NodeIndex in matchedPair";
    return RET_PARAM_INVALID;
  }
  auto mulPath = matchedPath[MUL_NAME];
  auto addPath = matchedPath[ADD_NAME];
  auto &mulNode = graph->nodes.at(mulPath->nodeIdx);
  auto &addNode = graph->nodes.at(addPath->nodeIdx);
  // can not check shape because there is no shape infer in converter
  MS_ASSERT(mulNode != nullptr);
  auto mulNodeInputIndex = mulNode->inputIndex;
  MS_ASSERT(mulNodeInputIndex.size() == MUL_OP_INPUT_NUM);
  MS_ASSERT(graph->allTensors.size() > mulNodeInputIndex.at(MUL_OP_BIAS_INDEX));
  const auto &mulNodeBiasTensor = graph->allTensors.at(mulNodeInputIndex.at(MUL_OP_BIAS_INDEX));
  MS_ASSERT(mulNodeBiasTensor != nullptr);
  // NOTE(review): this compares refCount against a NodeType enum value; it
  // looks like the intended field is nodeType — confirm before changing.
  if (mulNodeBiasTensor->refCount != schema::NodeType_ValueNode) {
    // mul's second input is not a constant: don't fuse, return unchanged
    return RET_OK;
  }
  // if the add node's second tensor is not a constant tensor, don't fuse
  auto addNodeInputIndex = addNode->inputIndex;
  if (addNodeInputIndex.size() != ADD_OP_INPUT_NUM) {
    MS_LOG(ERROR) << "add node input tensors number is invalid! ";
    return RET_ERROR;
  }
  MS_ASSERT(graph->allTensors.size() > addNodeInputIndex.at(ADD_OP_BIAS_INDEX));
  const auto &addNodeBiasTensor = graph->allTensors.at(addNodeInputIndex.at(ADD_OP_BIAS_INDEX));
  MS_ASSERT(addNodeBiasTensor != nullptr);
  // NOTE(review): same refCount-vs-NodeType comparison as above.
  if (addNodeBiasTensor->refCount != schema::NodeType_ValueNode) {
    // add's second input is not a constant: don't fuse, return unchanged
    return RET_OK;
  }
  // convert mul and add to scale
  auto status = AddNewScaleNode(graph, mulNode, addNode.get(), addNodeInputIndex.at(ADD_OP_BIAS_INDEX));
  if (status != RET_OK) {
    // was: "AddFullConnectionBiasTensor failed, %d" — wrong function name and
    // an unbound printf placeholder; report the actual callee and status
    MS_LOG(ERROR) << "AddNewScaleNode failed, error: " << status;
    return status;
  }
  return RET_OK;
}
/**
 * Rewrites the matched Mul node in place into a Scale node that additionally
 * consumes the Add node's constant bias tensor, then either removes the Add
 * node from the graph or — when the Add carried a fused activation — turns
 * the Add into a standalone Activation node.
 *
 * @param graph        graph being transformed; must not be null
 * @param mulNode      the Mul node to convert into a Scale node
 * @param addNode      the Add node to fold away (or convert to Activation)
 * @param addBiasIndex tensor index of the Add's constant bias input
 * @return RET_OK on success; error status otherwise
 */
STATUS MulAddFusionPass::AddNewScaleNode(MetaGraphT *graph, const std::unique_ptr<CNodeT> &mulNode, CNodeT *addNode,
                                         uint32_t addBiasIndex) {
  MS_ASSERT(graph != nullptr);
  MS_ASSERT(mulNode != nullptr);
  MS_ASSERT(addNode != nullptr);
  // replace mulNode as scale
  // NOTE(review): overwriting the union's type/value directly may leak the
  // previous MulT/AddT payload — confirm the union's ownership semantics.
  mulNode->primitive->value.type = schema::PrimitiveType_Scale;
  // use nothrow new so the nullptr check below is reachable (plain new would
  // throw instead of returning nullptr); matches the style used elsewhere here
  std::unique_ptr<ScaleT> scaleParam(new (std::nothrow) ScaleT());
  if (scaleParam == nullptr) {
    // was: "new transposeParam failed" — copy-paste from another pass
    MS_LOG(ERROR) << "new scaleParam failed";
    return RET_ERROR;
  }
  // NHWC: axis -1 means the scale applies along the last (channel) axis
  scaleParam->axis = -1;
  mulNode->primitive->value.value = scaleParam.release();
  // the new scale node takes add's bias tensor as an extra input
  mulNode->inputIndex.push_back(addBiasIndex);
  if (addNode->primitive->value.AsAdd()->activationType != ActivationType_NO_ACTIVATION) {
    // replace addNode with a standalone activation node
    std::unique_ptr<ActivationT> activationParam(new (std::nothrow) ActivationT());
    if (activationParam == nullptr) {
      // check was missing — scaleParam above is checked, this one was not
      MS_LOG(ERROR) << "new activationParam failed";
      return RET_ERROR;
    }
    // read the activation type before the union is repurposed
    activationParam->type = addNode->primitive->value.AsAdd()->activationType;
    addNode->primitive->value.type = schema::PrimitiveType_Activation;
    addNode->primitive->value.value = activationParam.release();
    // drop the bias input that now belongs to the scale node
    addNode->inputIndex.pop_back();
    return RET_OK;
  }
  // no fused activation: remove the addNode entirely
  auto status = IsolateOneWayNode(graph, addNode);
  if (status != RET_OK) {
    // was: "... subGraph: %zu, node: %zu, error: %d" with all printf args
    // commented out; stream the status instead
    MS_LOG(ERROR) << "IsolateOneWayNode failed, error: " << status;
    return status;
  }
  return RET_OK;
}
}
// namespace lite
}
// namespace mindspore
This diff is collapsed.
Click to expand it.
mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.h
0 → 100644
浏览文件 @
daa423a2
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_PREDICT_MUL_ADD_FUSION_PASS_H
#define MINDSPORE_PREDICT_MUL_ADD_FUSION_PASS_H

#include <string>
#include <unordered_map>
#include <memory>
#include <algorithm>
#include <utility>
#include "tools/converter/legacy_optimizer/fusion/fusion_pass.h"
#include "tools/common/graph_util.h"

namespace mindspore {
namespace lite {
// Pattern-op ids used to look up the matched Mul and Add nodes in DoFusion.
constexpr const char *MUL_NAME = "MUL";
constexpr const char *ADD_NAME = "ADD";

// Fusion pass that rewrites a Mul node followed by an Add node (both with
// constant second inputs) into a single Scale node.
class MulAddFusionPass : public FusionPass {
 public:
  MulAddFusionPass() = default;

  // NOTE(review): not marked override — assumes FusionPass declares a virtual
  // destructor; confirm in fusion_pass.h.
  ~MulAddFusionPass() = default;

  // Builds the Mul->Add pattern this pass searches for.
  STATUS DefinePattern() override;

  // Rewrites one matched Mul/Add pair into a Scale node.
  //   graph       - graph being transformed
  //   patternName - name of the matched pattern
  //   matchedPath - pattern-op id -> matched node path
  STATUS DoFusion(MetaGraphT *graph, const std::string &patternName,
                  std::unordered_map<std::string, std::shared_ptr<Path>> &matchedPath) override;

  // Runs the generic fusion driver over the whole graph.
  STATUS Run(MetaGraphT *graph) override;

 protected:
  // Converts mulNode into a Scale node consuming addNode's bias tensor and
  // folds addNode away (or turns it into an Activation node).
  static STATUS AddNewScaleNode(MetaGraphT *graph, const std::unique_ptr<CNodeT> &mulNode, CNodeT *addNode,
                                uint32_t addBiasIndex);
};
}  // namespace lite
}  // namespace mindspore
#endif  // MINDSPORE_PREDICT_MUL_ADD_FUSION_PASS_H
This diff is collapsed.
Click to expand it.
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录
新手
引导
客服
返回
顶部