magicwindyyd / mindspore (forked from MindSpore / mindspore)

Commit 1d55e4e3
Authored Aug 18, 2020 by mindspore-ci-bot; committed via Gitee, Aug 18, 2020

!4620 add nullptr check

Merge pull request !4620 from zhengjun10/master

Parents: 4394ad84, e6e3956c
Showing 6 changed files with 60 additions and 12 deletions (+60 -12)
mindspore/lite/tools/converter/anf_transform.cc                   +1  -1
mindspore/lite/tools/optimizer/common/gllo_utils.cc               +9  -1
mindspore/lite/tools/optimizer/common/pass_manager_extends.cc     +5  -1
mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc  +18 -2
mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.cc      +6  -2
mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc    +21 -5
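Every hunk below applies the same defensive pattern: allocations made with new (std::nothrow) and pointers produced by casts or factories are checked against nullptr before use, and fusion passes return nullptr instead of a live node when they bail out. A minimal, runnable sketch of the allocation half of the idiom (stand-in names, not the MindSpore API):

#include <iostream>
#include <new>

int main() {
  const int kernel_nums = 16;
  // new (std::nothrow) returns nullptr on failure instead of throwing
  // std::bad_alloc, so every such allocation needs an explicit check.
  auto *bias_data = new (std::nothrow) float[kernel_nums];
  if (bias_data == nullptr) {
    std::cerr << "bias_data is nullptr\n";
    return 1;
  }
  delete[] bias_data;
  return 0;
}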
mindspore/lite/tools/converter/anf_transform.cc

@@ -36,7 +36,7 @@ void AnfTransform::SetGraphDef(schema::MetaGraphT *_dstDef) { graphDefT = _dstDe
 FuncGraphPtr AnfTransform::Transform(const FuncGraphPtr &old_graph) {
   // return old_graph;
   auto optimizer = std::make_shared<opt::GraphOptimizer>();
-  auto pm = std::make_shared<opt::PassManager>();
+  auto pm = std::make_shared<opt::PassManager>("anf fusion pass manager", false);
   pm->AddPass(std::make_shared<opt::ConvBiasaddFusion>());
   pm->AddPass(std::make_shared<opt::ConvBatchNormFusion>());
   pm->AddPass(std::make_shared<opt::ConvScaleFusion>());
mindspore/lite/tools/optimizer/common/gllo_utils.cc

@@ -327,7 +327,15 @@ schema::PrimitiveType GetCNodeType(const BaseRef &n) {
 }

 bool IsParamNode(const BaseRef &n) {
-  return utils::isa<ParameterPtr>(n);
+  if (!utils::isa<ParameterPtr>(n)) {
+    return false;
+  }
+  auto param = utils::cast<ParameterPtr>(n)->default_param();
+  auto tensor = std::dynamic_pointer_cast<ParamValueLite>(param);
+  if (tensor == nullptr) {
+    return false;
+  }
+  return tensor->tensor_addr() != nullptr;
 }

 bool IsConvNode(const BaseRef &n) {
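The stricter IsParamNode now accepts a parameter only when its default value is a ParamValueLite that actually holds tensor data. The load-bearing idiom is the guarded downcast chain; a runnable sketch with stand-in types (ParamValue below is a hypothetical stand-in for ParamValueLite, tensor_addr for its accessor):

#include <iostream>
#include <memory>

struct Value { virtual ~Value() = default; };
struct ParamValue : Value {     // hypothetical stand-in for ParamValueLite
  void *tensor_addr = nullptr;  // stand-in for the tensor_addr() accessor
};

// Mirrors the new IsParamNode: reject a wrong dynamic type (or a null
// default value), then require the tensor to actually point at data.
bool HasTensorData(const std::shared_ptr<Value> &v) {
  auto tensor = std::dynamic_pointer_cast<ParamValue>(v);
  if (tensor == nullptr) {
    return false;
  }
  return tensor->tensor_addr != nullptr;
}

int main() {
  auto empty = std::make_shared<ParamValue>();
  std::cout << HasTensorData(empty) << "\n";  // 0: no tensor data yet
}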
mindspore/lite/tools/optimizer/common/pass_manager_extends.cc

@@ -28,6 +28,8 @@
 namespace mindspore {
 namespace opt {
+static size_t count = 0;
+constexpr size_t kMaxRepassTimes = 9;
 const std::vector<PassPtr> &PassManager::Passes() const { return passes_; }

 void PassManager::AddPass(const PassPtr &pass) {

@@ -79,9 +81,11 @@ bool PassManager::Run(const FuncGraphPtr &func_graph) const {
   while (change) {
     change = Run(func_graph, passes_);
     changed = change || changed;
-    if (run_only_once_) {
+    if (run_only_once_ || count > kMaxRepassTimes) {
       break;
     }
+    count++;
+    MS_LOG(INFO) << "Run pass counts:" << count;
   }
   return changed;
 }
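The second hunk bounds the repass loop. This matters together with the anf_transform.cc change above: the fusion pass manager is now constructed with run_only_once_ set to false, so without a cap a pass that keeps reporting changes would loop forever. A runnable sketch of the loop's new shape, with a callback standing in for Run(func_graph, passes_):

#include <cstddef>
#include <functional>
#include <iostream>

constexpr std::size_t kMaxRepassTimes = 9;  // same cap as in the diff

// run_passes stands in for PassManager::Run(func_graph, passes_); it returns
// true while some pass still changed the graph on this iteration.
bool RunToFixedPoint(const std::function<bool()> &run_passes, bool run_only_once) {
  bool changed = false;
  bool change = true;
  std::size_t count = 0;
  while (change) {
    change = run_passes();
    changed = change || changed;
    if (run_only_once || count > kMaxRepassTimes) {
      break;  // stop even if passes still report changes
    }
    count++;
  }
  return changed;
}

int main() {
  // A pathological pass set that always reports a change: the cap ends it.
  std::size_t calls = 0;
  RunToFixedPoint([&calls] { ++calls; return true; }, false);
  std::cout << "iterations: " << calls << "\n";  // 11 with this cap
}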
mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc

@@ -45,12 +45,20 @@ const std::vector<Tensor *> GetCNodeInputTensors(const CNodePtr &CNode) {
     auto tensor_shape = tensorT->dims;
     auto lite_tensor =
       new (std::nothrow) Tensor(TypeId(tensorT->dataType), tensor_shape, tensorT->format, tensorT->nodeType);
+    if (lite_tensor == nullptr) {
+      MS_LOG(ERROR) << "lite tensor is nullptr";
+      return input_tensors;
+    }
     auto lite_tensor_size = tensorT->data.size() * sizeof(uint8_t);
     // when tensorT as graph input
     if (lite_tensor_size == 0) {
       return input_tensors;
     }
     auto tensor_data = new (std::nothrow) char[lite_tensor_size / sizeof(char)];
+    if (tensor_data == nullptr) {
+      MS_LOG(ERROR) << "tensor_data is nullptr";
+      return input_tensors;
+    }
     auto ret = memcpy_s(tensor_data, lite_tensor_size, tensorT->data.data(), lite_tensor_size);
     if (ret != EOK) {
       MS_LOG(EXCEPTION) << "memcpy error: " << ret;

@@ -97,6 +105,10 @@ const ParameterPtr CreateNewParamter(const FuncGraphPtr &func_graph, Tensor *ten
   if (tensor->Data() != nullptr) {
     auto size = tensor->ElementsNum();
     auto tensor_data = new (std::nothrow) float[size];
+    if (tensor_data == nullptr) {
+      MS_LOG(ERROR) << "tensor_data is nullptr";
+      return nullptr;
+    }
     auto ret = memcpy_s(tensor_data, size * sizeof(float), tensor->Data(), size * sizeof(float));
     if (ret != EOK) {
       MS_LOG(EXCEPTION) << "memcpy error: " << ret;

@@ -150,11 +162,15 @@ const AnfNodePtr ConstFoldPass::Process(const FuncGraphPtr &func_graph, const An
     std::vector<Tensor *> output_tensors{output_nums, new Tensor()};
     auto scheam_primitive = PackPrimitiveT(input_cnode);
     auto lite_primitive = lite::Primitive::CreatePrimitive(scheam_primitive);
+    if (lite_primitive == nullptr) {
+      MS_LOG(DEBUG) << "constant_folding schedule node lite primitive nullptr";
+      return nullptr;
+    }
     lite_primitive->InferShape(input_tensors, output_tensors);
     auto lite_kernel = GetLiteKernel(input_tensors, output_tensors, lite_primitive);
     if (lite_kernel == nullptr) {
-      MS_LOG(ERROR) << "constant_folding schedule node lite kernel nullptr";
-      return any_node;
+      MS_LOG(DEBUG) << "constant_folding schedule node lite kernel nullptr";
+      return nullptr;
     }
     auto ret = lite_kernel->Run();
     if (0 != ret) {
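memcpy_s and EOK in these hunks come from the securec library that MindSpore Lite links against: the call takes the destination capacity alongside the copy count and returns EOK (0) only on success. A self-contained sketch of that contract using a local stand-in (memcpy_s_like is hypothetical, defined here so the example compiles without the securec header):

#include <cstddef>
#include <cstring>
#include <iostream>

// Local stand-in for securec's memcpy_s: copy only when both pointers are
// valid and the destination capacity covers the requested count; returning
// 0 plays the role of EOK.
int memcpy_s_like(void *dest, std::size_t dest_max, const void *src, std::size_t count) {
  if (dest == nullptr || src == nullptr || count > dest_max) {
    return -1;  // any non-zero value means "not EOK"
  }
  std::memcpy(dest, src, count);
  return 0;
}

int main() {
  float src[4] = {1.f, 2.f, 3.f, 4.f};
  float dst[4];
  // Mirrors the diff's call shape: capacity and count are both size * sizeof(float).
  if (memcpy_s_like(dst, sizeof(dst), src, sizeof(src)) != 0) {
    std::cerr << "memcpy error\n";
    return 1;
  }
  std::cout << dst[3] << "\n";  // prints 4
}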
mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.cc

@@ -83,7 +83,11 @@ void GenConvNewBias(const FuncGraphPtr &func_graph, const CNodePtr &conv_node, c
   if (kernel_nums <= 0) {
     MS_LOG(EXCEPTION) << "kernel num less than 0";
   }
   auto add_bias_data = new (std::nothrow) float[kernel_nums];
+  if (add_bias_data == nullptr) {
+    MS_LOG(ERROR) << "tensor_data is nullptr";
+    return;
+  }
   auto bias_add_weight = bias_node->input(kAddWEIGHTINDEX);
   CheckIfNodeIsParam(bias_add_weight);
   auto add_weight_param = bias_add_weight->cast<ParameterPtr>()->default_param();

@@ -140,7 +144,7 @@ const AnfNodePtr ConvBiasaddFusion::Process(const FuncGraphPtr &func_graph, cons
   AnfNodePtr conv_node_anf = add_node->input(1);
   CheckIfAnfNodeIsNull(conv_node_anf);
   if (IsMultiOutputTensors(func_graph, conv_node_anf)) {
-    return add_node;
+    return nullptr;
   }
   auto conv_node = conv_node_anf->cast<CNodePtr>();
   CheckIfCNodeIsNull(conv_node);
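The "return nullptr" changes in this file and the next rely on the pattern-pass convention that Process returns the replacement node, or nullptr to mean "no rewrite"; returning the matched node itself, as the old code did, would report a change that never happened. A minimal runnable sketch of that contract with stand-in types (Node, Process, and multi_output are hypothetical, not the MindSpore API):

#include <iostream>
#include <memory>

struct Node { bool multi_output = false; };
using NodePtr = std::shared_ptr<Node>;

// Hypothetical Process: nullptr means "leave this match untouched", so the
// caller only records a change when a real replacement comes back.
NodePtr Process(const NodePtr &matched) {
  if (matched->multi_output) {
    return nullptr;  // cannot fuse: the conv output feeds several users
  }
  return matched;  // the real pass would return the fused replacement here
}

int main() {
  auto n = std::make_shared<Node>();
  n->multi_output = true;
  std::cout << (Process(n) == nullptr ? "skip" : "rewrite") << "\n";
}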
mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc

@@ -67,7 +67,7 @@ const AnfNodePtr ConvTransformFusion::Process(const FuncGraphPtr &func_graph, co
   auto pre_node = transform_node->input(1);
   auto conv_node = pre_node->cast<CNodePtr>();
   if (IsMultiOutputTensors(func_graph, conv_node)) {
-    return transform_node;
+    return nullptr;
   }

   auto abstr = transform_node->abstract();

@@ -76,8 +76,16 @@ const AnfNodePtr ConvTransformFusion::Process(const FuncGraphPtr &func_graph, co
     MS_LOG(ERROR) << "Unsupported conv node, " << conv_node->DebugString();
     return node;
   }
   auto trans_scale = new (std::nothrow) float[kernel_nums];
+  if (trans_scale == nullptr) {
+    MS_LOG(ERROR) << "tensor_data is nullptr";
+    return nullptr;
+  }
   auto trans_bias = new (std::nothrow) float[kernel_nums];
+  if (trans_bias == nullptr) {
+    MS_LOG(ERROR) << "tensor_data is nullptr";
+    return nullptr;
+  }
   GenTransParam(transform_node, kernel_nums, trans_scale, trans_bias);
   GenNewConvTensor(func_graph, conv_node, kernel_nums, trans_scale, trans_bias);
   delete[] trans_bias;

@@ -155,7 +163,11 @@ const void ConvTransformFusion::GenNewConvTensor(const FuncGraphPtr &func_graph,
     bias_data = reinterpret_cast<float *>(bias_tensor->tensor_addr());
     bias_flag = true;
   } else {
     bias_data = new (std::nothrow) float[kernel_num];
+    if (trans_scale == nullptr) {
+      MS_LOG(ERROR) << "tensor_data is nullptr";
+      return;
+    }
   }
   CalNewBiasTensor(bias_data, kernel_num, bias_flag, trans_scale, trans_bias);
   if (!bias_flag) {

@@ -193,7 +205,11 @@ const void ConvTransformFusion::CalNewBiasTensor(float *bias_data, int kernel_nu
                                                  const float *trans_scale, const float *trans_bias) const {
   MS_ASSERT(bias_data != nullptr);
   if (bias_flag) {
     auto tmp_bias_data = new (std::nothrow) float[kernel_num];
+    if (tmp_bias_data == nullptr) {
+      MS_LOG(ERROR) << "tensor_data is nullptr";
+      return;
+    }
     if (EOK != memset_s(tmp_bias_data, kernel_num * sizeof(float), 0, kernel_num * sizeof(float))) {
       MS_LOG(EXCEPTION) << "memset bias data failed";
     }
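One detail worth noting in the GenNewConvTensor hunk above: the new guard tests trans_scale rather than the bias_data that was just allocated, so a failed bias_data allocation would slip through unchecked. A minimal sketch of the check as presumably intended (FillBias and its logging are hypothetical):

#include <iostream>
#include <new>

// Hypothetical helper showing the guard applied to the pointer that was
// actually just allocated.
bool FillBias(float *&bias_data, int kernel_num) {
  bias_data = new (std::nothrow) float[kernel_num];
  if (bias_data == nullptr) {  // test bias_data itself, not trans_scale
    std::cerr << "bias_data is nullptr\n";
    return false;
  }
  return true;
}

int main() {
  float *bias = nullptr;
  if (FillBias(bias, 16)) {
    delete[] bias;
  }
}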