magicwindyyd / mindspore
Forked from MindSpore / mindspore (in sync with upstream)
13f52ef1
编写于
8月 04, 2020
作者:
H
hangq
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
fix bug in lite_session and tensor for deconstruct session
上级
8a0b3e23
Showing 10 changed files with 89 additions and 49 deletions (+89 −49)
- mindspore/lite/include/context.h (+3 −3)
- mindspore/lite/src/common/file_utils.cc (+1 −2)
- mindspore/lite/src/common/ms_tensor_utils.cc (+1 −1)
- mindspore/lite/src/context.cc (+3 −3)
- mindspore/lite/src/ir/tensor.cc (+5 −2)
- mindspore/lite/src/lite_session.cc (+22 −0)
- mindspore/lite/src/model_impl.cc (+2 −3)
- mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.cc (+2 −2)
- mindspore/lite/src/scheduler.cc (+2 −2)
- mindspore/lite/tools/benchmark/benchmark.cc (+48 −31)
mindspore/lite/include/context.h

```diff
@@ -56,10 +56,10 @@ class MS_API Context {
   /// \brief Constructor of MindSpore Lite Context using input value for parameters.
   ///
-  /// \param[in] threadNum Define the threadNum during the runtime.
+  /// \param[in] thread_num Define the threadNum during the runtime.
   /// \param[in] allocator Define the allocator for malloc.
-  /// \param[in] deviceCtx Define device information during the runtime.
-  Context(int threadNum, std::shared_ptr<Allocator> allocator, DeviceContext deviceCtx);
+  /// \param[in] device_ctx Define device information during the runtime.
+  Context(int thread_num, std::shared_ptr<Allocator> allocator, DeviceContext device_ctx);

   /// \brief Destructor of MindSpore Lite Context.
   virtual ~Context();
```
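This hunk is a pure rename: threadNum and deviceCtx become thread_num and device_ctx, presumably to match the snake_case parameter style used elsewhere in the public headers. The matching definition change appears in src/context.cc below.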
mindspore/lite/src/common/file_utils.cc

```diff
@@ -44,7 +44,7 @@ char *ReadFile(const char *file, size_t *size) {
   ifs.seekg(0, std::ios::end);
   *size = ifs.tellg();
-  std::unique_ptr<char> buf(new (std::nothrow) char[*size]);
+  std::unique_ptr<char[]> buf(new (std::nothrow) char[*size]);
   if (buf == nullptr) {
     MS_LOG(ERROR) << "malloc buf failed, file: " << realPath;
     ifs.close();
@@ -165,4 +165,3 @@ void CompareOutput(float *output_data, std::string file_path) {
 }
 }  // namespace lite
 }  // namespace mindspore
-
```
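The ReadFile fix swaps std::unique_ptr&lt;char&gt; for std::unique_ptr&lt;char[]&gt;. The distinction matters because the single-object form calls `delete` on destruction, while the buffer was allocated with `new[]`; that mismatch is undefined behavior. A minimal standalone sketch of the two deleters (not MindSpore code):

```cpp
#include <memory>
#include <new>

int main() {
  // Allocated with new[]: must be released with delete[].
  // unique_ptr<char> would call `delete p;` in its destructor -> undefined behavior.
  // unique_ptr<char[]> calls `delete[] p;`, matching the allocation.
  std::unique_ptr<char[]> buf(new (std::nothrow) char[1024]);
  if (buf == nullptr) {
    return 1;  // allocation failed
  }
  buf[0] = 'x';  // the array specialization also provides operator[]
  return 0;
}  // delete[] runs here automatically
```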
mindspore/lite/src/common/ms_tensor_utils.cc

```diff
@@ -33,7 +33,7 @@ std::vector<MSTensor *> PackToMSTensors(const std::vector<Tensor *> &in_tensors)
       MS_LOG(ERROR) << "new LiteTensor failed";
       return ret;
     }
-    ret.emplace_back();
+    ret.emplace_back(ms_tensor);
   }
   return ret;
 }
```
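The one-argument form is the whole fix here: on a std::vector of pointers, emplace_back() with no arguments value-initializes the new element to nullptr, so the just-allocated ms_tensor was leaked and the caller received a null entry. A standalone illustration:

```cpp
#include <cassert>
#include <vector>

int main() {
  std::vector<int *> v;
  int x = 42;

  v.emplace_back();    // no args: value-initializes the element -> nullptr
  v.emplace_back(&x);  // forwards the pointer -> element points at x

  assert(v[0] == nullptr);  // the bug: a null entry, original object dropped
  assert(*v[1] == 42);      // the fix: the intended pointer is stored
  return 0;
}
```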
mindspore/lite/src/context.cc

```diff
@@ -22,10 +22,10 @@ Context::Context() { allocator = Allocator::Create(); }

 Context::~Context() = default;

-Context::Context(int threadNum, std::shared_ptr<Allocator> allocator, DeviceContext deviceCtx) {
+Context::Context(int thread_num, std::shared_ptr<Allocator> allocator, DeviceContext device_ctx) {
   this->allocator = std::move(allocator);
-  this->thread_num_ = threadNum;
-  this->device_ctx_ = std::move(deviceCtx);
+  this->thread_num_ = thread_num;
+  this->device_ctx_ = device_ctx;
 }
 }  // namespace mindspore::lite
```
mindspore/lite/src/ir/tensor.cc

```diff
@@ -74,7 +74,11 @@ int Tensor::CopyTensor(const Tensor &srcTensor, bool copyData) {

 Tensor::~Tensor() {
   if (nullptr != this->data_) {
-    free(this->data_);
+    if (this->allocator_ != nullptr) {
+      this->allocator_->Free(this->data_);
+    } else {
+      free(this->data_);
+    }
   }
 }
@@ -320,4 +324,3 @@ MSTensor *MSTensor::CreateTensor(TypeId data_type, const std::vector<int> &shape
 }
 }  // namespace tensor
 }  // namespace mindspore
-
```
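The destructor now routes the buffer back to whichever allocator produced it: memory obtained from a pool allocator must be returned through that allocator's Free, and only malloc'd memory may go to free(). A minimal sketch of the pattern, using simplified stand-in types rather than the actual MindSpore classes:

```cpp
#include <cstddef>
#include <cstdlib>

// Simplified stand-ins for the real Allocator/Tensor types (assumption for illustration).
struct Allocator {
  virtual void *Malloc(std::size_t size) = 0;
  virtual void Free(void *ptr) = 0;
  virtual ~Allocator() = default;
};

class Tensor {
 public:
  ~Tensor() {
    if (data_ != nullptr) {
      if (allocator_ != nullptr) {
        allocator_->Free(data_);  // return memory to the pool that owns it
      } else {
        free(data_);  // fall back to free() only for malloc'd buffers
      }
    }
  }

 private:
  void *data_ = nullptr;
  Allocator *allocator_ = nullptr;  // non-owning; set when data_ came from a pool
};
```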
mindspore/lite/src/lite_session.cc

```diff
@@ -220,8 +220,13 @@ void LiteSession::BindThread(bool ifBind) {

 LiteSession::~LiteSession() {
   for (auto *tensor : tensors) {
+    // weight data can not be to free, we will free weight data when freeing meta_graph
+    if (tensor->TensorType() == schema::NodeType_ValueNode && !IsContain(this->inputs, tensor)) {
+      tensor->SetData(nullptr);
+    }
     delete tensor;
   }
+  // inputs outputs input_map output_map are freed in tensors
   for (auto *input : inputs) {
     ((tensor::LiteTensor *)input)->SetTensorImpl(nullptr);
     delete input;
@@ -230,9 +235,26 @@ LiteSession::~LiteSession() {
     ((tensor::LiteTensor *)output)->SetTensorImpl(nullptr);
     delete output;
   }
+  for (auto iter : this->input_map) {
+    for (auto *ms_tensor : iter.second) {
+      ((tensor::LiteTensor *)ms_tensor)->SetTensorImpl(nullptr);
+      delete ms_tensor;
+    }
+    iter.second.clear();
+  }
+  input_map.clear();
+  for (auto iter : this->output_map) {
+    for (auto *ms_tensor : iter.second) {
+      ((tensor::LiteTensor *)ms_tensor)->SetTensorImpl(nullptr);
+      delete ms_tensor;
+    }
+    iter.second.clear();
+  }
+  output_map.clear();
   for (auto *kernel : kernels) {
     delete kernel;
   }
+  delete this->context_;
 }

 std::vector<mindspore::tensor::MSTensor *> LiteSession::GetInputsByName(const std::string &name) const {
```
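This is the core of the commit: the session holds several views of the same underlying buffers, so tearing it down naively frees memory twice. Weight tensors point into the meta_graph buffer (freed elsewhere), and the MSTensor wrappers in inputs, outputs, input_map, and output_map share a single tensor impl. Detaching the shared pointer before delete is what makes each buffer freed exactly once. A reduced sketch of the hazard, with hypothetical names rather than the real classes:

```cpp
// Reduced model of the LiteTensor/Tensor relationship (illustrative only).
struct Impl {
  void *data = nullptr;
};

struct Wrapper {
  Impl *impl = nullptr;              // shared with other wrappers
  void SetImpl(Impl *i) { impl = i; }
  ~Wrapper() { delete impl; }        // owning delete (dangerous when impl is shared)
};

int main() {
  Impl *shared = new Impl;
  Wrapper a, b;
  a.SetImpl(shared);
  b.SetImpl(shared);

  // Without detaching, ~Wrapper would delete `shared` twice (double free).
  // The commit's fix: null out the shared impl in all but one owner first.
  b.SetImpl(nullptr);
  // `a` remains the single owner; `shared` is freed exactly once.
  return 0;
}
```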
mindspore/lite/src/model_impl.cc

```diff
@@ -56,7 +56,7 @@ lite::Primitive *ModelImpl::GetOp(const std::string &name) const {
 }

 ModelImpl::~ModelImpl() {
-  delete (this->model_buf_);
+  delete[](this->model_buf_);
   for (auto iter : ops) {
     delete (iter.second);
   }
@@ -64,7 +64,7 @@ ModelImpl::~ModelImpl() {
 }

 void ModelImpl::FreeMetaGraph() {
-  delete this->model_buf_;
+  delete[](this->model_buf_);
   model_buf_ = nullptr;
 }
@@ -200,4 +200,3 @@ int ModelImpl::BuildOps() {
   return 0;
 }
 }  // namespace mindspore::lite
-
```
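This file and arithmetic.cc below fix the same class of bug: model_buf_ and the tile buffers are arrays allocated with `new[]`, so releasing them with scalar `delete` (or `free`) is undefined behavior; only `delete[]` matches the allocation. A standalone illustration:

```cpp
int main() {
  char *buf = new char[64];  // array allocation

  // delete buf;   // WRONG: scalar delete for an array -> undefined behavior
  // free(buf);    // WRONG: free() does not pair with new[] either
  delete[] buf;    // correct: matches the new[] allocation
  buf = nullptr;   // guard against accidental reuse, as FreeMetaGraph does
  return 0;
}
```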
mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic.cc

```diff
@@ -32,11 +32,11 @@ namespace mindspore::kernel {
 ArithmeticCPUKernel::~ArithmeticCPUKernel() {
   if (tile_data0_ != nullptr) {
-    free(tile_data0_);
+    delete[](tile_data0_);
     tile_data0_ = nullptr;
   }
   if (tile_data1_ != nullptr) {
-    free(tile_data1_);
+    delete[](tile_data1_);
     tile_data1_ = nullptr;
   }
 }
```
mindspore/lite/src/scheduler.cc

```diff
@@ -175,11 +175,11 @@ kernel::LiteKernel *Scheduler::ScheduleNode(const std::vector<tensor::Tensor *>
     kernel::KernelKey key{desc.arch, kNumberTypeFloat16, desc.type};
     kernel = KernelFactory::GetInstance()->GetKernel(inputs, outputs, primitive, context_, key);
     if (kernel != nullptr) {
-      MS_LOG(INFO) << "Get fp16 op success.";
+      MS_LOG(DEBUG) << "Get fp16 op success.";
       kernel->set_desc(desc);
       return kernel;
     }
-    MS_LOG(INFO) << "Get fp16 op failed, back to fp32 op.";
+    MS_LOG(DEBUG) << "Get fp16 op failed, back to fp32 op.";
     kernel = KernelFactory::GetInstance()->GetKernel(inputs, outputs, primitive, context_, desc);
   } else {
     kernel = KernelFactory::GetInstance()->GetKernel(inputs, outputs, primitive, context_, desc);
```
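A small logging cleanup rides along with the memory fixes: the fp16 fallback messages in ScheduleNode drop from INFO to DEBUG, likely because they fire once per scheduled node and would otherwise flood the default log output.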
mindspore/lite/tools/benchmark/benchmark.cc

```diff
@@ -34,7 +34,7 @@ int Benchmark::GenerateRandomData(size_t size, void *data) {
   for (size_t i = 0; i < size; i++) {
     castedData[i] = static_cast<char>(i);
   }
-  return 0;
+  return RET_OK;
 }

 int Benchmark::GenerateInputData() {
@@ -53,7 +53,7 @@ int Benchmark::GenerateInputData() {
       return status;
     }
   }
-  return 0;
+  return RET_OK;
 }

 int Benchmark::LoadInput() {
@@ -70,12 +70,12 @@ int Benchmark::LoadInput() {
       return status;
     }
   }
-  return 0;
+  return RET_OK;
 }

 int Benchmark::ReadInputFile() {
   if (msInputs.empty()) {
-    return 0;
+    return RET_OK;
   }
   if (this->_flags->inDataType == kImage) {
@@ -104,7 +104,7 @@ int Benchmark::ReadInputFile() {
       memcpy(inputData, binBuf, tensorDataSize);
     }
   }
-  return 0;
+  return RET_OK;
 }

 // calibData is FP32
@@ -114,13 +114,13 @@ int Benchmark::ReadCalibData() {
   std::ifstream inFile(calibDataPath);
   if (!inFile.good()) {
     MS_LOG(ERROR) << "file: " << calibDataPath << " is not exist";
-    return 1;
+    return RET_ERROR;
   }

   if (!inFile.is_open()) {
     MS_LOG(ERROR) << "file: " << calibDataPath << " open failed";
     inFile.close();
-    return 1;
+    return RET_ERROR;
   }

   std::string line;
@@ -155,7 +155,7 @@ int Benchmark::ReadCalibData() {
   }
   inFile.close();
   MS_LOG(INFO) << "Finish reading calibData file";
-  return 0;
+  return RET_OK;
 }

 // tensorData need to be converter first
@@ -182,7 +182,7 @@ float Benchmark::CompareData(const std::string &nodeName, std::vector<int> msSha
     }
     oss << ") are different";
     MS_LOG(ERROR) << "%s", oss.str().c_str();
-    return -1;
+    return RET_ERROR;
   }
   size_t errorCount = 0;
   float meanError = 0;
@@ -218,7 +218,7 @@ float Benchmark::CompareData(const std::string &nodeName, std::vector<int> msSha
     return meanError;
   } else {
     MS_LOG(INFO) << "%s is not in Source Model output", nodeName.c_str();
-    return -1;
+    return RET_ERROR;
   }
 }
@@ -257,14 +257,14 @@ int Benchmark::CompareOutput() {
     if (meanBias > this->_flags->accuracyThreshold) {
       MS_LOG(ERROR) << "Mean bias of all nodes is too big: " << meanBias << "%%";
-      return 1;
+      return RET_ERROR;
     } else {
-      return 0;
+      return RET_OK;
     }
   } else {
     MS_LOG(ERROR) << "Error in CompareData";
     std::cout << "=======================================================" << std::endl << std::endl;
-    return 1;
+    return RET_ERROR;
   }
 }
@@ -309,7 +309,7 @@ int Benchmark::MarkPerformance() {
            _flags->modelPath.substr(_flags->modelPath.find_last_of(DELIM_SLASH) + 1).c_str(),
            _flags->numThreads, timeMin / 1000.0f, timeMax / 1000.0f, timeAvg / 1000.0f);
   }
-  return 0;
+  return RET_OK;
 }

 int Benchmark::MarkAccuracy() {
@@ -341,7 +341,7 @@ int Benchmark::MarkAccuracy() {
     MS_LOG(ERROR) << "Compare output error " << status;
     return status;
   }
-  return 0;
+  return RET_OK;
 }

 int Benchmark::RunBenchmark(const std::string &deviceType) {
```
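The bulk of the benchmark changes replace bare 0/1/-1 returns with the named status codes used across MindSpore Lite (RET_OK and RET_ERROR from include/errorcode.h), so callers test one symbol instead of guessing which magic number a function favors. A minimal sketch of the convention; the exact constant values here are an assumption for illustration:

```cpp
#include <iostream>

// Named status codes in the style of mindspore/lite/include/errorcode.h.
// Values are assumed for this sketch.
constexpr int RET_OK = 0;
constexpr int RET_ERROR = -1;

int LoadSomething(bool ok) {
  if (!ok) {
    return RET_ERROR;  // one failure symbol instead of a mix of 1 and -1
  }
  return RET_OK;
}

int main() {
  int status = LoadSomething(true);
  if (status != RET_OK) {  // callers compare against the name, not a magic number
    std::cerr << "load failed: " << status << std::endl;
    return status;
  }
  return RET_OK;
}
```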
```diff
@@ -353,15 +353,25 @@ int Benchmark::RunBenchmark(const std::string &deviceType) {
   size_t size = 0;
   char *graphBuf = ReadFile(_flags->modelPath.c_str(), &size);
   if (graphBuf == nullptr) {
-    MS_LOG(ERROR) << "Load graph failed while running %s", modelName.c_str();
-    return 1;
+    MS_LOG(ERROR) << "Read model file failed while running %s", modelName.c_str();
+    return RET_ERROR;
   }
   auto model = lite::Model::Import(graphBuf, size);
-  auto context = new lite::Context;
+  if (model == nullptr) {
+    MS_LOG(ERROR) << "Import model file failed while running %s", modelName.c_str();
+    delete[](graphBuf);
+    return RET_ERROR;
+  }
+  delete[](graphBuf);
+  auto context = new (std::nothrow) lite::Context;
+  if (context == nullptr) {
+    MS_LOG(ERROR) << "New context failed while running %s", modelName.c_str();
+    return RET_ERROR;
+  }
   if (_flags->device == "CPU") {
     context->device_ctx_.type = lite::DT_CPU;
   } else if (_flags->device == "GPU") {
     context->device_ctx_.type = lite::DT_GPU;
   } else {
     context->device_ctx_.type = lite::DT_NPU;
   }
@@ -375,8 +385,15 @@ int Benchmark::RunBenchmark(const std::string &deviceType) {
   }
   context->thread_num_ = _flags->numThreads;
   session = session::LiteSession::CreateSession(context);
+  delete (context);
+  if (session == nullptr) {
+    MS_LOG(ERROR) << "CreateSession failed while running %s", modelName.c_str();
+    return RET_ERROR;
+  }
   auto ret = session->CompileGraph(model.get());
+  if (ret != RET_OK) {
+    MS_LOG(ERROR) << "CompileGraph failed while running %s", modelName.c_str();
+    delete (session);
+    return ret;
+  }
   msInputs = session->GetInputs();
```
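RunBenchmark now releases each resource as soon as the next stage has consumed it: graphBuf once Model::Import has parsed it, the context once CreateSession has been called (the deletes in the diff imply the session copies what it needs, an inference from the commit rather than a documented API contract), and the session itself on every failure path. A hedged sketch of the same staging with RAII, which makes the early-exit paths leak-free by construction; the types below are hypothetical stand-ins, not the MindSpore Lite API:

```cpp
#include <iostream>
#include <memory>
#include <vector>

// Hypothetical stand-ins for the buffer/model/session stages (illustrative only).
struct Model { /* parsed graph */ };
struct Session {
  int Compile(const Model &) { return 0; }
};

int RunBenchmarkSketch() {
  // Stage 1: raw file buffer, owned by a unique_ptr so every return frees it.
  auto graph_buf = std::make_unique<std::vector<char>>(1024);

  // Stage 2: parse; the buffer is no longer needed once the model exists.
  auto model = std::make_unique<Model>();
  graph_buf.reset();  // same effect as the diff's delete[](graphBuf), automatic on error paths too

  // Stage 3: session; unique_ptr releases it on every early return below.
  auto session = std::make_unique<Session>();
  if (session->Compile(*model) != 0) {
    std::cerr << "compile failed" << std::endl;
    return -1;  // no manual delete(session) needed
  }
  return 0;
}

int main() { return RunBenchmarkSketch(); }
```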
```diff
@@ -394,21 +411,21 @@ int Benchmark::RunBenchmark(const std::string &deviceType) {
   auto status = LoadInput();
   if (status != 0) {
     MS_LOG(ERROR) << "Generate input data error";
-    delete graphBuf;
+    delete (session);
     return status;
   }
   if (!_flags->calibDataPath.empty()) {
     status = MarkAccuracy();
     if (status != 0) {
       MS_LOG(ERROR) << "Run MarkAccuracy error: %d" << status;
-      delete graphBuf;
+      delete (session);
       return status;
     }
   } else {
     status = MarkPerformance();
     if (status != 0) {
       MS_LOG(ERROR) << "Run MarkPerformance error: %d" << status;
-      delete graphBuf;
+      delete (session);
       return status;
     }
   }
@@ -422,8 +439,8 @@ int Benchmark::RunBenchmark(const std::string &deviceType) {
     calibData.clear();
   }
-  delete graphBuf;
-  return 0;
+  delete (session);
+  return RET_OK;
 }

 void BenchmarkFlags::InitInputDataList() {
@@ -488,10 +505,10 @@ int Benchmark::Init() {
   _flags->InitResizeDimsList();
   if (!_flags->resizeDims.empty() && _flags->resizeDims.size() != _flags->input_data_list.size()) {
     MS_LOG(ERROR) << "Size of input resizeDims should be equal to size of input inDataPath";
-    return 1;
+    return RET_ERROR;
   }
-  return 0;
+  return RET_OK;
 }

 int RunBenchmark(int argc, const char **argv) {
@@ -501,19 +518,19 @@ int RunBenchmark(int argc, const char **argv) {
   if (err.IsSome()) {
     std::cerr << err.Get() << std::endl;
     std::cerr << flags.Usage() << std::endl;
-    return -1;
+    return RET_ERROR;
   }

   if (flags.help) {
     std::cerr << flags.Usage() << std::endl;
-    return 0;
+    return RET_OK;
   }

   Benchmark mBenchmark(&flags);
   auto status = mBenchmark.Init();
   if (status != 0) {
     MS_LOG(ERROR) << "Benchmark init Error : " << status;
-    return 1;
+    return RET_ERROR;
   }

   if (flags.device == "NPU") {
@@ -525,12 +542,12 @@ int RunBenchmark(int argc, const char **argv) {
   if (status != 0) {
     MS_LOG(ERROR) << "Run Benchmark "
                   << flags.modelPath.substr(flags.modelPath.find_last_of(DELIM_SLASH) + 1).c_str()
                   << " Failed : " << status;
-    return 1;
+    return RET_ERROR;
   }
   MS_LOG(INFO) << "Run Benchmark "
                << flags.modelPath.substr(flags.modelPath.find_last_of(DELIM_SLASH) + 1).c_str()
                << " Success.";
-  return 0;
+  return RET_OK;
 }
 }  // namespace lite
 }  // namespace mindspore
```