Commit 29fabd13 in magicwindyyd/mindspore (a fork of MindSpore / mindspore)

Authored on Aug 31, 2020 by mindspore-ci-bot; committed via Gitee on Aug 31, 2020.

!5561 Fix C++ coding standard problem
Merge pull request !5561 from yeyunpeng2020/r0.7

Parents: a43bd07c, 794aeb8e
Showing 5 changed files with 23 additions and 29 deletions (+23, -29):
mindspore/lite/java/native/runtime/ms_tensor.cpp  (+1, -1)
mindspore/lite/src/common/file_utils.h  (+2, -3)
mindspore/lite/tools/converter/parser/tflite/tflite_util.cc  (+15, -18)
model_zoo/official/lite/image_classification/app/src/main/cpp/MSNetWork.h  (+1, -3)
model_zoo/official/lite/image_classification/app/src/main/cpp/MindSporeNetnative.cpp  (+4, -4)
mindspore/lite/java/native/runtime/ms_tensor.cpp

@@ -227,7 +227,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_com_mindspore_lite_MSTensor_setByteBu
                                                                                          jobject buffer) {
   jbyte *p_data = reinterpret_cast<jbyte *>(env->GetDirectBufferAddress(buffer));  // get buffer poiter
   jlong data_len = env->GetDirectBufferCapacity(buffer);  // get buffer capacity
-  if (!p_data) {
+  if (p_data == nullptr) {
     MS_LOGE("GetDirectBufferAddress return null");
     return NULL;
   }
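The only change in this file is stylistic: the null check on the JNI buffer pointer is rewritten as an explicit comparison against nullptr instead of an implicit pointer-to-bool conversion, presumably to satisfy the project's C++ style check. A minimal standalone sketch of that pattern (illustrative names only, not MindSpore code):

// Sketch: prefer an explicit nullptr comparison over `if (!ptr)`.
#include <cstdio>

static const char *FirstChar(const char *buffer) {
  if (buffer == nullptr) {  // explicit comparison, matching the updated diff line
    std::fprintf(stderr, "buffer is null\n");
    return nullptr;
  }
  return buffer;
}

int main() {
  const char *p = FirstChar("abc");
  std::printf("%c\n", p != nullptr ? *p : '?');
  return 0;
}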
mindspore/lite/src/common/file_utils.h

No added or removed lines are visible for this file in this view; the +2/-3 counted above appear to be whitespace and blank-line adjustments hidden by the diff display.

@@ -34,7 +34,7 @@ char *ReadFile(const char *file, size_t *size);
 std::string RealPath(const char *path);

 template <typename T>
 void WriteToTxt(const std::string &file_path, void *data, size_t element_size) {
   std::ofstream out_file;
   out_file.open(file_path, std::ios::out);
   auto real_data = reinterpret_cast<T *>(data);

@@ -44,7 +44,7 @@ void WriteToTxt(const std::string& file_path, void *data, size_t element_size) {
   out_file.close();
 }

 int WriteToBin(const std::string &file_path, void *data, size_t size);
 int CompareOutputData(float *output_data, float *correct_data, int data_size);
 void CompareOutput(float *output_data, std::string file_path);

@@ -55,4 +55,3 @@ std::string GetAndroidPackagePath();
 }  // namespace mindspore
 #endif  // MINDSPORE_LITE_COMMON_FILE_UTILS_H_
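WriteToTxt, as declared above, takes an untyped buffer plus an element count and reinterprets it as T*. A self-contained sketch of that pattern and how such a helper is called (the write loop sits outside the visible hunk, so its body here is an assumption, and the function is renamed to make clear it is not the MindSpore implementation):

#include <fstream>
#include <string>
#include <vector>

// Illustrative re-creation of the WriteToTxt pattern from the header above.
template <typename T>
void WriteToTxtSketch(const std::string &file_path, void *data, size_t element_size) {
  std::ofstream out_file;
  out_file.open(file_path, std::ios::out);
  auto real_data = reinterpret_cast<T *>(data);
  for (size_t i = 0; i < element_size; ++i) {
    out_file << real_data[i] << " ";  // assumed loop body; not shown in the diff
  }
  out_file.close();
}

int main() {
  std::vector<float> values = {1.0f, 2.5f, -3.0f};
  WriteToTxtSketch<float>("out.txt", values.data(), values.size());
  return 0;
}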
mindspore/lite/tools/converter/parser/tflite/tflite_util.cc

@@ -126,14 +126,10 @@ std::map<tflite::ActivationFunctionType, schema::ActivationType> tfMsActivationF
 };

 std::map<int, TypeId> type_map = {
-  {tflite::TensorType_FLOAT64, TypeId::kNumberTypeFloat64},
-  {tflite::TensorType_FLOAT32, TypeId::kNumberTypeFloat32},
-  {tflite::TensorType_FLOAT16, TypeId::kNumberTypeFloat16},
-  {tflite::TensorType_INT32, TypeId::kNumberTypeInt32},
-  {tflite::TensorType_INT16, TypeId::kNumberTypeInt16},
-  {tflite::TensorType_INT8, TypeId::kNumberTypeInt8},
-  {tflite::TensorType_INT64, TypeId::kNumberTypeInt64},
-  {tflite::TensorType_UINT8, TypeId::kNumberTypeUInt8},
+  {tflite::TensorType_FLOAT64, TypeId::kNumberTypeFloat64}, {tflite::TensorType_FLOAT32, TypeId::kNumberTypeFloat32},
+  {tflite::TensorType_FLOAT16, TypeId::kNumberTypeFloat16}, {tflite::TensorType_INT32, TypeId::kNumberTypeInt32},
+  {tflite::TensorType_INT16, TypeId::kNumberTypeInt16}, {tflite::TensorType_INT8, TypeId::kNumberTypeInt8},
+  {tflite::TensorType_INT64, TypeId::kNumberTypeInt64}, {tflite::TensorType_UINT8, TypeId::kNumberTypeUInt8},
   {tflite::TensorType_BOOL, TypeId::kNumberTypeBool},
 };
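The regrouping of the initializer above is purely cosmetic; the contents of type_map are unchanged. For context, a self-contained sketch of how such a tensor-type map is typically consulted, with a fallback for unknown types (the enum values below are stand-ins, not the real tflite or MindSpore definitions):

#include <iostream>
#include <map>

// Stand-in enums; the real code maps tflite::TensorType values to mindspore TypeId values.
enum TensorType { TensorType_FLOAT32 = 0, TensorType_INT8 = 1 };
enum TypeId { kTypeUnknown = 0, kNumberTypeFloat32 = 1, kNumberTypeInt8 = 2 };

int main() {
  std::map<int, TypeId> type_map = {{TensorType_FLOAT32, kNumberTypeFloat32},
                                    {TensorType_INT8, kNumberTypeInt8}};
  int queried = TensorType_INT8;
  auto it = type_map.find(queried);
  TypeId result = (it != type_map.end()) ? it->second : kTypeUnknown;  // fall back on unknown types
  std::cout << "mapped type id: " << result << std::endl;
  return 0;
}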
@@ -190,11 +186,8 @@ size_t GetDataTypeSize(const TypeId &data_type) {
   }
 }
 STATUS getPaddingParam(const std::unique_ptr<tflite::TensorT> &tensor,
-                       schema::PadMode pad_mode, int strideH,
-                       int strideW, int windowH, int windowW, std::vector<int> *params) {
+                       schema::PadMode pad_mode,
+                       int strideH, int strideW, int windowH, int windowW, std::vector<int> *params) {
   if (tensor == nullptr) {
     MS_LOG(ERROR) << "the input tensor is null";
     return RET_ERROR;
@@ -208,12 +201,18 @@ STATUS getPaddingParam(const std::unique_ptr<tflite::TensorT> &tensor,
   auto shape = tensor->shape;
   int H_input = shape.at(1);
   int W_input = shape.at(2);
+  if (strideH == 0) {
+    MS_LOG(ERROR) << "strideH is zero";
+    return RET_ERROR;
+  }
   int H_output = ceil(H_input * 1.0 / strideH);
   int pad_needed_H = (H_output - 1) * strideH + windowH - H_input;
   padUp = floor(pad_needed_H / 2.0);
   padDown = pad_needed_H - padUp;
+  if (strideW == 0) {
+    MS_LOG(ERROR) << "strideW is zero";
+    return RET_ERROR;
+  }
   int W_output = ceil(W_input * 1.0 / strideW);
   int pad_needed_W = (W_output - 1) * strideW + windowW - W_input;
   padLeft = floor(pad_needed_W / 2.0);
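The two guards added above protect the SAME-padding arithmetic that follows from a division by zero. A standalone sketch of that arithmetic with concrete numbers (the formulas mirror the lines visible in the hunk; the input sizes are illustrative and padRight is computed symmetrically here, as it is not shown in the hunk):

#include <cmath>
#include <cstdio>

int main() {
  // Illustrative input: 224x224 feature map, 7x7 window, stride 2.
  const int H_input = 224, W_input = 224;
  const int windowH = 7, windowW = 7;
  const int strideH = 2, strideW = 2;

  if (strideH == 0 || strideW == 0) {  // the check the commit adds
    std::fprintf(stderr, "stride is zero\n");
    return 1;
  }

  int H_output = std::ceil(H_input * 1.0 / strideH);                 // 112
  int pad_needed_H = (H_output - 1) * strideH + windowH - H_input;   // 111 * 2 + 7 - 224 = 5
  int padUp = std::floor(pad_needed_H / 2.0);                        // 2
  int padDown = pad_needed_H - padUp;                                // 3

  int W_output = std::ceil(W_input * 1.0 / strideW);
  int pad_needed_W = (W_output - 1) * strideW + windowW - W_input;
  int padLeft = std::floor(pad_needed_W / 2.0);
  int padRight = pad_needed_W - padLeft;

  std::printf("padUp=%d padDown=%d padLeft=%d padRight=%d\n", padUp, padDown, padLeft, padRight);
  return 0;
}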
@@ -227,9 +226,7 @@ STATUS getPaddingParam(const std::unique_ptr<tflite::TensorT> &tensor,
   return RET_OK;
 }
 void Split(const std::string &src_str,
            std::vector<std::string> *dst_str, const std::string &chr) {
   std::string::size_type p1 = 0, p2 = src_str.find(chr);
   while (std::string::npos != p2) {
     dst_str->push_back(src_str.substr(p1, p2 - p1));
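Split, as shown above, is a plain find/substr tokenizer. A self-contained sketch that completes and exercises the same loop (the continuation and tail handling after push_back are not visible in the hunk, so they are assumptions here, and the function is renamed accordingly):

#include <iostream>
#include <string>
#include <vector>

// Illustrative re-creation of the Split pattern: cut src_str on every occurrence of chr.
void SplitSketch(const std::string &src_str, std::vector<std::string> *dst_str, const std::string &chr) {
  std::string::size_type p1 = 0, p2 = src_str.find(chr);
  while (std::string::npos != p2) {
    dst_str->push_back(src_str.substr(p1, p2 - p1));
    p1 = p2 + chr.size();         // assumed: skip past the separator
    p2 = src_str.find(chr, p1);   // assumed: look for the next one
  }
  if (p1 != src_str.length()) {   // assumed: keep the trailing token
    dst_str->push_back(src_str.substr(p1));
  }
}

int main() {
  std::vector<std::string> parts;
  SplitSketch("1,224,224,3", &parts, ",");
  for (const auto &part : parts) {
    std::cout << part << std::endl;
  }
  return 0;
}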
model_zoo/official/lite/image_classification/app/src/main/cpp/MSNetWork.h

@@ -32,7 +32,6 @@
 #include <memory>
 #include <utility>

 struct ImgDims {
   int channel = 0;
   int width = 0;

@@ -43,8 +42,6 @@ struct ImgDims {
   std::shared_ptr<mindspore::session::LiteSession> sess = nullptr;
 };*/

 class MSNetWork {
  public:
   MSNetWork();

@@ -55,6 +52,7 @@ class MSNetWork {
   int ReleaseNets(void);

+ protected:
   mindspore::session::LiteSession *session;
   mindspore::lite::Model *model;
   static const int RET_CATEGORY_SUM = 601;
model_zoo/official/lite/image_classification/app/src/main/cpp/MindSporeNetnative.cpp

@@ -76,10 +76,10 @@ cv::Mat PreProcessImageData(cv::Mat input) {
   imgFloatTmp.convertTo(imgResized256, CV_32FC3, normalizMin / normalizMax);
-  int offsetX = 16;
-  int offsetY = 16;
-  int cropWidth = 224;
-  int cropHeight = 224;
+  const int offsetX = 16;
+  const int offsetY = 16;
+  const int cropWidth = 224;
+  const int cropHeight = 224;
   // Standardization processing.
   float meanR = 0.485;
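The four crop constants above are simply const-qualified; the preprocessing math is untouched. For context, a standalone sketch of the per-pixel standardization step this function performs, using the values visible in the hunk (the scale factors and the mean subtraction order are assumptions, since the rest of the pipeline lies outside the hunk):

#include <cstdio>

int main() {
  const float normalizMin = 1.0f;    // assumed values matching a 1/255 scaling convention
  const float normalizMax = 255.0f;
  const float meanR = 0.485f;        // red-channel mean from the visible context line

  unsigned char raw_r = 200;                            // one red sample from an 8-bit image
  float scaled = raw_r * (normalizMin / normalizMax);   // convertTo(..., CV_32FC3, normalizMin / normalizMax)
  float standardized = scaled - meanR;                  // assumed standardization step
  std::printf("scaled=%.4f standardized=%.4f\n", scaled, standardized);
  return 0;
}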