Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
magicwindyyd
mindspore
提交
e5fd3ce2
M
mindspore
项目概览
magicwindyyd
/
mindspore
与 Fork 源项目一致
Fork自
MindSpore / mindspore
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
M
mindspore
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
e5fd3ce2
编写于
8月 18, 2020
作者:
C
cjh9368
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
add aware quant testcase
上级
e60c0b60
变更
5
隐藏空白更改
内联
并排
Showing
5 changed files
with
38 additions
and
4 deletions
+38
-4
mindspore/lite/src/runtime/kernel/arm/nnacl/fp32/common_func.c
...pore/lite/src/runtime/kernel/arm/nnacl/fp32/common_func.c
+0
-3
mindspore/lite/src/runtime/kernel/arm/nnacl/int8/conv_depthwise_int8.c
...e/src/runtime/kernel/arm/nnacl/int8/conv_depthwise_int8.c
+1
-1
mindspore/lite/test/models_tflite_awaretraining.cfg
mindspore/lite/test/models_tflite_awaretraining.cfg
+1
-0
mindspore/lite/test/run_benchmark_nets.sh
mindspore/lite/test/run_benchmark_nets.sh
+33
-0
mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.cc
...lite/tools/converter/parser/tflite/tflite_model_parser.cc
+3
-0
未找到文件。
mindspore/lite/src/runtime/kernel/arm/nnacl/fp32/common_func.c
浏览文件 @
e5fd3ce2
...
...
@@ -65,9 +65,6 @@ void MatrixMultiAdd(float *c11, float *c12, float *c21, float *c22, float *x_ptr
void
PostConvFuncComm
(
const
float
*
src_ptr_
,
float
*
out_ptr
,
const
float
*
bias_ptr
,
size_t
output_channel
,
size_t
plane_size
,
size_t
stride
,
bool
is_relu
,
bool
is_relu6
,
int
size
)
{
if
(
size
==
0
)
{
return
;
}
for
(
int
oc
=
0
;
oc
<
output_channel
;
oc
++
)
{
int
oc_div
=
oc
/
size
,
oc_mod
=
oc
%
size
;
for
(
int
hw
=
0
;
hw
<
plane_size
;
hw
++
)
{
...
...
mindspore/lite/src/runtime/kernel/arm/nnacl/int8/conv_depthwise_int8.c
浏览文件 @
e5fd3ce2
...
...
@@ -54,7 +54,7 @@ void DepthwiseBorderPixelInt8(int8_t *dst, const int16_t *src, const int16_t *we
}
tmp_buffer
[
c
]
+=
bias
[
c
];
tmp_buffer
[
c
]
=
RoundingDivideByPOT
(
SaturatingRoundingDoublingHighMul
(
tmp_buffer
[
c
]
*
(
1
<<
(
unsigned
int
)
left
),
multiplier
),
right
);
SaturatingRoundingDoublingHighMul
(
tmp_buffer
[
c
]
*
(
1
<<
(
unsigned
int
)
left
),
multiplier
),
-
right
);
tmp_buffer
[
c
]
+=
out_zp
;
tmp_buffer
[
c
]
=
MSMAX
(
tmp_buffer
[
c
],
acc_min
);
tmp_buffer
[
c
]
=
MSMIN
(
tmp_buffer
[
c
],
acc_max
);
...
...
mindspore/lite/test/models_tflite_awaretraining.cfg
0 → 100644
浏览文件 @
e5fd3ce2
video_infer.tflite
mindspore/lite/test/run_benchmark_nets.sh
浏览文件 @
e5fd3ce2
...
...
@@ -86,6 +86,27 @@ function Run_x86() {
fi
done
<
${
models_tflite_posttraining_config
}
# Run tflite aware training quantization converted models:
while
read
line
;
do
model_name
=
${
line
}
if
[[
$model_name
==
\#
*
]]
;
then
continue
fi
echo
${
model_name
}
echo
'cd '
${
convertor_path
}
'/MSLite-*-linux_x86_64'
cd
${
convertor_path
}
/MSLite-
*
-linux_x86_64
||
return
1
echo
'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib;./benchmark/benchmark --modelPath='
${
ms_models_path
}
'/'
${
model_name
}
'.ms --inDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --calibDataPath=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'
${
model_name
}
'.ms.out --warmUpLoopCount=1 --loopCount=1 --numThreads=1'
||
return
1
export
LD_LIBRARY_PATH
=
$LD_LIBRARY_PATH
:./lib
;
./benchmark/benchmark
--modelPath
=
${
ms_models_path
}
/
${
model_name
}
.ms
--inDataPath
=
/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/
${
model_name
}
.ms.bin
--calibDataPath
=
/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/
${
model_name
}
.ms.out
--warmUpLoopCount
=
1
--loopCount
=
1
--numThreads
=
1
if
[
$?
=
0
]
;
then
run_result
=
'Run_x86: '
${
model_name
}
'_awaretraining pass'
echo
${
run_result
}
>>
${
run_benchmark_result_file
}
else
run_result
=
'Run_x86: '
${
model_name
}
'_awaretraining fail <<===========================this is the failed case'
echo
${
run_result
}
>>
${
run_benchmark_result_file
}
return
1
fi
done
<
${
models_tflite_awaretraining_config
}
# Run mindspore converted models:
while
read
line
;
do
model_name
=
${
line
}
...
...
@@ -237,6 +258,7 @@ cd ${convertor_path}/MSLite-*-linux_x86_64 || exit 1
# Set models config filepath
models_tflite_config
=
${
basepath
}
/models_tflite.cfg
models_caffe_config
=
${
basepath
}
/models_caffe.cfg
models_tflite_awaretraining_config
=
${
basepath
}
/models_tflite_awaretraining.cfg
models_tflite_posttraining_config
=
${
basepath
}
/models_tflite_posttraining.cfg
models_onnx_config
=
${
basepath
}
/models_onnx.cfg
models_mindspore_config
=
${
basepath
}
/models_mindspore.cfg
...
...
@@ -303,6 +325,17 @@ while read line; do
./converter_lite
--fmk
=
TFLITE
--modelFile
=
$models_path
/
${
model_name
}
--outputFile
=
${
ms_models_path
}
/
${
model_name
}
_posttraining
--quantType
=
PostTraining
--config_file
=
${
models_path
}
/
${
model_name
}
_posttraining.config
||
exit
1
done
<
${
models_tflite_posttraining_config
}
# Convert TFLite AwareTraining models:
while
read
line
;
do
model_name
=
${
line
}
if
[[
$model_name
==
\#
*
]]
;
then
continue
fi
echo
${
model_name
}
echo
'./converter_lite --fmk=TFLITE --modelFile='
${
models_path
}
'/'
${
model_name
}
' --outputFile='
${
ms_models_path
}
'/'
${
model_name
}
' --quantType=AwareTraining'
./converter_lite
--fmk
=
TFLITE
--modelFile
=
${
models_path
}
/
${
model_name
}
--outputFile
=
${
ms_models_path
}
/
${
model_name
}
--quantType
=
AwareTraining
||
exit
1
done
<
${
models_tflite_awaretraining_config
}
# Push to the arm and run benchmark:
# First:copy benchmark exe and so files to the server which connected to the phone
rm
-rf
${
basepath
}
/benchmark_test
...
...
mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.cc
浏览文件 @
e5fd3ce2
...
...
@@ -152,6 +152,9 @@ STATUS TfliteModelParser::ConvertTensor(const std::unique_ptr<tflite::SubGraphT>
auto
isConst
=
(
!
tensor_buffer
->
data
.
empty
());
if
(
isConst
)
{
CopyConstTensorData
(
tflite_model_buffer
,
tflite_tensor
.
get
(),
tensor
.
get
());
}
else
if
(
tensor
->
dataType
==
TypeId
::
kNumberTypeUInt8
)
{
// set in/out tensor to int8 to fit ms-lite op
tensor
->
dataType
=
TypeId
::
kNumberTypeInt8
;
}
// set tensor attr
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录