Xiaomi / Mace

Commit 0654a658
Authored Aug 31, 2018 by 李寅
Committed by 赵奇可 on Sep 10, 2018

Fix dequantize output type

Parent: 95f63291
Showing 5 changed files with 23 additions and 9 deletions (+23, -9)
mace/core/workspace.cc                                +8  -6
mace/kernels/softmax.h                                +1  -1
mace/python/tools/converter_tool/base_converter.py    +2  -0
mace/python/tools/converter_tool/transformer.py       +4  -0
tools/converter.py                                    +8  -2
mace/core/workspace.cc

@@ -178,12 +178,14 @@ MaceStatus Workspace::LoadModelTensor(const NetDef &net_def,
   if (type == DeviceType::CPU && net_def.has_quantize_info()) {
     for (const auto &activation_info
              : net_def.quantize_info().activation_info()) {
-      MACE_CHECK(HasTensor(activation_info.tensor_name()),
-                 "Quantize info exist for non-existed tensor",
-                 activation_info.tensor_name());
-      Tensor *tensor = GetTensor(activation_info.tensor_name());
-      tensor->SetScale(activation_info.scale());
-      tensor->SetZeroPoint(activation_info.zero_point());
+      if (HasTensor(activation_info.tensor_name())) {
+        Tensor *tensor = GetTensor(activation_info.tensor_name());
+        tensor->SetScale(activation_info.scale());
+        tensor->SetZeroPoint(activation_info.zero_point());
+      } else {
+        LOG(WARNING) << "Quantize info exists for non-existed tensor: "
+                     << activation_info.tensor_name();
+      }
     }
   }
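The net effect of this hunk is that quantize info referring to a tensor that is not present in the workspace no longer aborts model loading; the entry is skipped with a warning. A minimal sketch of the same check-then-warn pattern in Python (the names tensors and activation_infos are illustrative stand-ins, not MACE types):

import logging
from types import SimpleNamespace

def apply_quantize_info(tensors, activation_infos):
    # tensors: dict of tensor name -> object with scale/zero_point fields
    # activation_infos: iterable of (name, scale, zero_point) tuples
    for name, scale, zero_point in activation_infos:
        if name in tensors:
            tensors[name].scale = scale
            tensors[name].zero_point = zero_point
        else:
            logging.warning(
                "Quantize info exists for non-existed tensor: %s", name)

tensors = {"conv1_out": SimpleNamespace(scale=None, zero_point=None)}
apply_quantize_info(tensors, [("conv1_out", 0.02, 128), ("missing", 0.1, 0)])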
mace/kernels/softmax.h

@@ -123,7 +123,7 @@ struct SoftmaxFunctor<DeviceType::CPU, float> {
   }
 };

-static const int kInputDeltaIntBits = 5;
+static const int kInputDeltaIntBits = 6;
 static const int kSumExpIntBits = 12;

 template <>
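kInputDeltaIntBits sets how many integer bits the fixed-point representation of the softmax input delta gets; raising it from 5 to 6 doubles the representable magnitude while halving the fractional resolution. A rough illustration of that trade-off for a signed 32-bit Q-format value (a generic fixed-point property, not MACE code):

def q_format(int_bits, total_bits=32):
    # Signed fixed-point with one sign bit, `int_bits` integer bits and
    # the remaining bits as fraction: returns (max magnitude, step size).
    frac_bits = total_bits - 1 - int_bits
    return 2 ** int_bits - 2 ** -frac_bits, 2 ** -frac_bits

print(q_format(5))  # magnitude up to ~32, step 2**-26
print(q_format(6))  # magnitude up to ~64, step 2**-25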
mace/python/tools/converter_tool/base_converter.py

@@ -368,6 +368,8 @@ class ConverterOption(object):
                 TransformerRule.UPDATE_FLOAT_OP_DATA_TYPE,
                 # Transform finalization
                 TransformerRule.ADD_MACE_INPUT_AND_OUTPUT_NODES,
+                # for quantization entropy calibration use
+                TransformerRule.ADD_QUANTIZE_TENSOR_RANGE,
                 TransformerRule.SORT_BY_EXECUTION,
             ]
         if self._quantize:
mace/python/tools/converter_tool/transformer.py

@@ -1638,6 +1638,7 @@ class Transformer(base_converter.ConverterInterface):
             output_shape = op_def.output_shape.add()
             output_shape.dims.extend(
                 self._producer[output_node.name].output_shape[0].dims)
+            op_def.output_type.extend([mace_pb2.DT_FLOAT])
             ConverterUtil.add_data_type_arg(op_def, mace_pb2.DT_UINT8)

@@ -1647,6 +1648,9 @@ class Transformer(base_converter.ConverterInterface):
         print("Add quantize tensor range")
         net = self._model
         range_file = self._option.quantize_range_file
+        if not range_file:
+            return
+
         with open(range_file) as f:
             for line in f:
                 tensor_name, minmax = line.split("@@")
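The early return added in the second hunk skips calibration when no quantize_range_file is configured. For context, the loop below it splits each line on "@@" to separate the tensor name from its recorded range; a hedged sketch of reading such a file, assuming the payload after the separator is a comma-separated min,max pair (that part is not shown in this diff):

def load_quantize_ranges(range_file):
    # Assumed line format: "<tensor_name>@@<min>,<max>"
    ranges = {}
    with open(range_file) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            tensor_name, minmax = line.split("@@")
            low, high = (float(v) for v in minmax.split(","))
            ranges[tensor_name] = (low, high)
    return ranges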
tools/converter.py

@@ -481,13 +481,15 @@ def format_model_config(flags):
             DeviceType.CPU: 0.999,
             DeviceType.GPU: 0.995,
             DeviceType.HEXAGON: 0.930,
+            DeviceType.CPU + "_QUANTIZE": 0.980,
         }
         for k, v in six.iteritems(validation_threshold):
             if k.upper() == 'DSP':
                 k = DeviceType.HEXAGON
             if k.upper() not in (DeviceType.CPU,
                                  DeviceType.GPU,
-                                 DeviceType.HEXAGON):
+                                 DeviceType.HEXAGON,
+                                 DeviceType.CPU + "_QUANTIZE"):
                 raise argparse.ArgumentTypeError(
                     'Unsupported validation threshold runtime: %s' % k)
             threshold_dict[k.upper()] = v

@@ -1251,6 +1253,10 @@ def run_specific_target(flags, configs, target_abi,
                     model_config[YAMLKeyword.weight_file_path],
                     model_config[YAMLKeyword.weight_sha256_checksum])
+                validate_type = device_type
+                if model_config[YAMLKeyword.quantize] == 1:
+                    validate_type = device_type + "_QUANTIZE"
+
                 sh_commands.validate_model(
                     abi=target_abi,
                     serialno=serial_num,

@@ -1266,7 +1272,7 @@ def run_specific_target(flags, configs, target_abi,
                     phone_data_dir=PHONE_DATA_DIR,
                     input_data_types=subgraphs[0][YAMLKeyword.input_data_types],  # noqa
                     caffe_env=flags.caffe_env,
-                    validation_threshold=subgraphs[0][YAMLKeyword.validation_threshold][device_type])  # noqa
+                    validation_threshold=subgraphs[0][YAMLKeyword.validation_threshold][validate_type])  # noqa
             if flags.report and flags.round > 0:
                 tuned = is_tuned and device_type == DeviceType.GPU
                 report_run_statistics(
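Taken together, the converter.py hunks make quantized models validate against a dedicated CPU_QUANTIZE threshold (0.980) rather than the plain CPU one. A simplified sketch of the lookup, using plain strings where the real script uses DeviceType values and the YAML subgraph config:

validation_threshold = {
    "CPU": 0.999,
    "GPU": 0.995,
    "HEXAGON": 0.930,
    "CPU_QUANTIZE": 0.980,
}

def pick_threshold(device_type, quantize):
    # Quantized models are compared against the looser *_QUANTIZE threshold.
    validate_type = device_type + "_QUANTIZE" if quantize else device_type
    return validation_threshold[validate_type]

print(pick_threshold("CPU", quantize=False))  # 0.999
print(pick_threshold("CPU", quantize=True))   # 0.98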