magicwindyyd/mindspore (forked from MindSpore/mindspore)

Commit 4a76059f
Authored on Aug 18, 2020 by chengxianbin
Parent: 3fb58fcb

fix quant export bug
Showing 3 changed files with 44 additions and 9 deletions.
mindspore/nn/layer/quant.py           +2 −7
mindspore/train/quant/quant.py        +3 −2
mindspore/train/quant/quant_utils.py  +39 −0
mindspore/nn/layer/quant.py

@@ -607,7 +607,6 @@ class Conv2dBnWithoutFoldQuant(Cell):
         group (int): Split filter into groups, `in_channels` and `out_channels` should be
             divisible by the number of groups. Default: 1.
         has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
-        has_bn (bool): Specifies to used batchnorm or not. Default: False.
         eps (float): Parameters for BatchNormal. Default: 1e-5.
         momentum (float): Parameters for BatchNormal op. Default: 0.997.
         weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.
@@ -641,7 +640,6 @@ class Conv2dBnWithoutFoldQuant(Cell):
                  dilation=1,
                  group=1,
                  has_bias=False,
-                 has_bn=True,
                  eps=1e-5,
                  momentum=0.997,
                  weight_init='normal',
@@ -693,17 +691,14 @@ class Conv2dBnWithoutFoldQuant(Cell):
                                                  symmetric=symmetric,
                                                  narrow_range=narrow_range,
                                                  quant_delay=quant_delay)
-        self.has_bn = validator.check_bool("has_bn", has_bn)
-        if has_bn:
-            self.batchnorm = BatchNorm2d(out_channels, eps=eps, momentum=momentum)
+        self.batchnorm = BatchNorm2d(out_channels, eps=eps, momentum=momentum)

     def construct(self, x):
         weight = self.fake_quant_weight(self.weight)
         out = self.conv(x, weight)
         if self.has_bias:
             out = self.bias_add(out, self.bias)
-        if self.has_bn:
-            out = self.batchnorm(out)
+        out = self.batchnorm(out)
         return out

     def extend_repr(self):
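With `has_bn` removed, `Conv2dBnWithoutFoldQuant` now applies batchnorm unconditionally after the fake-quantized convolution, which is the behavior the export fix below depends on. A minimal numpy sketch of the resulting construct() semantics, using a 1x1-conv stand-in and hypothetical names (an illustration, not the MindSpore implementation):

import numpy as np

def construct_sketch(x, weight, bias, gamma, beta, moving_mean, moving_var, eps=1e-5):
    """x: (N, Cin, H, W); weight: (Cout, Cin), a 1x1 kernel already fake-quantized."""
    out = np.einsum('oc,nchw->nohw', weight, x)          # conv
    if bias is not None:                                 # bias_add when has_bias
        out = out + bias[None, :, None, None]
    sigma = np.sqrt(moving_var + eps)                    # inference-mode batchnorm
    out = (gamma[None, :, None, None] * (out - moving_mean[None, :, None, None])
           / sigma[None, :, None, None] + beta[None, :, None, None])
    return out                                           # batchnorm is now unconditional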
mindspore/train/quant/quant.py

@@ -208,7 +208,6 @@ class ConvertToQuantNetwork:
                                              group=conv_inner.group,
                                              eps=bn_inner.eps,
                                              momentum=bn_inner.momentum,
-                                             has_bn=True,
                                              quant_delay=self.weight_qdelay,
                                              per_channel=self.weight_channel,
                                              num_bits=self.weight_bits,
@@ -378,8 +377,10 @@ class ExportToQuantInferNetwork:
         if isinstance(cell_core, (quant.DenseQuant, quant.Conv2dQuant)):
             if cell_core.has_bias:
                 bias = cell_core.bias.data.asnumpy()
-        elif isinstance(cell_core, (quant.Conv2dBnFoldQuant, quant.Conv2dBnWithoutFoldQuant)):
+        elif isinstance(cell_core, quant.Conv2dBnFoldQuant):
             weight, bias = quant_utils.fold_batchnorm(weight, cell_core)
+        elif isinstance(cell_core, quant.Conv2dBnWithoutFoldQuant):
+            weight, bias = quant_utils.without_fold_batchnorm(weight, cell_core)
         # apply the quant
         weight = quant_utils.weight2int(weight, scale_w, zp_w)
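This is the export bug being fixed: `Conv2dBnWithoutFoldQuant` stores its batchnorm parameters inside a separate `BatchNorm2d` cell (`cell_quant.batchnorm`, as the new helper below shows), so routing it through `fold_batchnorm` could not read them correctly; export now dispatches each variant to its own folding helper. After folding, the float weight is mapped to integers by `quant_utils.weight2int`, whose body is not part of this diff. A plausible sketch of the standard affine mapping such a helper performs, assuming `scale_w` and `zp_w` broadcast over the weight tensor (a hypothetical reconstruction, not the actual source):

import numpy as np

def weight2int_sketch(weight, scale_w, zp_w):
    # standard affine quantization: q = round(w / scale) + zero_point
    return (np.round(weight / scale_w) + zp_w).astype(np.int32)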
mindspore/train/quant/quant_utils.py

@@ -211,3 +211,42 @@ def fold_batchnorm(weight, cell_quant):
     weight = weight * _gamma / _sigma
     bias = beta - gamma * mean / sigma
     return weight, bias
+
+
+def without_fold_batchnorm(weight, cell_quant):
+    r"""
+    Fold the batchnorm in `Conv2dBnWithoutFoldQuant` to weight.
+
+    Calculate from `FakeQuantWithMinMax`'s Parameter or Fake quant primitive.
+
+    Args:
+        weight (numpy.ndarray): Weight of `cell_quant`.
+        cell_quant (Cell): Object of `mindspore.nn.layer.Conv2dBnWithoutFoldQuant`.
+
+    Returns:
+        weight (numpy.ndarray): Folded weight.
+        bias (numpy.ndarray): Folded bias.
+    """
+    variance = cell_quant.batchnorm.moving_variance.data.asnumpy()
+    mean = cell_quant.batchnorm.moving_mean.data.asnumpy()
+    gamma = cell_quant.batchnorm.gamma.data.asnumpy()
+    beta = cell_quant.batchnorm.beta.data.asnumpy()
+    epsilon = cell_quant.batchnorm.eps
+    sigma = np.sqrt(variance + epsilon)
+
+    if gamma.shape[0] == weight.shape[0]:
+        # `Conv2d` or `Dense` op weight
+        shape_list = [-1] + [1] * len(weight.shape[1:])
+        _gamma = gamma.reshape(shape_list)
+        _sigma = sigma.reshape(shape_list)
+    elif gamma.shape[0] == weight.shape[1]:
+        # `DepthwiseConv2d` op weight
+        shape_list = [1, -1] + [1] * len(weight.shape[2:])
+        _gamma = gamma.reshape(shape_list)
+        _sigma = sigma.reshape(shape_list)
+    else:
+        raise ValueError("Unsupported weight shape({})".format(weight.shape))
+
+    weight = weight * _gamma / _sigma
+    bias = beta - gamma * mean / sigma
+    return weight, bias
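The new helper relies on the inference-time batchnorm identity BN(y) = (gamma / sigma) * y + (beta - gamma * mean / sigma) with sigma = sqrt(variance + eps): scaling the conv kernel by gamma/sigma and emitting beta - gamma*mean/sigma as the bias reproduces conv-then-batchnorm exactly. A self-contained numpy check of that identity for a `Conv2d`-shaped weight (an illustrative sketch, not MindSpore code):

import numpy as np

rng = np.random.default_rng(0)
n, cin, cout, hw = 2, 3, 4, 5
x = rng.normal(size=(n, cin, hw, hw))
w = rng.normal(size=(cout, cin))                        # 1x1 conv kernel, squeezed
gamma, beta = rng.normal(size=cout), rng.normal(size=cout)
mean = rng.normal(size=cout)
var, eps = rng.uniform(0.5, 2.0, size=cout), 1e-5
sigma = np.sqrt(var + eps)

conv = lambda x, w: np.einsum('oc,nchw->nohw', w, x)    # conv stand-in

def bn(y):                                              # inference-mode batchnorm
    return (gamma[:, None, None] * (y - mean[:, None, None]) / sigma[:, None, None]
            + beta[:, None, None])

# fold exactly as without_fold_batchnorm does for `Conv2d` weights
w_folded = w * (gamma / sigma).reshape([-1] + [1] * len(w.shape[1:]))
b_folded = beta - gamma * mean / sigma

assert np.allclose(bn(conv(x, w)),
                   conv(x, w_folded) + b_folded[None, :, None, None])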