magicwindyyd / mindspore (fork of MindSpore / mindspore)

Commit 6d557a98
Authored Aug 10, 2020 by mindspore-ci-bot; committed via Gitee, Aug 10, 2020

!4199 [MS][QUANT] mindspore model zoo example for hand make quant graph

Merge pull request !4199 from chenzhongming/quant

Parents: 767c04ef, 94fd9dd8
Showing 3 changed files with 232 additions and 17 deletions (+232 −17)
mindspore/nn/layer/quant.py                                        +13  -16
mindspore/train/quant/quant.py                                      +1   -1
model_zoo/official/cv/mobilenetv2_quant/src/mobilenetV2_quant.py  +218   -0
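The point of the change set: rather than relying on an automatic conversion pass, the new model zoo example wires the quantization-aware graph by hand, inserting the fake-quant cells explicitly. A minimal sketch of that pattern, using only cells and constants that appear in the new file below (the channel sizes here are arbitrary):

import mindspore.nn as nn

# Conv2d with folded BatchNorm and weight fake quantization, followed by ReLU
# and an explicit activation fake-quant cell, as used throughout the new model.
conv = nn.Conv2dBnFoldQuant(16, 32, 3, 1, pad_mode='pad', padding=1,
                            quant_delay=200, per_channel=False, symmetric=False)
act_fake = nn.FakeQuantWithMinMax(ema=True, ema_decay=0.999, min_init=0,
                                  quant_delay=200)
cell = nn.SequentialCell([conv, nn.ReLU(), act_fake])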
mindspore/nn/layer/quant.py (view file @ 6d557a98)
@@ -396,7 +396,7 @@ class FakeQuantWithMinMax(Cell):
 
 class Conv2dBnFoldQuant(Cell):
     r"""
-    2D convolution with BatchNormal op folded layer.
+    2D convolution with BatchNormal op folded construct.
 
     This part is a more detailed overview of Conv2d op.
 
@@ -434,10 +434,9 @@ class Conv2dBnFoldQuant(Cell):
         Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
 
     Examples:
-        >>> batchnorm_quant = nn.Conv2dBnFoldQuant(1, 6, kernel_size= (2, 2), stride=(1, 1), pad_mode="valid",
-        >>>                                        dilation=(1, 1))
-        >>> input_x = Tensor(np.random.randint(-2, 2, (2, 1, 1, 3)), mindspore.float32)
-        >>> result = batchnorm_quant(input_x)
+        >>> conv2d_bn = nn.Conv2dBnFoldQuant(1, 6, kernel_size=(2, 2), stride=(1, 1), pad_mode="valid")
+        >>> x = Tensor(np.random.randint(-2, 2, (2, 1, 1, 3)), mindspore.float32)
+        >>> y = conv2d_bn(x)
     """
 
     def __init__(self,
@@ -508,7 +507,7 @@ class Conv2dBnFoldQuant(Cell):
             channel_axis = 0
         self.weight = Parameter(initializer(weight_init, weight_shape), name='weight')
 
-        # initialize batchnorm Parameter
+        # initialize BatchNorm Parameter
         self.gamma = Parameter(initializer(gamma_init, [out_channels]), name='gamma')
         self.beta = Parameter(initializer(beta_init, [out_channels]), name='beta')
         self.moving_mean = Parameter(initializer(mean_init, [out_channels]), name='moving_mean', requires_grad=False)
@@ -583,7 +582,7 @@ class Conv2dBnFoldQuant(Cell):
 
 class Conv2dBnWithoutFoldQuant(Cell):
     r"""
-    2D convolution + batchnorm without fold with fake quant op layer.
+    2D convolution + batchnorm without fold with fake quant construct.
 
     This part is a more detailed overview of Conv2d op.
 
@@ -617,10 +616,9 @@ class Conv2dBnWithoutFoldQuant(Cell):
         Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
 
     Examples:
-        >>> conv2d_quant = nn.Conv2dQuant(1, 6, kernel_size=(2, 2), stride=(1, 1), pad_mode="valid",
-        >>>                               dilation=(1, 1))
-        >>> input_x = Tensor(np.random.randint(-2, 2, (2, 1, 1, 3)), mstype.float32)
-        >>> result = conv2d_quant(input_x)
+        >>> conv2d_quant = nn.Conv2dBnWithoutFoldQuant(1, 6, kernel_size=(2, 2), stride=(1, 1), pad_mode="valid")
+        >>> x = Tensor(np.random.randint(-2, 2, (2, 1, 1, 3)), mstype.float32)
+        >>> y = conv2d_quant(x)
     """
 
     def __init__(self,
@@ -687,7 +685,7 @@ class Conv2dBnWithoutFoldQuant(Cell):
                                               quant_delay=quant_delay)
         self.has_bn = validator.check_bool("has_bn", has_bn)
         if has_bn:
-            self.batchnorm = BatchNorm2d(out_channels)
+            self.batchnorm = BatchNorm2d(out_channels, eps=eps, momentum=momentum)
 
     def construct(self, x):
         weight = self.fake_quant_weight(self.weight)
@@ -740,10 +738,9 @@ class Conv2dQuant(Cell):
         Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
 
     Examples:
-        >>> conv2d_quant = nn.Conv2dQuant(1, 6, kernel_size= (2, 2), stride=(1, 1), pad_mode="valid",
-        >>>                               dilation=(1, 1))
-        >>> input_x = Tensor(np.random.randint(-2, 2, (2, 1, 1, 3)), mindspore.float32)
-        >>> result = conv2d_quant(input_x)
+        >>> conv2d_quant = nn.Conv2dQuant(1, 6, kernel_size= (2, 2), stride=(1, 1), pad_mode="valid")
+        >>> x = Tensor(np.random.randint(-2, 2, (2, 1, 1, 3)), mindspore.float32)
+        >>> y = conv2d_quant(x)
     """
 
     def __init__(self,
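The BatchNorm2d hunk above threads eps and momentum through to the inner BatchNorm2d; before the fix, values passed to the constructor never reached it. A sketch of what the fix enables, assuming eps and momentum are constructor keyword arguments as the hunk implies:

import numpy as np
import mindspore
import mindspore.nn as nn
from mindspore import Tensor

conv_bn = nn.Conv2dBnWithoutFoldQuant(1, 6, kernel_size=(2, 2), stride=(1, 1),
                                      pad_mode="valid", has_bn=True,
                                      eps=1e-3, momentum=0.997)
x = Tensor(np.random.randint(-2, 2, (2, 1, 1, 3)), mindspore.float32)
y = conv_bn(x)  # the internal BatchNorm2d now actually uses eps=1e-3, momentum=0.997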
mindspore/train/quant/quant.py (view file @ 6d557a98)
@@ -473,7 +473,7 @@ def export(network, *inputs, file_name, mean=127.5, std_dev=127.5, file_format='
 
 def convert_quant_network(network,
                           bn_fold=False,
-                          freeze_bn=0,
+                          freeze_bn=10000,
                           quant_delay=(0, 0),
                           num_bits=(8, 8),
                           per_channel=(False, False),
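For orientation, a hedged example of calling the converter with its new default; the parameter names come from the hunk above, and any parameters hidden by the collapsed diff are left at their defaults:

import mindspore.nn as nn
from mindspore.train.quant import quant

network = nn.Conv2d(3, 8, kernel_size=3)  # placeholder float network; any nn.Cell

# freeze_bn=10000 (the new default) keeps updating BatchNorm statistics for the
# first 10000 steps before freezing them, instead of freezing them at step 0.
quant_net = quant.convert_quant_network(network,
                                        bn_fold=False,
                                        freeze_bn=10000,
                                        quant_delay=(0, 0),
                                        num_bits=(8, 8),
                                        per_channel=(False, False))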
model_zoo/official/cv/mobilenetv2_quant/src/mobilenetV2_quant.py (new file, mode 100644, view @ 6d557a98)
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MobileNetV2 Quant model define"""
import mindspore.nn as nn
from mindspore.ops import operations as P

__all__ = ['mobilenetV2_quant']

_quant_delay = 200
_ema_decay = 0.999
_symmetric = False
_per_channel = False


def _make_divisible(v, divisor, min_value=None):
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Make sure that round down does not go down by more than 10%.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v
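Two hand-computed evaluations make the rounding rule concrete:

# Nearest multiple of 8: int(20 + 4) // 8 * 8 = 24, and 24 >= 0.9 * 20, so it stands.
assert _make_divisible(20, 8) == 24
# int(10 + 4) // 8 * 8 = 8, but 8 < 0.9 * 10 = 9, so the guard bumps it up to 16.
assert _make_divisible(10, 8) == 16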
class GlobalAvgPooling(nn.Cell):
    """
    Global avg pooling definition.

    Args:

    Returns:
        Tensor, output tensor.

    Examples:
        >>> GlobalAvgPooling()
    """

    def __init__(self):
        super(GlobalAvgPooling, self).__init__()
        self.mean = P.ReduceMean(keep_dims=False)

    def construct(self, x):
        # Average over the spatial dimensions (H, W) of an NCHW tensor.
        x = self.mean(x, (2, 3))
        return x
class ConvBNReLU(nn.Cell):
    """
    Convolution/Depthwise fused with Batchnorm and ReLU block definition.

    Args:
        in_planes (int): Input channel.
        out_planes (int): Output channel.
        kernel_size (int): Input kernel size.
        stride (int): Stride size for the first convolutional layer. Default: 1.
        groups (int): Channel group. 1 for a regular convolution; equal to the
            input channel count for a depthwise convolution. Default: 1.

    Returns:
        Tensor, output tensor.

    Examples:
        >>> ConvBNReLU(16, 256, kernel_size=1, stride=1, groups=1)
    """

    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
        super(ConvBNReLU, self).__init__()
        padding = (kernel_size - 1) // 2
        conv = nn.Conv2dBnFoldQuant(in_planes, out_planes, kernel_size, stride,
                                    pad_mode='pad', padding=padding, quant_delay=_quant_delay,
                                    group=groups, per_channel=_per_channel, symmetric=_symmetric)
        layers = [conv, nn.ReLU()]
        self.features = nn.SequentialCell(layers)
        # min_init=0: ReLU output is non-negative, so the quantization range starts at zero.
        self.fake = nn.FakeQuantWithMinMax(ema=True, ema_decay=_ema_decay,
                                           min_init=0, quant_delay=_quant_delay)

    def construct(self, x):
        output = self.features(x)
        output = self.fake(output)
        return output
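The two configurations InvertedResidual relies on below illustrate the groups argument (sizes here are hypothetical):

pw = ConvBNReLU(16, 96, kernel_size=1)        # pointwise 1x1: regular conv, groups=1
dw = ConvBNReLU(96, 96, stride=2, groups=96)  # depthwise 3x3: one filter per channel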
class InvertedResidual(nn.Cell):
    """
    Mobilenetv2 residual block definition.

    Args:
        inp (int): Input channel.
        oup (int): Output channel.
        stride (int): Stride size for the first convolutional layer.
        expand_ratio (int): Expand ratio of input channel.

    Returns:
        Tensor, output tensor.

    Examples:
        >>> InvertedResidual(3, 256, 1, 1)
    """

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]
        hidden_dim = int(round(inp * expand_ratio))
        self.use_res_connect = stride == 1 and inp == oup

        layers = []
        if expand_ratio != 1:
            layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
        layers.extend([
            # dw
            ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
            # pw-linear
            nn.Conv2dBnFoldQuant(hidden_dim, oup, kernel_size=1, stride=1,
                                 pad_mode='pad', padding=0, group=1,
                                 per_channel=_per_channel, symmetric=_symmetric,
                                 quant_delay=_quant_delay),
            nn.FakeQuantWithMinMax(ema=True, ema_decay=_ema_decay, quant_delay=_quant_delay)
        ])
        self.conv = nn.SequentialCell(layers)
        self.add = P.TensorAdd()
        self.add_fake = nn.FakeQuantWithMinMax(ema=True, ema_decay=_ema_decay,
                                               quant_delay=_quant_delay)

    def construct(self, x):
        identity = x
        x = self.conv(x)
        if self.use_res_connect:
            x = self.add(identity, x)
            x = self.add_fake(x)
        return x
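Note that the residual add gets its own FakeQuantWithMinMax (add_fake): the sum of two quantized tensors has a different dynamic range than either input, so it needs freshly learned min/max statistics. A quick check of the block arithmetic, with hypothetical sizes:

# inp=16, expand_ratio=6 -> hidden_dim = round(16 * 6) = 96; stride == 1 and
# inp == oup, so the residual connection (and add_fake) is active.
block = InvertedResidual(16, 16, stride=1, expand_ratio=6)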
class MobileNetV2Quant(nn.Cell):
    """
    MobileNetV2Quant architecture.

    Args:
        num_classes (int): Number of classes. Default is 1000.
        width_mult (float): Channel multiplier; results are rounded to multiples
            of round_nearest. Default is 1.
        has_dropout (bool): Whether dropout is used. Default is False.
        inverted_residual_setting (list): Inverted residual settings. Default is None.
        round_nearest (int): Round channel counts to this multiple. Default is 8.

    Returns:
        Tensor, output tensor.

    Examples:
        >>> MobileNetV2Quant(num_classes=1000)
    """

    def __init__(self, num_classes=1000, width_mult=1.,
                 has_dropout=False, inverted_residual_setting=None, round_nearest=8):
        super(MobileNetV2Quant, self).__init__()
        block = InvertedResidual
        input_channel = 32
        last_channel = 1280
        # setting of inverted residual blocks
        self.cfgs = inverted_residual_setting
        if inverted_residual_setting is None:
            self.cfgs = [
                # t (expand ratio), c (output channels), n (repeats), s (stride of first block)
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1],
            ]

        # building first layer
        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        self.out_channels = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)

        self.input_fake = nn.FakeQuantWithMinMax(ema=True, ema_decay=_ema_decay,
                                                 quant_delay=_quant_delay)
        features = [ConvBNReLU(3, input_channel, stride=2)]
        # building inverted residual blocks
        for t, c, n, s in self.cfgs:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel
        # building last several layers
        features.append(ConvBNReLU(input_channel, self.out_channels, kernel_size=1))
        # make it nn.CellList
        self.features = nn.SequentialCell(features)
        # mobilenet head
        head = ([GlobalAvgPooling(),
                 nn.DenseQuant(self.out_channels, num_classes, has_bias=True,
                               per_channel=_per_channel, symmetric=_symmetric,
                               quant_delay=_quant_delay),
                 nn.FakeQuantWithMinMax(ema=True, ema_decay=_ema_decay)]
                if not has_dropout else
                [GlobalAvgPooling(),
                 nn.Dropout(0.2),
                 nn.DenseQuant(self.out_channels, num_classes, has_bias=True,
                               per_channel=_per_channel, symmetric=_symmetric,
                               quant_delay=_quant_delay),
                 nn.FakeQuantWithMinMax(ema=True, ema_decay=_ema_decay,
                                        quant_delay=_quant_delay)])
        self.head = nn.SequentialCell(head)

    def construct(self, x):
        x = self.input_fake(x)
        x = self.features(x)
        x = self.head(x)
        return x


def mobilenetV2_quant(**kwargs):
    """
    Constructs a MobileNet V2 model.
    """
    return MobileNetV2Quant(**kwargs)
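A minimal smoke test of the factory (the import path assumes running from the src directory, and the 224x224 input is the standard MobileNetV2 setting; both are assumptions here):

import numpy as np
import mindspore
from mindspore import Tensor
from mobilenetV2_quant import mobilenetV2_quant

net = mobilenetV2_quant(num_classes=1000)
x = Tensor(np.random.uniform(-1.0, 1.0, (1, 3, 224, 224)), mindspore.float32)
y = net(x)  # expected shape: (1, 1000)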