magicwindyyd / mindspore (forked from MindSpore / mindspore)

Commit a9d06eda
Authored on June 19, 2020 by mindspore-ci-bot
Committed by Gitee on June 19, 2020
Parents: 8eceb897, bbce6faf

!2282 remove _quant_op.py from __init__.py

Merge pull request !2282 from chenzhongming/master

10 changed files with 49 additions and 48 deletions (+49 −48)
mindspore/nn/layer/quant.py                         +13 −13
mindspore/ops/_grad/grad_quant_ops.py               +19 −18
mindspore/ops/operations/__init__.py                 +0 −1
mindspore/ops/operations/_quant_ops.py               +6 −6
mindspore/train/quant/quant.py                       +1 −1
tests/st/ops/gpu/test_batchnorm_fold2_op.py          +2 −1
tests/st/ops/gpu/test_batchnorm_fold_grad_op.py      +2 −2
tests/st/ops/gpu/test_batchnorm_fold_op.py           +2 −2
tests/st/ops/gpu/test_correction_mul_grad_op.py      +2 −2
tests/st/ops/gpu/test_correction_mul_op.py           +2 −2
未找到文件。
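The practical effect of this commit, summarized before the per-file diffs: the quantization primitives are no longer re-exported through mindspore.ops.operations, so callers import the private module explicitly. A minimal sketch of the resulting import pattern (this is a reading of the diff below, not an official migration note; the module paths and op names are taken from the changed files):

    # Public operations keep the usual alias:
    from mindspore.ops import operations as P
    # Quant primitives now come from the private module instead of the
    # wildcard re-export that this commit removes from operations/__init__.py:
    from mindspore.ops.operations import _quant_ops as Q

    fake_quant = Q.FakeQuantPerLayer(num_bits=8)   # previously reached as P.FakeQuantPerLayer
    correction = Q.CorrectionMul()                 # previously P.CorrectionMul()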
mindspore/nn/layer/quant.py
@@ -32,7 +32,7 @@ from .activation import get_activation
 from ..cell import Cell
 from . import conv, basic
 from ..._checkparam import ParamValidator as validator
+from ...ops.operations import _quant_ops as Q

 __all__ = [
     'Conv2dBnAct',
@@ -242,11 +242,11 @@ class BatchNormFoldCell(Cell):
         self.epsilon = epsilon
         self.is_gpu = context.get_context('device_target') == "GPU"
         if self.is_gpu:
-            self.bn_train = P.BatchNormFold(momentum, epsilon, is_training=True, freeze_bn=freeze_bn)
+            self.bn_train = Q.BatchNormFold(momentum, epsilon, is_training=True, freeze_bn=freeze_bn)
-            self.bn_infer = P.BatchNormFold(momentum, epsilon, is_training=False, freeze_bn=freeze_bn)
+            self.bn_infer = Q.BatchNormFold(momentum, epsilon, is_training=False, freeze_bn=freeze_bn)
         else:
             self.bn_reduce = P.BNTrainingReduce()
-            self.bn_update = P.BatchNormFoldD(momentum, epsilon, is_training=True, freeze_bn=freeze_bn)
+            self.bn_update = Q.BatchNormFoldD(momentum, epsilon, is_training=True, freeze_bn=freeze_bn)

     def construct(self, x, mean, variance, global_step):
         if self.is_gpu:
@@ -337,11 +337,11 @@ class FakeQuantWithMinMax(Cell):
         # init fake quant relative op
         if per_channel:
-            quant_fun = partial(P.FakeQuantPerChannel, channel_axis=self.channel_axis)
+            quant_fun = partial(Q.FakeQuantPerChannel, channel_axis=self.channel_axis)
-            ema_fun = partial(P.FakeQuantMinMaxPerChannelUpdate, channel_axis=self.channel_axis)
+            ema_fun = partial(Q.FakeQuantMinMaxPerChannelUpdate, channel_axis=self.channel_axis)
         else:
-            quant_fun = P.FakeQuantPerLayer
+            quant_fun = Q.FakeQuantPerLayer
-            ema_fun = P.FakeQuantMinMaxPerLayerUpdate
+            ema_fun = Q.FakeQuantMinMaxPerLayerUpdate

         if self.is_ascend:
             self.fake_quant = quant_fun(num_bits=self.num_bits,
@@ -510,13 +510,13 @@ class Conv2dBatchNormQuant(Cell):
                                                  symmetric=symmetric,
                                                  narrow_range=narrow_range)
         self.batchnorm_fold = BatchNormFoldCell(epsilon=eps, momentum=momentum, freeze_bn=freeze_bn)
-        self.correct_mul = P.CorrectionMul(channel_axis)
+        self.correct_mul = Q.CorrectionMul(channel_axis)
         if context.get_context('device_target') == "Ascend":
-            self.batchnorm_fold2_train = P.BatchNormFold2_D(freeze_bn=freeze_bn)
+            self.batchnorm_fold2_train = Q.BatchNormFold2_D(freeze_bn=freeze_bn)
-            self.batchnorm_fold2_infer = P.BatchNormFold2_D(freeze_bn=0)
+            self.batchnorm_fold2_infer = Q.BatchNormFold2_D(freeze_bn=0)
         elif context.get_context('device_target') == "GPU":
-            self.batchnorm_fold2_train = P.BatchNormFold2(freeze_bn=freeze_bn)
+            self.batchnorm_fold2_train = Q.BatchNormFold2(freeze_bn=freeze_bn)
-            self.batchnorm_fold2_infer = P.BatchNormFold2(freeze_bn=0)
+            self.batchnorm_fold2_infer = Q.BatchNormFold2(freeze_bn=0)
         else:
             raise ValueError("Unsupported platform: {}".format(context.get_context('device_target')))
         self.step = Parameter(initializer('normal', [1], dtype=mstype.int32), name='step', requires_grad=False)
mindspore/ops/_grad/grad_quant_ops.py
@@ -16,15 +16,16 @@
 """Generate bprop for aware quantization ops"""

 from .. import operations as P
+from ..operations import _quant_ops as Q
 from .grad_base import bprop_getters
 from ..composite.multitype_ops.zeros_like_impl import zeros_like
 from ... import context

-@bprop_getters.register(P.FakeQuantPerLayer)
+@bprop_getters.register(Q.FakeQuantPerLayer)
 def get_bprop_fakequant_with_minmax(self):
     """Generate bprop for FakeQuantPerLayer for GPU and Ascend"""
-    op = P.FakeQuantPerLayerGrad(
+    op = Q.FakeQuantPerLayerGrad(
         num_bits=self.num_bits, quant_delay=self.quant_delay)

     def bprop(x, x_min, x_max, out, dout):
@@ -34,10 +35,10 @@ def get_bprop_fakequant_with_minmax(self):
     return bprop

-@bprop_getters.register(P.FakeQuantPerChannel)
+@bprop_getters.register(Q.FakeQuantPerChannel)
 def get_bprop_fakequant_with_minmax_perchannel(self):
     """Generate bprop for FakeQuantPerChannel"""
-    op = P.FakeQuantPerChannelGrad(num_bits=self.num_bits,
+    op = Q.FakeQuantPerChannelGrad(num_bits=self.num_bits,
                                    quant_delay=self.quant_delay,
                                    symmetric=self.symmetric,
                                    narrow_range=self.symmetric,
@@ -50,10 +51,10 @@ def get_bprop_fakequant_with_minmax_perchannel(self):
     return bprop

-@bprop_getters.register(P.BatchNormFold)
+@bprop_getters.register(Q.BatchNormFold)
 def get_bprop_batchnorm_fold(self):
     """Generate bprop for BatchNormFold for GPU"""
-    op = P.BatchNormFoldGrad(self.epsilon, self.is_training, self.freeze_bn)
+    op = Q.BatchNormFoldGrad(self.epsilon, self.is_training, self.freeze_bn)

     def bprop(x, mean, variance, global_step, out, dout):
         dx = op(dout[0], dout[1], x, out[0], out[1], global_step)
@@ -62,11 +63,11 @@ def get_bprop_batchnorm_fold(self):
     return bprop

-@bprop_getters.register(P.CorrectionMul)
+@bprop_getters.register(Q.CorrectionMul)
 def get_bprop_correction_mul(self):
     """Generate bprop for CorrectionMul for Ascend and GPU"""
-    grad_dx = P.CorrectionMulGrad(self.channel_axis)
+    grad_dx = Q.CorrectionMulGrad(self.channel_axis)
-    grad_d_batch_std = P.CorrectionMulGradReduce(self.channel_axis)
+    grad_d_batch_std = Q.CorrectionMulGradReduce(self.channel_axis)

     def bprop(x, batch_std, running_std, out, dout):
         dx, d_batch_std = grad_dx(dout, x, batch_std, running_std)
@@ -83,10 +84,10 @@ def get_bprop_correction_mul(self):
     return bprop

-@bprop_getters.register(P.BatchNormFold2)
+@bprop_getters.register(Q.BatchNormFold2)
 def get_bprop_batchnorm_fold2(self):
     """Generate bprop for BatchNormFold2 for GPU"""
-    op_f = P.BatchNormFold2Grad(freeze_bn=self.freeze_bn)
+    op_f = Q.BatchNormFold2Grad(freeze_bn=self.freeze_bn)

     def bprop(x, beta, gamma, batch_std, batch_mean, running_std, running_mean, global_step, out, dout):
         d_batch_std, d_batch_mean, d_beta, d_gamma, d_x = op_f(dout, x, gamma, batch_std, batch_mean, running_std,
@@ -97,10 +98,10 @@ def get_bprop_batchnorm_fold2(self):
     return bprop

-@bprop_getters.register(P.BatchNormFoldD)
+@bprop_getters.register(Q.BatchNormFoldD)
 def get_bprop_BatchNormFold(self):
     """Generate bprop for BatchNormFold for Ascend"""
-    op = P.BatchNormFoldGradD(self.epsilon, self.is_training, self.freeze_bn)
+    op = Q.BatchNormFoldGradD(self.epsilon, self.is_training, self.freeze_bn)

     def bprop(x, x_sum, x_square_sum, mean, variance, out, dout):
         dx = op(dout[1], dout[2], x, out[1], out[2])
@@ -117,11 +118,11 @@ def get_bprop_BNTrainingReduce(self):
     return bprop

-@bprop_getters.register(P.BatchNormFold2_D)
+@bprop_getters.register(Q.BatchNormFold2_D)
 def get_bprop_batchnorm_fold2_(self):
     """Generate bprop for BatchNormFold2 for Ascend"""
-    op_reduce = P.BatchNormFold2GradReduce(freeze_bn=self.freeze_bn)
+    op_reduce = Q.BatchNormFold2GradReduce(freeze_bn=self.freeze_bn)
-    op_f = P.BatchNormFold2GradD(freeze_bn=self.freeze_bn)
+    op_f = Q.BatchNormFold2GradD(freeze_bn=self.freeze_bn)

     def bprop(x, beta, gamma, batch_std, batch_mean, running_std, out, dout):
         dout_reduce, dout_x_reduce = op_reduce(dout, x)
@@ -132,7 +133,7 @@ def get_bprop_batchnorm_fold2_(self):
     return bprop

-@bprop_getters.register(P.FakeQuantMinMaxPerLayerUpdate)
+@bprop_getters.register(Q.FakeQuantMinMaxPerLayerUpdate)
 def get_bprop_fakequant_with_minmax_per_layer_update(self):
     """Generate bprop for FakeQuantMinMaxPerLayerUpdate for Ascend"""
@@ -142,7 +143,7 @@ def get_bprop_fakequant_with_minmax_per_layer_update(self):
     return bprop

-@bprop_getters.register(P.FakeQuantMinMaxPerChannelUpdate)
+@bprop_getters.register(Q.FakeQuantMinMaxPerChannelUpdate)
 def get_bprop_fakequant_with_minmax_per_channel_update(self):
     """Generate bprop for FakeQuantMinMaxPerChannelUpdate for Ascend"""
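For readers unfamiliar with the decorator touched in every hunk above: bprop_getters.register associates a primitive class with the function that builds its backprop rule, so re-registering against the Q.* classes is what keeps the gradient definitions attached to the relocated ops. A generic, self-contained sketch of that registry pattern (plain-Python stand-ins named after the diff; this does not import MindSpore and is not its internal implementation):

    # Hypothetical stand-in for the bprop registry pattern.
    _bprop_registry = {}

    def register(prim_cls):
        """Decorator: associate a primitive class with its bprop builder."""
        def deco(fn):
            _bprop_registry[prim_cls] = fn
            return fn
        return deco

    class FakeQuantPerLayer:           # stand-in for Q.FakeQuantPerLayer
        num_bits, quant_delay = 8, 0

    @register(FakeQuantPerLayer)
    def get_bprop_fakequant_with_minmax(self):
        def bprop(x, x_min, x_max, out, dout):
            # the real rule routes dout through FakeQuantPerLayerGrad;
            # min/max receive zero-like gradients
            return dout, 0, 0
        return bprop

    builder = _bprop_registry[FakeQuantPerLayer]   # lookup mirrors what autodiff does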
mindspore/ops/operations/__init__.py
@@ -76,7 +76,6 @@ from .nn_ops import (LSTM, SGD, Adam, SparseApplyAdam, SparseApplyLazyAdam, Appl
                     ApplyRMSProp, ApplyCenteredRMSProp, BasicLSTMCell, InTopK)
 from .other_ops import (Assign, IOU, BoundingBoxDecode, BoundingBoxEncode,
                         CheckValid, MakeRefKey, Partial, Depend, CheckBprop)
-from ._quant_ops import *
 from .thor_ops import *

 __all__ = [
mindspore/ops/operations/_quant_ops.py
@@ -69,7 +69,7 @@ class FakeQuantPerLayer(PrimitiveWithInfer):
         >>> input_tensor = Tensor(np.random.rand(3, 16, 5, 5), mstype.float32)
         >>> min_tensor = Tensor(np.array([-6]), mstype.float32)
         >>> max_tensor = Tensor(np.array([6]), mstype.float32)
-        >>> output_tensor = P.FakeQuantPerLayer(num_bits=8)(input_tensor, min_tensor, max_tensor)
+        >>> output_tensor = FakeQuantPerLayer(num_bits=8)(input_tensor, min_tensor, max_tensor)
     """
     support_quant_bit = [4, 7, 8]
@@ -129,7 +129,7 @@ class FakeQuantPerLayerGrad(PrimitiveWithInfer):
     Performs grad of FakeQuantPerLayerGrad operation.

     Examples:
-        >>> fake_min_max_grad = P.FakeQuantPerLayerGrad()
+        >>> fake_min_max_grad = FakeQuantPerLayerGrad()
         >>> dout = Tensor(np.array([[-2.3, 1.2], [5.7, 0.2]]), mindspore.float32)
         >>> input_x = Tensor(np.array([[18, -23], [0.2, 6]]), mindspore.float32)
         >>> _min = Tensor(np.array([-4]), mindspore.float32)
@@ -206,7 +206,7 @@ class FakeQuantPerChannel(PrimitiveWithInfer):
         - Tensor, has the same type as input.

     Examples:
-        >>> fake_quant = P.FakeQuantPerChannel()
+        >>> fake_quant = FakeQuantPerChannel()
         >>> input_x = Tensor(np.array([3, 4, 5, -2, -3, -1]).reshape(3, 2), mindspore.float32)
         >>> _min = Tensor(np.linspace(-2, 2, 12).reshape(3, 2, 2), mindspore.float32)
         >>> _max = Tensor(np.linspace(8, 12, 12).reshape(3, 2, 2), mindspore.float32)
@@ -275,7 +275,7 @@ class FakeQuantPerChannelGrad(PrimitiveWithInfer):
     Performs grad of FakeQuantPerChannelGrad operation.

     Examples:
-        >>> fqmmpc_grad = P.FakeQuantPerChannelGrad()
+        >>> fqmmpc_grad = FakeQuantPerChannelGrad()
         >>> input_x = Tensor(np.random.randint(-4, 4, (2, 3, 4)), mindspore.float32)
         >>> dout = Tensor(np.random.randint(-2, 2, (2, 3, 4)), mindspore.float32)
         >>> _min = Tensor(np.random.randint(-8, 2, (2, 3, 4)), mindspore.float32)
@@ -858,7 +858,7 @@ class FakeQuantMinMaxPerLayerUpdate(PrimitiveWithInfer):
         >>> input_tensor = Tensor(np.random.rand(3, 16, 5, 5), mstype.float32)
         >>> min_tensor = Tensor(np.array([-6]), mstype.float32)
         >>> max_tensor = Tensor(np.array([6]), mstype.float32)
-        >>> output_tensor = P.FakeQuantWithMinMax(num_bits=8)(input_tensor, min_tensor, max_tensor)
+        >>> output_tensor = FakeQuantWithMinMax(num_bits=8)(input_tensor, min_tensor, max_tensor)
     """
     support_quant_bit = [4, 7, 8]
@@ -932,7 +932,7 @@ class FakeQuantMinMaxPerChannelUpdate(PrimitiveWithInfer):
         >>> x = Tensor(np.random.rand(3, 16, 5, 5), mstype.float32)
         >>> min = Tensor(np.random.uniform(-1, 1, size=16), mstype.float32)
         >>> max = Tensor(np.random.uniform(-1, 1, size=16), mstype.float32)
-        >>> output_tensor = P.FakeQuantWithMinMax(num_bits=8)(x, min, max)
+        >>> output_tensor = FakeQuantWithMinMax(num_bits=8)(x, min, max)
     """
     support_quant_bit = [4, 7, 8]
mindspore/train/quant/quant.py
@@ -253,7 +253,7 @@ def convert_quant_network(network,
         symmetric (bool): Quantization algorithm use symmetric or not. Default: False.
         narrow_range (bool): Quantization algorithm use narrow range or not. Default: False.

-    returns:
+    Returns:
         Cell, Network which has change to aware quantization training network.
     """
     net = ConvertToQuantNetwork(
tests/st/ops/gpu/test_batchnorm_fold2_op.py
@@ -21,6 +21,7 @@ import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
 from mindspore.ops import operations as P
+from mindspore.ops.operations import _quant_ops as Q

 context.set_context(device_target='GPU')
@@ -28,7 +29,7 @@ context.set_context(device_target='GPU')
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
-        self.op = P.BatchNormFold2(100000)
+        self.op = Q.BatchNormFold2(100000)

     @ms_function
     def construct(self, x, beta, gamma, batch_std, batch_mean, running_std, running_mean, current_step):
tests/st/ops/gpu/test_batchnorm_fold_grad_op.py
@@ -20,7 +20,7 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
-from mindspore.ops import operations as P
+from mindspore.ops.operations import _quant_ops as Q

 context.set_context(device_target='GPU')
@@ -28,7 +28,7 @@ context.set_context(device_target='GPU')
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
-        self.op = P.BatchNormFoldGrad(freeze_bn=10)
+        self.op = Q.BatchNormFoldGrad(freeze_bn=10)

     @ms_function
     def construct(self, d_batch_mean, d_batch_std, x, batch_mean, batch_std, current_step):
tests/st/ops/gpu/test_batchnorm_fold_op.py
@@ -20,7 +20,7 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
-from mindspore.ops import operations as P
+from mindspore.ops.operations import _quant_ops as Q

 context.set_context(device_target='GPU')
@@ -28,7 +28,7 @@ context.set_context(device_target='GPU')
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
-        self.op = P.BatchNormFold(momentum=0.9, freeze_bn=10)
+        self.op = Q.BatchNormFold(momentum=0.9, freeze_bn=10)

     @ms_function
     def construct(self, x, mean, variance, current_step):
tests/st/ops/gpu/test_correction_mul_grad_op.py
@@ -20,7 +20,7 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
-from mindspore.ops import operations as P
+from mindspore.ops.operations import _quant_ops as Q

 context.set_context(device_target='GPU')
@@ -28,7 +28,7 @@ context.set_context(device_target='GPU')
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
-        self.op_w = P.CorrectionMulGrad()
+        self.op_w = Q.CorrectionMulGrad()

     @ms_function
     def construct(self, dy, x, batch_std, running_std):
tests/st/ops/gpu/test_correction_mul_op.py
@@ -20,7 +20,7 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
-from mindspore.ops import operations as P
+from mindspore.ops.operations import _quant_ops as Q

 context.set_context(device_target='GPU')
@@ -28,7 +28,7 @@ context.set_context(device_target='GPU')
 class Net(nn.Cell):
     def __init__(self):
         super(Net, self).__init__()
-        self.op = P.CorrectionMul()
+        self.op = Q.CorrectionMul()

     @ms_function
     def construct(self, x, batch_var, moving_var):
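The GPU tests above all switch the op under test to the Q alias; a condensed sketch of the resulting pattern, using test_correction_mul_op.py as the model (the construct body is assumed for illustration, since the diffs cut off before it):

    import mindspore.context as context
    import mindspore.nn as nn
    from mindspore.common.api import ms_function
    from mindspore.ops.operations import _quant_ops as Q   # was: from mindspore.ops import operations as P

    context.set_context(device_target='GPU')

    class Net(nn.Cell):
        def __init__(self):
            super(Net, self).__init__()
            self.op = Q.CorrectionMul()          # was: P.CorrectionMul()

        @ms_function
        def construct(self, x, batch_var, moving_var):
            # assumed body: forward the inputs to the quant op under test
            return self.op(x, batch_var, moving_var)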