MindSpore / mindarmour

Commit dff8f9b2
Authored July 28, 2020 by mindspore-ci-bot; committed by Gitee on July 28, 2020

!63 Fix example error of NoiseMech with wrong policy
Merge pull request !63 from pkuliuliu/master

Parents: cb2825e3, 78e55ba0
Changes: 6 changed files with 32 additions and 28 deletions (+32 -28)
example/mnist_demo/dp_ada_gaussian_config.py    +3 -3
example/mnist_demo/lenet5_config.py             +3 -3
example/mnist_demo/lenet5_dp.py                 +1 -1
example/mnist_demo/lenet5_dp_ada_gaussian.py    +1 -1
example/mnist_demo/lenet5_dp_pynative_model.py  +2 -2
mindarmour/diff_privacy/train/model.py          +22 -18
example/mnist_demo/dp_ada_gaussian_config.py @ dff8f9b2

@@ -22,7 +22,7 @@ mnist_cfg = edict({
     'num_classes': 10,  # the number of classes of model's output
     'lr': 0.01,  # the learning rate of model's optimizer
     'momentum': 0.9,  # the momentum value of model's optimizer
-    'epoch_size': 10,  # training epochs
+    'epoch_size': 5,  # training epochs
     'batch_size': 256,  # batch size for training
     'image_height': 32,  # the height of training samples
     'image_width': 32,  # the width of training samples
@@ -31,9 +31,9 @@ mnist_cfg = edict({
     'device_target': 'Ascend',  # device used
     'data_path': './MNIST_unzip',  # the path of training and testing data set
     'dataset_sink_mode': False,  # whether deliver all training data to device one time
-    'micro_batches': 16,  # the number of small batches split from an original batch
+    'micro_batches': 32,  # the number of small batches split from an original batch
     'norm_bound': 1.0,  # the clip bound of the gradients of model's training parameters
-    'initial_noise_multiplier': 1.0,  # the initial multiplication coefficient of the noise added to training
+    'initial_noise_multiplier': 0.05,  # the initial multiplication coefficient of the noise added to training
                                        # parameters' gradients
     'noise_mechanisms': 'AdaGaussian',  # the method of adding noise in gradients while training
     'optimizer': 'Momentum'  # the base optimizer used for Differential privacy training
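What the retuned values mean downstream: in the DP-SGD recipe this example follows, each micro-batch gradient is clipped to norm_bound and then perturbed with Gaussian noise whose standard deviation is initial_noise_multiplier * norm_bound, so dropping the multiplier from 1.0 to 0.05 greatly reduces the per-step noise, while doubling micro_batches halves the records per micro-batch. A minimal NumPy sketch of that recipe (an illustration of standard DP-SGD, not MindArmour's internal code):

# Illustrative DP-SGD clip-and-noise step using the retuned config values.
import numpy as np

norm_bound = 1.0                 # cfg.norm_bound
initial_noise_multiplier = 0.05  # was 1.0 before this commit
rng = np.random.default_rng(0)

grad = rng.normal(size=10)                       # one micro-batch gradient
scale = min(1.0, norm_bound / np.linalg.norm(grad))
clipped = grad * scale                           # global-norm clipping
sigma = initial_noise_multiplier * norm_bound    # Gaussian noise stddev
noisy_grad = clipped + rng.normal(scale=sigma, size=grad.shape)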
example/mnist_demo/lenet5_config.py @ dff8f9b2

@@ -22,7 +22,7 @@ mnist_cfg = edict({
     'num_classes': 10,  # the number of classes of model's output
     'lr': 0.01,  # the learning rate of model's optimizer
     'momentum': 0.9,  # the momentum value of model's optimizer
-    'epoch_size': 10,  # training epochs
+    'epoch_size': 5,  # training epochs
     'batch_size': 256,  # batch size for training
     'image_height': 32,  # the height of training samples
     'image_width': 32,  # the width of training samples
@@ -31,9 +31,9 @@ mnist_cfg = edict({
     'device_target': 'Ascend',  # device used
     'data_path': './MNIST_unzip',  # the path of training and testing data set
     'dataset_sink_mode': False,  # whether deliver all training data to device one time
-    'micro_batches': 16,  # the number of small batches split from an original batch
+    'micro_batches': 32,  # the number of small batches split from an original batch
     'norm_bound': 1.0,  # the clip bound of the gradients of model's training parameters
-    'initial_noise_multiplier': 1.0,  # the initial multiplication coefficient of the noise added to training
+    'initial_noise_multiplier': 0.05,  # the initial multiplication coefficient of the noise added to training
                                        # parameters' gradients
     'noise_mechanisms': 'Gaussian',  # the method of adding noise in gradients while training
     'clip_mechanisms': 'Gaussian',  # the method of adaptive clipping gradients while training
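A practical constraint when retuning these two configs: the DP model splits every training batch into micro_batches slices, so batch_size should stay divisible by micro_batches (stated here as an assumption, checked with the same edict structure the configs use):

# Sanity check: each 256-sample batch now splits into 32 micro-batches of
# 8 records each (previously 16 micro-batches of 16 records).
from easydict import EasyDict as edict

cfg = edict({'batch_size': 256, 'micro_batches': 32})
assert cfg.batch_size % cfg.micro_batches == 0
print(cfg.batch_size // cfg.micro_batches)  # -> 8 records per micro-batch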
example/mnist_demo/lenet5_dp.py @ dff8f9b2

@@ -155,7 +155,7 @@ if __name__ == "__main__":
                 dataset_sink_mode=cfg.dataset_sink_mode)

     LOGGER.info(TAG, "============== Starting Testing ==============")
-    ckpt_file_name = 'trained_ckpt_file/checkpoint_lenet-10_234.ckpt'
+    ckpt_file_name = 'trained_ckpt_file/checkpoint_lenet-5_234.ckpt'
     param_dict = load_checkpoint(ckpt_file_name)
     load_param_into_net(network, param_dict)
     ds_eval = generate_mnist_dataset(os.path.join(cfg.data_path, 'test'),
example/mnist_demo/lenet5_dp_ada_gaussian.py @ dff8f9b2

@@ -141,7 +141,7 @@ if __name__ == "__main__":
                 dataset_sink_mode=cfg.dataset_sink_mode)

     LOGGER.info(TAG, "============== Starting Testing ==============")
-    ckpt_file_name = 'trained_ckpt_file/checkpoint_lenet-10_234.ckpt'
+    ckpt_file_name = 'trained_ckpt_file/checkpoint_lenet-5_234.ckpt'
     param_dict = load_checkpoint(ckpt_file_name)
     load_param_into_net(network, param_dict)
     ds_eval = generate_mnist_dataset(os.path.join(cfg.data_path, 'test'),
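The one-line change in lenet5_dp.py and lenet5_dp_ada_gaussian.py above follows from the epoch_size change: MindSpore's ModelCheckpoint names saved files {prefix}-{epoch}_{step}.ckpt, so five epochs of 234 steps (60000 MNIST training images // 256 per batch) end at checkpoint_lenet-5_234.ckpt. A sketch of deriving the path from cfg instead of hard-coding it (the helper is hypothetical, not part of the examples):

# Hypothetical helper: derive the final checkpoint path from cfg so the
# test stage stays in sync when epoch_size or batch_size changes.
from easydict import EasyDict as edict

def last_ckpt_path(cfg, train_size=60000, prefix='checkpoint_lenet'):
    steps_per_epoch = train_size // cfg.batch_size  # 60000 // 256 = 234
    return 'trained_ckpt_file/{}-{}_{}.ckpt'.format(prefix, cfg.epoch_size,
                                                    steps_per_epoch)

cfg = edict({'batch_size': 256, 'epoch_size': 5})
print(last_ckpt_path(cfg))  # -> trained_ckpt_file/checkpoint_lenet-5_234.ckpt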
example/mnist_demo/lenet5_dp_pynative_model.py @ dff8f9b2

@@ -111,7 +111,7 @@ if __name__ == "__main__":
     dp_opt.set_mechanisms(cfg.noise_mechanisms,
                           norm_bound=cfg.norm_bound,
                           initial_noise_multiplier=cfg.initial_noise_multiplier,
-                          decay_policy='Exp')
+                          decay_policy=None)
     # Create a factory class of clip mechanisms, this method is to adaptive clip
     # gradients while training, decay_policy support 'Linear' and 'Geometric',
     # learning_rate is the learning rate to update clip_norm,
@@ -147,7 +147,7 @@ if __name__ == "__main__":
                 dataset_sink_mode=cfg.dataset_sink_mode)
     LOGGER.info(TAG, "============== Starting Testing ==============")
-    ckpt_file_name = 'trained_ckpt_file/checkpoint_lenet-10_234.ckpt'
+    ckpt_file_name = 'trained_ckpt_file/checkpoint_lenet-5_234.ckpt'
     param_dict = load_checkpoint(ckpt_file_name)
     load_param_into_net(network, param_dict)
     ds_eval = generate_mnist_dataset(os.path.join(cfg.data_path, 'test'),
                                      batch_size=cfg.batch_size)
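The decay_policy hunk above is the error named in the merge title: the PYNATIVE example runs with noise_mechanisms='Gaussian', and plain Gaussian noise is fixed rather than decaying, so passing the AdaGaussian-style decay_policy='Exp' was invalid; None is the correct setting. A hypothetical sketch of the constraint (illustrative only, not MindArmour's validation code):

# Hypothetical sketch of the constraint behind this fix: plain 'Gaussian'
# noise is non-adaptive and takes no decay policy, while 'AdaGaussian'
# decays its noise multiplier over training and requires one.
def check_noise_policy(mech_name, decay_policy):
    """Illustrative check only, not MindArmour's implementation."""
    if mech_name == 'Gaussian' and decay_policy is not None:
        raise ValueError("'Gaussian' adds fixed noise; use decay_policy=None.")
    if mech_name == 'AdaGaussian' and decay_policy is None:
        raise ValueError("'AdaGaussian' requires a decay_policy such as 'Exp'.")

check_noise_policy('Gaussian', None)     # the patched call: passes
# check_noise_policy('Gaussian', 'Exp')  # the old call: would raise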
mindarmour/diff_privacy/train/model.py @ dff8f9b2

@@ -656,14 +656,16 @@ class _TrainOneStepCell(Cell):
         record_grad = self.grad(self.network, weights)(record_datas[0],
                                                        record_labels[0], sens)
         beta = self._zero
-        square_sum = self._zero
-        for grad in record_grad:
-            square_sum = self._add(square_sum,
-                                   self._reduce_sum(self._square_all(grad)))
-        norm_grad = self._sqrt(square_sum)
-        beta = self._add(beta,
-                         self._cast(self._less(norm_grad, self._norm_bound),
-                                    mstype.float32))
+        # calcu beta
+        if self._clip_mech is not None:
+            square_sum = self._zero
+            for grad in record_grad:
+                square_sum = self._add(square_sum,
+                                       self._reduce_sum(self._square_all(grad)))
+            norm_grad = self._sqrt(square_sum)
+            beta = self._add(beta,
+                             self._cast(self._less(norm_grad, self._norm_bound),
+                                        mstype.float32))
         record_grad = self._clip_by_global_norm(record_grad,
                                                 GRADIENT_CLIP_TYPE,
                                                 self._norm_bound)
@@ -675,14 +677,16 @@ class _TrainOneStepCell(Cell):
             record_grad = self.grad(self.network, weights)(record_datas[i],
                                                            record_labels[i],
                                                            sens)
-            square_sum = self._zero
-            for grad in record_grad:
-                square_sum = self._add(square_sum,
-                                       self._reduce_sum(self._square_all(grad)))
-            norm_grad = self._sqrt(square_sum)
-            beta = self._add(beta,
-                             self._cast(self._less(norm_grad, self._norm_bound),
-                                        mstype.float32))
+            # calcu beta
+            if self._clip_mech is not None:
+                square_sum = self._zero
+                for grad in record_grad:
+                    square_sum = self._add(square_sum,
+                                           self._reduce_sum(self._square_all(grad)))
+                norm_grad = self._sqrt(square_sum)
+                beta = self._add(beta,
+                                 self._cast(self._less(norm_grad, self._norm_bound),
+                                            mstype.float32))
             record_grad = self._clip_by_global_norm(record_grad,
                                                     GRADIENT_CLIP_TYPE,
                                                     self._norm_bound)
@@ -690,7 +694,6 @@ class _TrainOneStepCell(Cell):
             grads = self._tuple_add(grads, record_grad)
             total_loss = P.TensorAdd()(total_loss, loss)
         loss = self._div(total_loss, self._micro_float)
-        beta = self._div(beta, self._micro_batches)

         if self._noise_mech is not None:
             grad_noise_tuple = ()
@@ -710,8 +713,9 @@ class _TrainOneStepCell(Cell):
         grads = self.grad_reducer(grads)

         if self._clip_mech is not None:
+            beta = self._div(beta, self._micro_batches)
             next_norm_bound = self._clip_mech(beta, self._norm_bound)
             self._norm_bound = self._assign(self._norm_bound, next_norm_bound)
-            loss = F.depend(loss, next_norm_bound)
+            loss = F.depend(loss, self._norm_bound)

         return F.depend(loss, self.optimizer(grads))
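Summing up the model.py hunks: beta counts how many micro-batch gradients already have a global norm below the clip bound, and the adaptive clip mechanism turns that fraction into the next norm_bound. The patch computes beta only when a clip mechanism is attached, moves the division by micro_batches into that branch, and makes the loss depend on the assigned self._norm_bound rather than the pre-assignment next_norm_bound, so the assignment stays in the graph. A minimal NumPy sketch of the beta computation (an illustration, not the MindSpore cell code):

# Empirical beta: fraction of per-micro-batch gradients whose global L2
# norm falls below the current clip bound, as computed in the patched cell.
import numpy as np

def empirical_beta(record_grads, norm_bound):
    """record_grads: one list of per-parameter arrays per micro-batch."""
    hits = 0.0
    for grads in record_grads:
        square_sum = sum(np.sum(np.square(g)) for g in grads)
        norm_grad = np.sqrt(square_sum)          # global gradient norm
        hits += float(norm_grad < norm_bound)    # the _less/_cast/_add chain
    return hits / len(record_grads)              # the _div by micro_batches

rng = np.random.default_rng(0)
record_grads = [[rng.normal(size=(3, 3)), rng.normal(size=3)] for _ in range(32)]
print(empirical_beta(record_grads, norm_bound=1.0))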