MindSpore / mindarmour
Commit 9dcca233
Authored June 24, 2020 by zhenghuanhuan

change DPModel param l2_norm_clip to norm_clip.

Parent: 2899fd26
Showing 5 changed files with 27 additions and 27 deletions (+27 −27)
example/mnist_demo/lenet5_config.py  +2 −2
example/mnist_demo/lenet5_dp.py  +3 −3
example/mnist_demo/lenet5_dp_pynative_mode.py  +3 −3
mindarmour/diff_privacy/train/model.py  +13 −13
tests/ut/python/diff_privacy/test_model_train.py  +6 −6
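After this commit the clipping bound is named norm_clip everywhere: the config key (previously 'l2_norm_bound'), the keyword of the internal training cells (previously l2_norm_clip), and the existing norm_clip argument of DPModel. A minimal post-change usage sketch, mirroring example/mnist_demo/lenet5_dp.py below; the import path and the surrounding objects (network, net_loss, net_opt) are assumptions taken from the example script and are not shown in these hunks:

    # Sketch only: mirrors the renamed call sites in the example scripts.
    # The import path is assumed from the MindArmour examples of this period.
    from mindarmour.diff_privacy import DPModel, MechanismsFactory

    norm_clip = 1.0  # this value lived under the config key 'l2_norm_bound' before the rename
    mech = MechanismsFactory().create('AdaGaussian',
                                      norm_bound=norm_clip,
                                      initial_noise_multiplier=0.2)
    model = DPModel(micro_batches=16,
                    norm_clip=norm_clip,   # DPModel already used this keyword; internals now match
                    mech=mech,
                    network=network,       # e.g. LeNet5(), as in the example script
                    loss_fn=net_loss,
                    optimizer=net_opt)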
example/mnist_demo/lenet5_config.py
@@ -30,9 +30,9 @@ mnist_cfg = edict({
     'keep_checkpoint_max': 10,  # the maximum number of checkpoint files would be saved
     'device_target': 'Ascend',  # device used
     'data_path': './MNIST_unzip',  # the path of training and testing data set
     'dataset_sink_mode': False,  # whether deliver all training data to device one time
     'micro_batches': 16,  # the number of small batches split from an original batch
-    'l2_norm_bound': 1.0,  # the clip bound of the gradients of model's training parameters
+    'norm_clip': 1.0,  # the clip bound of the gradients of model's training parameters
     'initial_noise_multiplier': 0.2,  # the initial multiplication coefficient of the noise added to training
                                       # parameters' gradients
     'mechanisms': 'AdaGaussian',  # the method of adding noise in gradients while training
example/mnist_demo/lenet5_dp.py
@@ -108,7 +108,7 @@ if __name__ == "__main__":
     # means that the privacy protection effect is weak. Mechanisms can be 'Gaussian' or 'AdaGaussian', in which noise
     # would be decayed with 'AdaGaussian' mechanism while be constant with 'Gaussian' mechanism.
     mech = MechanismsFactory().create(cfg.mechanisms,
-                                      norm_bound=cfg.l2_norm_bound,
+                                      norm_bound=cfg.norm_clip,
                                       initial_noise_multiplier=cfg.initial_noise_multiplier)
     net_opt = nn.Momentum(params=network.trainable_params(), learning_rate=cfg.lr, momentum=cfg.momentum)
     # Create a monitor for DP training. The function of the monitor is to compute and print the privacy budget(eps
@@ -116,11 +116,11 @@ if __name__ == "__main__":
     rdp_monitor = PrivacyMonitorFactory.create('rdp',
                                                num_samples=60000,
                                                batch_size=cfg.batch_size,
-                                               initial_noise_multiplier=cfg.initial_noise_multiplier*cfg.l2_norm_bound,
+                                               initial_noise_multiplier=cfg.initial_noise_multiplier*cfg.norm_clip,
                                                per_print_times=10)
     # Create the DP model for training.
     model = DPModel(micro_batches=cfg.micro_batches,
-                    norm_clip=cfg.l2_norm_bound,
+                    norm_clip=cfg.norm_clip,
                     mech=mech,
                     network=network,
                     loss_fn=net_loss,
example/mnist_demo/lenet5_dp_pynative_mode.py
@@ -109,7 +109,7 @@ if __name__ == "__main__":
     # would be decayed with 'AdaGaussian' mechanism while be constant with 'Gaussian' mechanism.
     dp_opt = DPOptimizerClassFactory(micro_batches=cfg.micro_batches)
     dp_opt.set_mechanisms(cfg.mechanisms,
-                          norm_bound=cfg.l2_norm_bound,
+                          norm_bound=cfg.norm_clip,
                           initial_noise_multiplier=cfg.initial_noise_multiplier)
     net_opt = dp_opt.create('Momentum')(params=network.trainable_params(), learning_rate=cfg.lr, momentum=cfg.momentum)
     # Create a monitor for DP training. The function of the monitor is to compute and print the privacy budget(eps
@@ -117,11 +117,11 @@ if __name__ == "__main__":
     rdp_monitor = PrivacyMonitorFactory.create('rdp',
                                                num_samples=60000,
                                                batch_size=cfg.batch_size,
-                                               initial_noise_multiplier=cfg.initial_noise_multiplier*cfg.l2_norm_bound,
+                                               initial_noise_multiplier=cfg.initial_noise_multiplier*cfg.norm_clip,
                                                per_print_times=10)
     # Create the DP model for training.
     model = DPModel(micro_batches=cfg.micro_batches,
-                    norm_clip=cfg.l2_norm_bound,
+                    norm_clip=cfg.norm_clip,
                     mech=None,
                     network=network,
                     loss_fn=net_loss,
mindarmour/diff_privacy/train/model.py
@@ -93,7 +93,7 @@ class DPModel(Model):
     >>> loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
     >>> net_opt = Momentum(params=net.trainable_params(), learning_rate=0.01, momentum=0.9)
     >>> mech = MechanismsFactory().create('Gaussian',
-    >>>                                   norm_bound=args.l2_norm_bound,
+    >>>                                   norm_bound=args.norm_clip,
     >>>                                   initial_noise_multiplier=args.initial_noise_multiplier)
     >>> model = DPModel(micro_batches=2,
     >>>                 norm_clip=1.0,
@@ -111,8 +111,8 @@ class DPModel(Model):
             self._micro_batches = check_int_positive('micro_batches', micro_batches)
         else:
             self._micro_batches = None
-        float_norm_clip = check_param_type('l2_norm_clip', norm_clip, float)
-        self._norm_clip = check_value_positive('l2_norm_clip', float_norm_clip)
+        norm_clip = check_param_type('norm_clip', norm_clip, float)
+        self._norm_clip = check_value_positive('norm_clip', norm_clip)
         if mech is not None and "DPOptimizer" in kwargs['optimizer'].__class__.__name__:
             raise ValueError('DPOptimizer is not supported while mech is not None')
         if mech is None:
@@ -180,14 +180,14 @@ class DPModel(Model):
                                                          optimizer,
                                                          scale_update_cell=update_cell,
                                                          micro_batches=self._micro_batches,
-                                                         l2_norm_clip=self._norm_clip,
+                                                         norm_clip=self._norm_clip,
                                                          mech=self._mech).set_train()
             return network

         network = _TrainOneStepCell(network,
                                     optimizer,
                                     loss_scale,
                                     micro_batches=self._micro_batches,
-                                    l2_norm_clip=self._norm_clip,
+                                    norm_clip=self._norm_clip,
                                     mech=self._mech).set_train()
         return network
@@ -300,7 +300,7 @@ class _TrainOneStepWithLossScaleCell(Cell):
         optimizer (Cell): Optimizer for updating the weights.
         scale_update_cell(Cell): The loss scaling update logic cell. Default: None.
         micro_batches (int): The number of small batches split from an original batch. Default: None.
-        l2_norm_clip (float): Use to clip the bound, if set 1, will return the original data. Default: 1.0.
+        norm_clip (float): Use to clip the bound, if set 1, will return the original data. Default: 1.0.
         mech (Mechanisms): The object can generate the different type of noise. Default: None.

     Inputs:
@@ -316,7 +316,7 @@ class _TrainOneStepWithLossScaleCell(Cell):
         - **loss_scale** (Tensor) - Tensor with shape :math:`()`.
     """

-    def __init__(self, network, optimizer, scale_update_cell=None, micro_batches=None, l2_norm_clip=1.0, mech=None):
+    def __init__(self, network, optimizer, scale_update_cell=None, micro_batches=None, norm_clip=1.0, mech=None):
         super(_TrainOneStepWithLossScaleCell, self).__init__(auto_prefix=False)
         self.network = network
         self.network.set_grad()
@@ -358,8 +358,8 @@ class _TrainOneStepWithLossScaleCell(Cell):
         # dp params
         self._micro_batches = micro_batches
-        float_norm_clip = check_param_type('l2_norm_clip', l2_norm_clip, float)
-        self._l2_norm = check_value_positive('l2_norm_clip', float_norm_clip)
+        norm_clip = check_param_type('norm_clip', norm_clip, float)
+        self._l2_norm = check_value_positive('norm_clip', norm_clip)
         self._split = P.Split(0, self._micro_batches)
         self._clip_by_global_norm = _ClipGradients()
         self._mech = mech
@@ -452,7 +452,7 @@ class _TrainOneStepCell(Cell):
         optimizer (Cell): Optimizer for updating the weights.
         sens (Number): The scaling number to be filled as the input of back propagation. Default value is 1.0.
         micro_batches (int): The number of small batches split from an original batch. Default: None.
-        l2_norm_clip (float): Use to clip the bound, if set 1, will return the original data. Default: 1.0.
+        norm_clip (float): Use to clip the bound, if set 1, will return the original data. Default: 1.0.
         mech (Mechanisms): The object can generate the different type of noise. Default: None.

     Inputs:
@@ -463,7 +463,7 @@ class _TrainOneStepCell(Cell):
         Tensor, a scalar Tensor with shape :math:`()`.
     """

-    def __init__(self, network, optimizer, sens=1.0, micro_batches=None, l2_norm_clip=1.0, mech=None):
+    def __init__(self, network, optimizer, sens=1.0, micro_batches=None, norm_clip=1.0, mech=None):
         super(_TrainOneStepCell, self).__init__(auto_prefix=False)
         self.network = network
         self.network.set_grad()
@@ -484,8 +484,8 @@ class _TrainOneStepCell(Cell):
         # dp params
         self._micro_batches = micro_batches
-        float_norm_clip = check_param_type('l2_norm_clip', l2_norm_clip, float)
-        self._l2_norm = check_value_positive('l2_norm_clip', float_norm_clip)
+        norm_clip = check_param_type('norm_clip', norm_clip, float)
+        self._l2_norm = check_value_positive('norm_clip', norm_clip)
         self._split = P.Split(0, self._micro_batches)
         self._clip_by_global_norm = _ClipGradients()
         self._mech = mech
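For readers who only see a keyword being renamed: norm_clip is the L2 bound used when _ClipGradients clips each micro-batch's gradients before the mechanism adds noise, and the PrivacyMonitorFactory calls in the examples above scale the noise multiplier by the same bound. A standalone NumPy sketch of that clip-then-noise idea, purely illustrative and not the MindArmour implementation:

    # Illustrative only: the pattern that norm_clip configures, not MindArmour code.
    import numpy as np

    def clip_and_noise(grads, norm_clip=1.0, initial_noise_multiplier=0.2):
        """Clip a list of gradient arrays to a global L2 norm of norm_clip,
        then add Gaussian noise with std = initial_noise_multiplier * norm_clip."""
        global_norm = np.sqrt(sum(np.sum(g ** 2) for g in grads))
        scale = min(1.0, norm_clip / (global_norm + 1e-12))
        clipped = [g * scale for g in grads]
        std = initial_noise_multiplier * norm_clip
        return [g + np.random.normal(0.0, std, size=g.shape) for g in clipped]

    # Example: two fake gradient tensors from one micro-batch.
    grads = [np.ones((3, 3)), np.full((2,), 4.0)]
    noisy = clip_and_noise(grads, norm_clip=1.0, initial_noise_multiplier=0.2)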
tests/ut/python/diff_privacy/test_model_train.py
@@ -43,7 +43,7 @@ def dataset_generator(batch_size, batches):
 @pytest.mark.component_mindarmour
 def test_dp_model_pynative_mode():
     context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
-    l2_norm_bound = 1.0
+    norm_clip = 1.0
     initial_noise_multiplier = 0.01
     network = LeNet5()
     batch_size = 32
@@ -53,11 +53,11 @@ def test_dp_model_pynative_mode():
     loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
     factory_opt = DPOptimizerClassFactory(micro_batches=micro_batches)
     factory_opt.set_mechanisms('Gaussian',
-                               norm_bound=l2_norm_bound,
+                               norm_bound=norm_clip,
                                initial_noise_multiplier=initial_noise_multiplier)
     net_opt = factory_opt.create('Momentum')(network.trainable_params(), learning_rate=0.1, momentum=0.9)
     model = DPModel(micro_batches=micro_batches,
-                    norm_clip=l2_norm_bound,
+                    norm_clip=norm_clip,
                     mech=None,
                     network=network,
                     loss_fn=loss,
@@ -75,7 +75,7 @@ def test_dp_model_pynative_mode():
 @pytest.mark.component_mindarmour
 def test_dp_model_with_graph_mode():
     context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
-    l2_norm_bound = 1.0
+    norm_clip = 1.0
     initial_noise_multiplier = 0.01
     network = LeNet5()
     batch_size = 32
@@ -83,11 +83,11 @@ def test_dp_model_with_graph_mode():
     epochs = 1
     loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
     mech = MechanismsFactory().create('Gaussian',
-                                      norm_bound=l2_norm_bound,
+                                      norm_bound=norm_clip,
                                       initial_noise_multiplier=initial_noise_multiplier)
     net_opt = nn.Momentum(network.trainable_params(), learning_rate=0.1, momentum=0.9)
     model = DPModel(micro_batches=2,
-                    norm_clip=l2_norm_bound,
+                    norm_clip=norm_clip,
                     mech=mech,
                     network=network,
                     loss_fn=loss,