Commit d9f12c49
Authored Aug 19, 2020 by pkuliuliu

support graph mode in DPOptimizer

Parent: 425cc952
Showing 3 changed files with 35 additions and 30 deletions (+35 -30):

example/mnist_demo/lenet5_dp_optimizer.py        +1  -2
mindarmour/diff_privacy/optimizer/optimizer.py   +34 -24
mindarmour/diff_privacy/train/model.py           +0  -4
example/mnist_demo/lenet5_dp_pynative_model.py → example/mnist_demo/lenet5_dp_optimizer.py

...
@@ -87,8 +87,7 @@ def generate_mnist_dataset(data_path, batch_size=32, repeat_size=1,
 if __name__ == "__main__":
-    # This configure just can run in pynative mode.
-    context.set_context(mode=context.PYNATIVE_MODE, device_target=cfg.device_target)
+    context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target)
     network = LeNet5()
     net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
     config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps,
...
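With the example switched to graph mode, the setup elided above builds its optimizer through DPOptimizerClassFactory. Below is a hedged, self-contained sketch of that end-to-end flow; import paths and the Gaussian-mechanism arguments follow roughly contemporary MindArmour/MindSpore, hyper-parameter values are illustrative, and nn.Dense stands in for the example's LeNet5 — none of it is the elided file content.

    # Hedged sketch: DP training in GRAPH_MODE via DPOptimizerClassFactory.
    import numpy as np
    import mindspore.nn as nn
    import mindspore.dataset as ds
    from mindspore import context
    from mindspore.train.model import Model
    from mindarmour.diff_privacy import DPOptimizerClassFactory

    context.set_context(mode=context.GRAPH_MODE, device_target='CPU')

    network = nn.Dense(4, 3)                  # stand-in for the example's LeNet5
    factory = DPOptimizerClassFactory(micro_batches=2)
    factory.set_mechanisms('Gaussian', norm_bound=1.0, initial_noise_multiplier=1.5)
    net_opt = factory.create('Momentum')(params=network.trainable_params(),
                                         learning_rate=0.01, momentum=0.9)
    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True,
                                                reduction="mean")

    def gen():                                # tiny random stand-in dataset
        for _ in range(8):
            yield (np.random.randn(4).astype(np.float32),
                   np.int32(np.random.randint(0, 3)))

    ds_train = ds.GeneratorDataset(gen, ["data", "label"]).batch(4)
    Model(network, loss_fn=net_loss, optimizer=net_opt).train(
        1, ds_train, dataset_sink_mode=False)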
mindarmour/diff_privacy/optimizer/optimizer.py

...
@@ -71,7 +71,7 @@ class DPOptimizerClassFactory:
     def __init__(self, micro_batches=2):
         self._mech_factory = NoiseMechanismsFactory()
-        self.mech = None
+        self._mech = None
         self._micro_batches = check_int_positive('micro_batches', micro_batches)

     def set_mechanisms(self, policy, *args, **kwargs):
...
@@ -81,9 +81,9 @@ class DPOptimizerClassFactory:
         Args:
             policy (str): Choose mechanism type.
         """
-        self.mech = self._mech_factory.create(policy, *args, **kwargs)
+        self._mech = self._mech_factory.create(policy, *args, **kwargs)

-    def create(self, policy, *args, **kwargs):
+    def create(self, policy):
         """
         Create DP optimizer.
...
@@ -93,25 +93,29 @@ class DPOptimizerClassFactory:
         Returns:
             Optimizer, A optimizer with DP.
         """
-        if policy == 'SGD':
-            cls = self._get_dp_optimizer_class(nn.SGD, self.mech, self._micro_batches, *args, **kwargs)
-            return cls
-        if policy == 'Momentum':
-            cls = self._get_dp_optimizer_class(nn.Momentum, self.mech, self._micro_batches, *args, **kwargs)
-            return cls
-        if policy == 'Adam':
-            cls = self._get_dp_optimizer_class(nn.Adam, self.mech, self._micro_batches, *args, **kwargs)
-            return cls
-        msg = "The {} is not implement, please choose ['SGD', 'Momentum', 'Adam']".format(policy)
-        LOGGER.error(TAG, msg)
-        raise NameError(msg)
-
-    def _get_dp_optimizer_class(self, cls, mech, micro_batches):
+        dp_opt_class = None
+        policy_ = policy.lower()
+        if policy_ == 'sgd':
+            dp_opt_class = self._get_dp_optimizer_class(nn.SGD)
+        elif policy_ == 'momentum':
+            dp_opt_class = self._get_dp_optimizer_class(nn.Momentum)
+        elif policy_ == 'adam':
+            dp_opt_class = self._get_dp_optimizer_class(nn.Adam)
+        else:
+            msg = "The {} optimizer is not implement, please choose ['SGD', 'Momentum', 'Adam']" \
+                .format(policy)
+            LOGGER.error(TAG, msg)
+            raise NameError(msg)
+        return dp_opt_class
+
+    def _get_dp_optimizer_class(self, opt_class):
         """
         Wrap original mindspore optimizer with `self._mech`.
         """
-        class DPOptimizer(cls):
+        mech = self._mech
+        micro_batches = self._micro_batches
+
+        class DPOptimizer(opt_class):
             """
             Initialize the DPOptimizerClass.
...
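Note the shape of the refactor: _get_dp_optimizer_class no longer receives mech and micro_batches as parameters; it hoists them from self into locals before defining the nested class, so DPOptimizer closes over plain values rather than the factory instance. A generic, runnable illustration of this class-factory closure pattern follows; all names are invented for the illustration.

    # Generic illustration of the closure pattern above (invented names):
    # hoist values into locals so the nested class captures them, not `self`.
    def make_scaled_adder(scale):
        scale_ = scale  # captured by the nested class below

        class ScaledAdder:
            def __init__(self, start=0):
                self.total = start

            def add(self, x):
                self.total += scale_ * x
                return self.total

        return ScaledAdder

    Doubler = make_scaled_adder(2)
    acc = Doubler()
    assert acc.add(3) == 6 and acc.add(4) == 14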
@@ -124,7 +128,7 @@ class DPOptimizerClassFactory:
                 self._mech = mech
                 self._tuple_add = _TupleAdd()
                 self._hyper_map = C.HyperMap()
-                self._micro_float = Tensor(micro_batches, mstype.float32)
+                self._micro_batches = Tensor(micro_batches, mstype.float32)
                 self._mech_param_updater = None
                 if self._mech is not None and self._mech._decay_policy is not None:
...
@@ -139,14 +143,20 @@ class DPOptimizerClassFactory:
                 """
                 construct a compute flow.
                 """
-                grad_noise = self._hyper_map(self._mech, gradients)
-                grads = self._tuple_add(gradients, grad_noise)
-                grads = self._hyper_map(F.partial(_grad_scale, self._micro_float), grads)
+                # generate noise
+                grad_noise_tuple = ()
+                for grad_item in gradients:
+                    grad_noise = self._mech(grad_item)
+                    grad_noise_tuple = grad_noise_tuple + (grad_noise,)
+                # add noise
+                gradients = self._tuple_add(gradients, grad_noise_tuple)
+                # div by self._micro_batches
+                gradients = self._hyper_map(F.partial(_grad_scale, self._micro_batches), gradients)
+                # update mech parameters
                 if self._mech_param_updater is not None:
                     multiplier = self._mech_param_updater()
-                    grads = F.depend(grads, multiplier)
-                gradients = super(DPOptimizer, self).construct(grads)
+                    gradients = F.depend(gradients, multiplier)
+                gradients = super(DPOptimizer, self).construct(gradients)
                 return gradients

         return DPOptimizer
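The heart of the graph-mode change is in construct(): the old code applied the noise mechanism through C.HyperMap, while the new code walks the gradients tuple with a plain Python for loop and accumulates the noised results into a tuple. MindSpore's graph compiler unrolls such loops over tuple inputs at compile time, so the per-gradient noise step becomes expressible in a compiled graph. Below is a standalone sketch of the same tuple-accumulation pattern, with `item * 2` as a stand-in for the mechanism call; MindSpore API usage is assumed to match roughly contemporary versions.

    # Standalone sketch of the graph-mode tuple-accumulation pattern used in
    # construct(); `item * 2` stands in for the noise mechanism.
    import numpy as np
    import mindspore.nn as nn
    from mindspore import Tensor, context

    context.set_context(mode=context.GRAPH_MODE)

    class PerItem(nn.Cell):
        def construct(self, tensors):
            out = ()
            for item in tensors:        # unrolled when the graph is compiled
                out = out + (item * 2,)
            return out

    net = PerItem()
    grads = (Tensor(np.ones((2, 2), np.float32)),
             Tensor(np.ones((3,), np.float32)))
    print(net(grads))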
mindarmour/diff_privacy/train/model.py

...
@@ -142,10 +142,6 @@ class DPModel(Model):
             raise ValueError(msg)
         if noise_mech is None:
             if "DPOptimizer" in opt_name:
-                if context.get_context('mode') != context.PYNATIVE_MODE:
-                    msg = 'DPOptimizer just support pynative mode currently.'
-                    LOGGER.error(TAG, msg)
-                    raise ValueError(msg)
                 if 'Ada' in opt._mech.__class__.__name__ and clip_mech is not None:
                     msg = "When DPOptimizer's mech method is adaptive, clip_mech must be None."
                     LOGGER.error(TAG, msg)
...
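Only the PyNative-only guard is dropped here; the adjacent check still rejects pairing an adaptive noise mechanism inside the optimizer with a clip_mech. A fragment-style sketch of that constraint from the caller's side is below — the 'AdaGaussian' policy name and the DPModel keyword arguments are assumptions from roughly contemporary MindArmour, and the network pieces are elided, so this is not self-contained.

    # Hedged fragment: the check that remains in DPModel. An adaptive noise
    # mechanism inside DPOptimizer plus a non-None clip_mech raises ValueError,
    # so pass clip_mech=None in this configuration.
    factory = DPOptimizerClassFactory(micro_batches=2)
    factory.set_mechanisms('AdaGaussian', norm_bound=1.0, initial_noise_multiplier=1.5)
    opt = factory.create('Momentum')(params=network.trainable_params(),
                                     learning_rate=0.01, momentum=0.9)
    model = DPModel(micro_batches=2, norm_bound=1.0,
                    noise_mech=None,   # noise is already added by the optimizer
                    clip_mech=None,    # must be None with an 'Ada*' mechanism
                    network=network, loss_fn=net_loss, optimizer=opt)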
登录