Commit 4f2b3cf4 authored by mindspore-ci-bot, committed by Gitee

!40 Solve issue:[CT][MA][DP]TGaussian default parameters in graph mode is unqualified.  https://gitee.com/mindspore/dashboard/issues?id=I1LMJD
Merge pull request !40 from ZhidanLiu/master
@@ -33,7 +33,7 @@ mnist_cfg = edict({
     'dataset_sink_mode': False,  # whether deliver all training data to device one time
     'micro_batches': 16,  # the number of small batches split from an original batch
     'norm_clip': 1.0,  # the clip bound of the gradients of model's training parameters
-    'initial_noise_multiplier': 0.2,  # the initial multiplication coefficient of the noise added to training
+    'initial_noise_multiplier': 1.5,  # the initial multiplication coefficient of the noise added to training
                                       # parameters' gradients
     'mechanisms': 'AdaGaussian',  # the method of adding noise in gradients while training
     'optimizer': 'Momentum'  # the base optimizer used for Differential privacy training
...
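With this config, gradients are clipped to norm_clip and perturbed with Gaussian noise whose standard deviation is roughly norm_clip * initial_noise_multiplier, so raising the multiplier from 0.2 to 1.5 raises the per-step noise std from 0.2 to 1.5. A minimal NumPy sketch of that clip-then-add-noise step, illustrative only and not the MindArmour implementation:

import numpy as np

norm_clip = 1.0                  # clip bound from mnist_cfg
initial_noise_multiplier = 1.5   # new default introduced by this commit

# Clip a gradient vector to the l2 bound, then add Gaussian noise whose
# standard deviation is norm_clip * initial_noise_multiplier.
grad = np.array([0.2, 0.9, -1.4], dtype=np.float32)
scale = min(1.0, norm_clip / np.linalg.norm(grad))
noisy_grad = grad * scale + np.random.normal(0.0, norm_clip * initial_noise_multiplier, grad.shape)
print(noisy_grad)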
@@ -87,7 +87,7 @@ def generate_mnist_dataset(data_path, batch_size=32, repeat_size=1,
 if __name__ == "__main__":
     # This configure can run both in pynative mode and graph mode
-    context.set_context(mode=context.PYNATIVE_MODE, device_target=cfg.device_target)
+    context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target)
     network = LeNet5()
     net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
     config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps,
...
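The hunk above only switches the demo's default execution mode. A minimal sketch of the call, assuming cfg.device_target resolves to 'Ascend' in this demo:

from mindspore import context

# Graph mode is now the demo default; the script's comment notes that
# pynative mode also works, so the original call is kept as a comment.
context.set_context(mode=context.GRAPH_MODE, device_target='Ascend')   # device target assumed
# context.set_context(mode=context.PYNATIVE_MODE, device_target='Ascend')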
@@ -37,8 +37,8 @@ class MechanismsFactory:
     """
     Args:
         policy(str): Noise generated strategy, could be 'Gaussian' or
-            'AdaGaussian'. Noise would be decayed with 'AdaGaussian' mechanism while
-            be constant with 'Gaussian' mechanism. Default: 'AdaGaussian'.
+            'AdaGaussian'. Noise would be decayed with 'AdaGaussian' mechanism
+            while be constant with 'Gaussian' mechanism.
         args(Union[float, str]): Parameters used for creating noise
             mechanisms.
         kwargs(Union[float, str]): Parameters used for creating noise
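A hedged usage sketch for the factory documented above. The import path and the create() call are assumptions based on this docstring and the demo config; only the Args section is visible in this diff:

from mindarmour.diff_privacy import MechanismsFactory  # assumed import path

# Assumption: the factory exposes a create(policy, *args, **kwargs) method
# that forwards its arguments to the chosen mechanism's constructor.
noise_mech = MechanismsFactory().create('AdaGaussian',
                                        norm_bound=1.0,
                                        initial_noise_multiplier=1.5,
                                        noise_decay_rate=6e-4,
                                        decay_policy='Time')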
@@ -74,7 +74,7 @@ class GaussianRandom(Mechanisms):
     Args:
         norm_bound(float): Clipping bound for the l2 norm of the gradients.
-            Default: 1.0.
+            Default: 0.5.
         initial_noise_multiplier(float): Ratio of the standard deviation of
             Gaussian noise divided by the norm_bound, which will be used to
             calculate privacy spent. Default: 1.5.
@@ -86,14 +86,14 @@ class GaussianRandom(Mechanisms):
     Examples:
         >>> gradients = Tensor([0.2, 0.9], mstype.float32)
-        >>> norm_bound = 1.0
-        >>> initial_noise_multiplier = 0.1
+        >>> norm_bound = 0.5
+        >>> initial_noise_multiplier = 1.5
         >>> net = GaussianRandom(norm_bound, initial_noise_multiplier)
         >>> res = net(gradients)
         >>> print(res)
     """
-    def __init__(self, norm_bound=1.0, initial_noise_multiplier=1.5, mean=0.0, seed=0):
+    def __init__(self, norm_bound=0.5, initial_noise_multiplier=1.5, mean=0.0, seed=0):
         super(GaussianRandom, self).__init__()
         self._norm_bound = check_value_positive('norm_bound', norm_bound)
         self._norm_bound = Tensor(norm_bound, mstype.float32)
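Per the docstring, GaussianRandom adds noise whose standard deviation stays fixed across training steps: stddev = norm_bound * initial_noise_multiplier. A NumPy sketch of that behaviour with the new defaults, illustrative only and not the MindSpore operator code:

import numpy as np

def gaussian_random(gradients, norm_bound=0.5, initial_noise_multiplier=1.5,
                    mean=0.0, seed=0):
    """Add non-decaying Gaussian noise to gradients (illustrative sketch)."""
    rng = np.random.default_rng(seed)
    stddev = norm_bound * initial_noise_multiplier  # constant for every call
    return gradients + rng.normal(mean, stddev, size=gradients.shape)

grads = np.array([0.2, 0.9], dtype=np.float32)
print(gaussian_random(grads))  # mirrors the docstring example above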
@@ -128,10 +128,10 @@ class AdaGaussianRandom(Mechanisms):
     Args:
         norm_bound(float): Clipping bound for the l2 norm of the gradients.
-            Default: 1.5.
+            Default: 1.0.
         initial_noise_multiplier(float): Ratio of the standard deviation of
             Gaussian noise divided by the norm_bound, which will be used to
-            calculate privacy spent. Default: 5.0.
+            calculate privacy spent. Default: 1.5.
         mean(float): Average value of random noise. Default: 0.0
         noise_decay_rate(float): Hyper parameter for controlling the noise decay.
             Default: 6e-4.
@@ -145,7 +145,7 @@ class AdaGaussianRandom(Mechanisms):
     Examples:
         >>> gradients = Tensor([0.2, 0.9], mstype.float32)
         >>> norm_bound = 1.0
-        >>> initial_noise_multiplier = 5.0
+        >>> initial_noise_multiplier = 1.5
         >>> mean = 0.0
         >>> noise_decay_rate = 6e-4
         >>> decay_policy = "Time"
@@ -155,7 +155,7 @@ class AdaGaussianRandom(Mechanisms):
         >>> print(res)
     """
-    def __init__(self, norm_bound=1.5, initial_noise_multiplier=5.0, mean=0.0,
+    def __init__(self, norm_bound=1.0, initial_noise_multiplier=1.5, mean=0.0,
                  noise_decay_rate=6e-4, decay_policy='Time', seed=0):
         super(AdaGaussianRandom, self).__init__()
         norm_bound = check_value_positive('norm_bound', norm_bound)
...
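AdaGaussianRandom starts from the same stddev formula but shrinks the noise multiplier as training proceeds. With decay_policy='Time', a common form of the schedule is initial_noise_multiplier / (1 + t * noise_decay_rate) at step t; the exact update lives in mechanisms.py and is not shown in this diff, so the formula below is an assumption. An illustrative NumPy sketch under that assumption:

import numpy as np

def ada_gaussian_schedule(initial_noise_multiplier=1.5, noise_decay_rate=6e-4,
                          steps=5):
    """Assumed 'Time' decay schedule for the noise multiplier (sketch only)."""
    return [initial_noise_multiplier / (1.0 + t * noise_decay_rate)
            for t in range(steps)]

norm_bound = 1.0
for t, multiplier in enumerate(ada_gaussian_schedule()):
    stddev = norm_bound * multiplier   # noise std shrinks step by step
    print(t, round(stddev, 6))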