diff --git a/example/mnist_demo/lenet5_config.py b/example/mnist_demo/lenet5_config.py
index 69e8efac63ddc9aff83520660797f6d749a5d8cf..e9ac0480a560d31d14ff6821fab95d28733ec767 100644
--- a/example/mnist_demo/lenet5_config.py
+++ b/example/mnist_demo/lenet5_config.py
@@ -33,7 +33,7 @@ mnist_cfg = edict({
     'dataset_sink_mode': False,  # whether deliver all training data to device one time
     'micro_batches': 16,  # the number of small batches split from an original batch
     'norm_clip': 1.0,  # the clip bound of the gradients of model's training parameters
-    'initial_noise_multiplier': 0.2,  # the initial multiplication coefficient of the noise added to training
+    'initial_noise_multiplier': 1.5,  # the initial multiplication coefficient of the noise added to training
                                       # parameters' gradients
     'mechanisms': 'AdaGaussian',  # the method of adding noise in gradients while training
     'optimizer': 'Momentum'  # the base optimizer used for Differential privacy training
diff --git a/example/mnist_demo/lenet5_dp.py b/example/mnist_demo/lenet5_dp.py
index c86bf1bcc0523aa288904b4da66e5a9e8410b5b9..bf62fd3155c71a06a14a7d109bfbd7c4d09b0078 100644
--- a/example/mnist_demo/lenet5_dp.py
+++ b/example/mnist_demo/lenet5_dp.py
@@ -87,7 +87,7 @@ def generate_mnist_dataset(data_path, batch_size=32, repeat_size=1,
 
 if __name__ == "__main__":
     # This configure can run both in pynative mode and graph mode
-    context.set_context(mode=context.PYNATIVE_MODE, device_target=cfg.device_target)
+    context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target)
     network = LeNet5()
     net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
     config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps,
diff --git a/mindarmour/diff_privacy/mechanisms/mechanisms.py b/mindarmour/diff_privacy/mechanisms/mechanisms.py
index cdcf3a5a64461b70fe8d5c9b3e16ec615493151b..d58724f986f37569013aa92dd28c44fdbc133aa5 100644
--- a/mindarmour/diff_privacy/mechanisms/mechanisms.py
+++ b/mindarmour/diff_privacy/mechanisms/mechanisms.py
@@ -37,8 +37,8 @@ class MechanismsFactory:
         """
         Args:
             policy(str): Noise generated strategy, could be 'Gaussian' or
-                'AdaGaussian'. Noise would be decayed with 'AdaGaussian' mechanism while
-                be constant with 'Gaussian' mechanism. Default: 'AdaGaussian'.
+                'AdaGaussian'. Noise would be decayed with 'AdaGaussian' mechanism
+                while be constant with 'Gaussian' mechanism.
             args(Union[float, str]): Parameters used for creating noise
                 mechanisms.
             kwargs(Union[float, str]): Parameters used for creating noise
@@ -74,7 +74,7 @@ class GaussianRandom(Mechanisms):
 
     Args:
         norm_bound(float): Clipping bound for the l2 norm of the gradients.
-            Default: 1.0.
+            Default: 0.5.
         initial_noise_multiplier(float): Ratio of the standard deviation of
             Gaussian noise divided by the norm_bound, which will be used to
             calculate privacy spent. Default: 1.5.
@@ -86,14 +86,14 @@ class GaussianRandom(Mechanisms):
 
     Examples:
         >>> gradients = Tensor([0.2, 0.9], mstype.float32)
-        >>> norm_bound = 1.0
-        >>> initial_noise_multiplier = 0.1
+        >>> norm_bound = 0.5
+        >>> initial_noise_multiplier = 1.5
         >>> net = GaussianRandom(norm_bound, initial_noise_multiplier)
         >>> res = net(gradients)
         >>> print(res)
     """
 
-    def __init__(self, norm_bound=1.0, initial_noise_multiplier=1.5, mean=0.0, seed=0):
+    def __init__(self, norm_bound=0.5, initial_noise_multiplier=1.5, mean=0.0, seed=0):
         super(GaussianRandom, self).__init__()
         self._norm_bound = check_value_positive('norm_bound', norm_bound)
         self._norm_bound = Tensor(norm_bound, mstype.float32)
@@ -128,10 +128,10 @@ class AdaGaussianRandom(Mechanisms):
 
     Args:
         norm_bound(float): Clipping bound for the l2 norm of the gradients.
-            Default: 1.5.
+            Default: 1.0.
         initial_noise_multiplier(float): Ratio of the standard deviation of
             Gaussian noise divided by the norm_bound, which will be used to
-            calculate privacy spent. Default: 5.0.
+            calculate privacy spent. Default: 1.5.
         mean(float): Average value of random noise. Default: 0.0
         noise_decay_rate(float): Hyper parameter for controlling the noise
             decay. Default: 6e-4.
@@ -145,7 +145,7 @@ class AdaGaussianRandom(Mechanisms):
     Examples:
         >>> gradients = Tensor([0.2, 0.9], mstype.float32)
         >>> norm_bound = 1.0
-        >>> initial_noise_multiplier = 5.0
+        >>> initial_noise_multiplier = 1.5
        >>> mean = 0.0
        >>> noise_decay_rate = 6e-4
        >>> decay_policy = "Time"
@@ -155,7 +155,7 @@ class AdaGaussianRandom(Mechanisms):
         >>> print(res)
     """
 
-    def __init__(self, norm_bound=1.5, initial_noise_multiplier=5.0, mean=0.0,
+    def __init__(self, norm_bound=1.0, initial_noise_multiplier=1.5, mean=0.0,
                  noise_decay_rate=6e-4, decay_policy='Time', seed=0):
         super(AdaGaussianRandom, self).__init__()
         norm_bound = check_value_positive('norm_bound', norm_bound)
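For reviewers who want to sanity-check the new defaults outside the LeNet-5 demo, the sketch below mirrors the updated docstring Examples: it builds both mechanisms with the values this patch settles on and applies them to a toy gradient tensor. It is a sketch, not part of the patch; the `mindarmour.diff_privacy.mechanisms.mechanisms` import path is assumed from the file layout shown in the diff, and the `Tensor`/`mstype` imports assume a MindSpore version compatible with this branch.

```python
# Minimal sketch mirroring the updated docstring Examples in this patch.
# Assumption: the classes are importable from the module path shown in the diff.
from mindspore import Tensor
from mindspore.common import dtype as mstype

from mindarmour.diff_privacy.mechanisms.mechanisms import (
    AdaGaussianRandom,
    GaussianRandom,
)

# Toy gradient tensor, as used in the docstring Examples.
gradients = Tensor([0.2, 0.9], mstype.float32)

# Constant-noise mechanism with the new defaults:
# noise stddev = initial_noise_multiplier * norm_bound = 1.5 * 0.5 at every step.
gaussian = GaussianRandom(norm_bound=0.5, initial_noise_multiplier=1.5)
print(gaussian(gradients))

# Adaptive mechanism with the new defaults: starts from the same 1.5 multiplier
# and decays it each step under the 'Time' policy.
ada_gaussian = AdaGaussianRandom(norm_bound=1.0, initial_noise_multiplier=1.5,
                                 mean=0.0, noise_decay_rate=6e-4,
                                 decay_policy='Time')
print(ada_gaussian(gradients))
```

The same values reach the demo through `mnist_cfg`: `'mechanisms': 'AdaGaussian'` selects the adaptive variant, and `'initial_noise_multiplier': 1.5` now matches the class defaults updated in `mechanisms.py`.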