diff --git a/example/mnist_demo/lenet5_dp.py b/example/mnist_demo/lenet5_dp.py
index 9bb0044536793746708ea86e142e0f12c05c0fd2..65aa63ce4b73a0fd0c7dc254963880d502ab17d0 100644
--- a/example/mnist_demo/lenet5_dp.py
+++ b/example/mnist_demo/lenet5_dp.py
@@ -116,7 +116,7 @@ if __name__ == "__main__":
     noise_mech = NoiseMechanismsFactory().create(cfg.noise_mechanisms,
                                                  norm_bound=cfg.norm_bound,
                                                  initial_noise_multiplier=cfg.initial_noise_multiplier,
-                                                 noise_update=None)
+                                                 decay_policy=None)
     # Create a factory class of clip mechanisms, this method is to adaptive clip
     # gradients while training, decay_policy support 'Linear' and 'Geometric',
     # learning_rate is the learning rate to update clip_norm,
diff --git a/example/mnist_demo/lenet5_dp_ada_gaussian.py b/example/mnist_demo/lenet5_dp_ada_gaussian.py
index 74c641d31776b1eb92eb2ad3639baa42c265f5db..23ac8517b360a6bb6e17cb63afdce5190050e8c6 100644
--- a/example/mnist_demo/lenet5_dp_ada_gaussian.py
+++ b/example/mnist_demo/lenet5_dp_ada_gaussian.py
@@ -115,7 +115,7 @@ if __name__ == "__main__":
     noise_mech = NoiseMechanismsFactory().create(cfg.noise_mechanisms,
                                                  norm_bound=cfg.norm_bound,
                                                  initial_noise_multiplier=cfg.initial_noise_multiplier,
-                                                 noise_update='Exp')
+                                                 decay_policy='Exp')
     net_opt = nn.Momentum(params=network.trainable_params(), learning_rate=cfg.lr,
                           momentum=cfg.momentum)
 
diff --git a/example/mnist_demo/lenet5_dp_pynative_model.py b/example/mnist_demo/lenet5_dp_pynative_model.py
index 13374c17a82f0b05b3cedf075cac10507c12fab9..ab86fdbc73f7bec9559d37609fdb31c9a248b418 100644
--- a/example/mnist_demo/lenet5_dp_pynative_model.py
+++ b/example/mnist_demo/lenet5_dp_pynative_model.py
@@ -111,7 +111,7 @@ if __name__ == "__main__":
     dp_opt.set_mechanisms(cfg.noise_mechanisms,
                           norm_bound=cfg.norm_bound,
                           initial_noise_multiplier=cfg.initial_noise_multiplier,
-                          noise_update='Exp')
+                          decay_policy='Exp')
     # Create a factory class of clip mechanisms, this method is to adaptive clip
     # gradients while training, decay_policy support 'Linear' and 'Geometric',
     # learning_rate is the learning rate to update clip_norm,
diff --git a/mindarmour/diff_privacy/mechanisms/mechanisms.py b/mindarmour/diff_privacy/mechanisms/mechanisms.py
index 0aabcd47d3650b8221b5399b7aba03a99adc39dc..1bde5e97ba65e26e42fb60bfcd6de6d1ff69854b 100644
--- a/mindarmour/diff_privacy/mechanisms/mechanisms.py
+++ b/mindarmour/diff_privacy/mechanisms/mechanisms.py
@@ -83,7 +83,7 @@ class NoiseMechanismsFactory:
     @staticmethod
     def create(mech_name='Gaussian', norm_bound=0.5,
                initial_noise_multiplier=1.5, seed=0, noise_decay_rate=6e-6,
-               noise_update=None):
+               decay_policy=None):
         """
         Args:
             mech_name(str): Noise generated strategy, could be 'Gaussian' or
@@ -97,7 +97,7 @@ class NoiseMechanismsFactory:
                 random number. IF seed!=0 random normal will generate values using
                 given seed.
             noise_decay_rate(float): Hyper parameter for controlling the noise decay.
-            noise_update(str): Mechanisms parameters update policy. Default: None, no
+            decay_policy(str): Mechanisms parameters update policy. Default: None, no
                 parameters need update.
 
         Raises:
@@ -141,13 +141,13 @@ class NoiseMechanismsFactory:
             return NoiseGaussianRandom(norm_bound=norm_bound,
                                        initial_noise_multiplier=initial_noise_multiplier,
                                        seed=seed,
-                                       noise_update=noise_update)
+                                       decay_policy=decay_policy)
         if mech_name == 'AdaGaussian':
             return NoiseAdaGaussianRandom(norm_bound=norm_bound,
                                           initial_noise_multiplier=initial_noise_multiplier,
                                           seed=seed,
                                           noise_decay_rate=noise_decay_rate,
-                                          noise_update=noise_update)
+                                          decay_policy=decay_policy)
 
         raise NameError("The {} is not implement, please choose "
                         "['Gaussian', 'AdaGaussian']".format(mech_name))
@@ -176,7 +176,7 @@ class NoiseGaussianRandom(_Mechanisms):
        seed(int): Original random seed, if seed=0 random normal will use secure
            random number. IF seed!=0 random normal will generate values using
            given seed.
-       noise_update(str): Mechanisms parameters update policy. Default: None.
+       decay_policy(str): Mechanisms parameters update policy. Default: None.
 
     Returns:
         Tensor, generated noise with shape like given gradients.
@@ -186,13 +186,13 @@ class NoiseGaussianRandom(_Mechanisms):
        >>> norm_bound = 0.5
        >>> initial_noise_multiplier = 1.5
        >>> seed = 0
-       >>> noise_update = None
-       >>> net = NoiseGaussianRandom(norm_bound, initial_noise_multiplier, seed, noise_update)
+       >>> decay_policy = None
+       >>> net = NoiseGaussianRandom(norm_bound, initial_noise_multiplier, seed, decay_policy)
        >>> res = net(gradients)
        >>> print(res)
     """
 
-    def __init__(self, norm_bound, initial_noise_multiplier, seed, noise_update=None):
+    def __init__(self, norm_bound, initial_noise_multiplier, seed, decay_policy=None):
         super(NoiseGaussianRandom, self).__init__()
         self._norm_bound = check_value_positive('norm_bound', norm_bound)
         self._norm_bound = Tensor(norm_bound, mstype.float32)
@@ -200,9 +200,9 @@ class NoiseGaussianRandom(_Mechanisms):
                                                               initial_noise_multiplier)
         self._initial_noise_multiplier = Tensor(initial_noise_multiplier, mstype.float32)
         self._mean = Tensor(0, mstype.float32)
-        if noise_update is not None:
-            raise ValueError('noise_update must be None in GaussianRandom class, but got {}.'.format(noise_update))
-        self._noise_update = noise_update
+        if decay_policy is not None:
+            raise ValueError('decay_policy must be None in GaussianRandom class, but got {}.'.format(decay_policy))
+        self._decay_policy = decay_policy
         self._seed = seed
 
     def construct(self, gradients):
@@ -237,7 +237,7 @@ class NoiseAdaGaussianRandom(NoiseGaussianRandom):
            random number. IF seed!=0 random normal will generate values using
            given seed.
        noise_decay_rate(float): Hyper parameter for controlling the noise decay.
-       noise_update(str): Noise decay strategy include 'Step', 'Time', 'Exp'.
+       decay_policy(str): Noise decay strategy include 'Step', 'Time', 'Exp'.
 
     Returns:
         Tensor, generated noise with shape like given gradients.
@@ -248,13 +248,13 @@ class NoiseAdaGaussianRandom(NoiseGaussianRandom):
        >>> initial_noise_multiplier = 1.5
        >>> seed = 0
        >>> noise_decay_rate = 6e-4
-       >>> noise_update = "Time"
-       >>> net = NoiseAdaGaussianRandom(norm_bound, initial_noise_multiplier, seed, noise_decay_rate, noise_update)
+       >>> decay_policy = "Time"
+       >>> net = NoiseAdaGaussianRandom(norm_bound, initial_noise_multiplier, seed, noise_decay_rate, decay_policy)
        >>> res = net(gradients)
        >>> print(res)
     """
 
-    def __init__(self, norm_bound, initial_noise_multiplier, seed, noise_decay_rate, noise_update):
+    def __init__(self, norm_bound, initial_noise_multiplier, seed, noise_decay_rate, decay_policy):
         super(NoiseAdaGaussianRandom, self).__init__(norm_bound=norm_bound,
                                                      initial_noise_multiplier=initial_noise_multiplier,
                                                      seed=seed)
@@ -263,10 +263,10 @@ class NoiseAdaGaussianRandom(NoiseGaussianRandom):
         noise_decay_rate = check_param_type('noise_decay_rate', noise_decay_rate, float)
         check_param_in_range('noise_decay_rate', noise_decay_rate, 0.0, 1.0)
         self._noise_decay_rate = Tensor(noise_decay_rate, mstype.float32)
-        if noise_update not in ['Time', 'Step', 'Exp']:
-            raise NameError("The noise_update must be in ['Time', 'Step', 'Exp'], but "
-                            "get {}".format(noise_update))
-        self._noise_update = noise_update
+        if decay_policy not in ['Time', 'Step', 'Exp']:
+            raise NameError("The decay_policy must be in ['Time', 'Step', 'Exp'], but "
+                            "get {}".format(decay_policy))
+        self._decay_policy = decay_policy
 
 
 class _MechanismsParamsUpdater(Cell):
@@ -274,7 +274,7 @@ class _MechanismsParamsUpdater(Cell):
     Update mechanisms parameters, the parameters will refresh in train period.
 
     Args:
-        noise_update(str): Pass in by the mechanisms class, mechanisms parameters
+        decay_policy(str): Pass in by the mechanisms class, mechanisms parameters
             update policy.
         decay_rate(Tensor): Pass in by the mechanisms class, hyper parameter for
             controlling the decay size.
@@ -286,9 +286,9 @@ class _MechanismsParamsUpdater(Cell):
     Returns:
         Tuple, next params value.
     """
-    def __init__(self, noise_update, decay_rate, cur_noise_multiplier, init_noise_multiplier):
+    def __init__(self, decay_policy, decay_rate, cur_noise_multiplier, init_noise_multiplier):
         super(_MechanismsParamsUpdater, self).__init__()
-        self._noise_update = noise_update
+        self._decay_policy = decay_policy
         self._decay_rate = decay_rate
         self._cur_noise_multiplier = cur_noise_multiplier
         self._init_noise_multiplier = init_noise_multiplier
@@ -308,12 +308,12 @@ class _MechanismsParamsUpdater(Cell):
         Returns:
             Tuple, next step parameters value.
         """
-        if self._noise_update == 'Time':
+        if self._decay_policy == 'Time':
             temp = self._div(self._init_noise_multiplier, self._cur_noise_multiplier)
             temp = self._add(temp, self._decay_rate)
             next_noise_multiplier = self._assign(self._cur_noise_multiplier,
                                                  self._div(self._init_noise_multiplier, temp))
-        elif self._noise_update == 'Step':
+        elif self._decay_policy == 'Step':
             temp = self._sub(self._one, self._decay_rate)
             next_noise_multiplier = self._assign(self._cur_noise_multiplier,
                                                  self._mul(temp, self._cur_noise_multiplier))
diff --git a/mindarmour/diff_privacy/optimizer/optimizer.py b/mindarmour/diff_privacy/optimizer/optimizer.py
index dfaae14c5d5a854f62e29a11c1f8ef14c44a2e8d..a06e1f13d68db488c00d8b0a5a5267c467d19630 100644
--- a/mindarmour/diff_privacy/optimizer/optimizer.py
+++ b/mindarmour/diff_privacy/optimizer/optimizer.py
@@ -127,8 +127,8 @@ class DPOptimizerClassFactory:
                 self._micro_float = Tensor(micro_batches, mstype.float32)
 
                 self._mech_param_updater = None
-                if self._mech is not None and self._mech._noise_update is not None:
-                    self._mech_param_updater = _MechanismsParamsUpdater(noise_update=self._mech._noise_update,
+                if self._mech is not None and self._mech._decay_policy is not None:
+                    self._mech_param_updater = _MechanismsParamsUpdater(decay_policy=self._mech._decay_policy,
                                                                         decay_rate=self._mech._noise_decay_rate,
                                                                         cur_noise_multiplier=
                                                                         self._mech._noise_multiplier,
diff --git a/mindarmour/diff_privacy/train/model.py b/mindarmour/diff_privacy/train/model.py
index 50fc66145b37a05f9b3355bb174d3e34b2528ca0..f235cb6b802884bb87aa65c32c2d41aeeaeae966 100644
--- a/mindarmour/diff_privacy/train/model.py
+++ b/mindarmour/diff_privacy/train/model.py
@@ -432,9 +432,9 @@ class _TrainOneStepWithLossScaleCell(Cell):
         self._cast = P.Cast()
 
         self._noise_mech_param_updater = None
-        if self._noise_mech is not None and self._noise_mech._noise_update is not None:
+        if self._noise_mech is not None and self._noise_mech._decay_policy is not None:
             self._noise_mech_param_updater = _MechanismsParamsUpdater(
-                noise_update=self._noise_mech._noise_update,
+                decay_policy=self._noise_mech._decay_policy,
                 decay_rate=self._noise_mech._noise_decay_rate,
                 cur_noise_multiplier=
                 self._noise_mech._noise_multiplier,
@@ -636,9 +636,9 @@ class _TrainOneStepCell(Cell):
         self._micro_float = Tensor(micro_batches, mstype.float32)
 
         self._noise_mech_param_updater = None
-        if self._noise_mech is not None and self._noise_mech._noise_update is not None:
+        if self._noise_mech is not None and self._noise_mech._decay_policy is not None:
             self._noise_mech_param_updater = _MechanismsParamsUpdater(
-                noise_update=self._noise_mech._noise_update,
+                decay_policy=self._noise_mech._decay_policy,
                 decay_rate=self._noise_mech._noise_decay_rate,
                 cur_noise_multiplier=
                 self._noise_mech._noise_multiplier,
diff --git a/setup.py b/setup.py
index 704e23c4c7fad664884ed49d8be604edbcc265ac..b622c3e714327a66a01b5ca750392b3a2f6fefcf 100644
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@ from setuptools import setup
 from setuptools.command.egg_info import egg_info
 from setuptools.command.build_py import build_py
 
-version = '0.5.0'
+version = '0.6.0'
 
 cur_dir = os.path.dirname(os.path.realpath(__file__))
 pkg_dir = os.path.join(cur_dir, 'build')
diff --git a/tests/ut/python/diff_privacy/test_mechanisms.py b/tests/ut/python/diff_privacy/test_mechanisms.py
index e91a6e47e6992f228f19ab16b2751edb960b8155..d014a3a8f3454c894d1d73a4f9dafeaa9d80874a 100644
--- a/tests/ut/python/diff_privacy/test_mechanisms.py
+++ b/tests/ut/python/diff_privacy/test_mechanisms.py
@@ -35,7 +35,7 @@ def test_graph_factory():
     norm_bound = 1.0
     initial_noise_multiplier = 0.1
     alpha = 0.5
-    noise_update = 'Step'
+    decay_policy = 'Step'
     factory = NoiseMechanismsFactory()
     noise_mech = factory.create('Gaussian',
                                 norm_bound,
@@ -46,7 +46,7 @@ def test_graph_factory():
                                     norm_bound,
                                     initial_noise_multiplier,
                                     noise_decay_rate=alpha,
-                                    noise_update=noise_update)
+                                    decay_policy=decay_policy)
     ada_noise = ada_noise_mech(grad)
     print('ada noise: ', ada_noise)
 
@@ -61,7 +61,7 @@ def test_pynative_factory():
     norm_bound = 1.0
     initial_noise_multiplier = 0.1
     alpha = 0.5
-    noise_update = 'Step'
+    decay_policy = 'Step'
     factory = NoiseMechanismsFactory()
     noise_mech = factory.create('Gaussian',
                                 norm_bound,
@@ -72,7 +72,7 @@ def test_pynative_factory():
                                     norm_bound,
                                     initial_noise_multiplier,
                                     noise_decay_rate=alpha,
-                                    noise_update=noise_update)
+                                    decay_policy=decay_policy)
     ada_noise = ada_noise_mech(grad)
     print('ada noise: ', ada_noise)
 
@@ -87,7 +87,7 @@ def test_pynative_gaussian():
     norm_bound = 1.0
     initial_noise_multiplier = 0.1
     alpha = 0.5
-    noise_update = 'Step'
+    decay_policy = 'Step'
     factory = NoiseMechanismsFactory()
     noise_mech = factory.create('Gaussian',
                                 norm_bound,
@@ -98,7 +98,7 @@ def test_pynative_gaussian():
                                     norm_bound,
                                     initial_noise_multiplier,
                                     noise_decay_rate=alpha,
-                                    noise_update=noise_update)
+                                    decay_policy=decay_policy)
     ada_noise = ada_noise_mech(grad)
     print('ada noise: ', ada_noise)
 
@@ -113,12 +113,12 @@ def test_graph_ada_gaussian():
     norm_bound = 1.0
     initial_noise_multiplier = 0.1
     noise_decay_rate = 0.5
-    noise_update = 'Step'
+    decay_policy = 'Step'
     ada_noise_mech = NoiseAdaGaussianRandom(norm_bound,
                                             initial_noise_multiplier,
                                             seed=0,
                                             noise_decay_rate=noise_decay_rate,
-                                            noise_update=noise_update)
+                                            decay_policy=decay_policy)
     res = ada_noise_mech(grad)
     print(res)
 
@@ -133,12 +133,12 @@ def test_pynative_ada_gaussian():
     norm_bound = 1.0
     initial_noise_multiplier = 0.1
     noise_decay_rate = 0.5
-    noise_update = 'Step'
+    decay_policy = 'Step'
     ada_noise_mech = NoiseAdaGaussianRandom(norm_bound,
                                             initial_noise_multiplier,
                                             seed=0,
                                             noise_decay_rate=noise_decay_rate,
-                                            noise_update=noise_update)
+                                            decay_policy=decay_policy)
     res = ada_noise_mech(grad)
     print(res)
 
@@ -153,13 +153,13 @@ def test_graph_exponential():
     norm_bound = 1.0
     initial_noise_multiplier = 0.1
     alpha = 0.5
-    noise_update = 'Exp'
+    decay_policy = 'Exp'
     factory = NoiseMechanismsFactory()
     ada_noise = factory.create('AdaGaussian',
                                norm_bound,
                                initial_noise_multiplier,
                                noise_decay_rate=alpha,
-                               noise_update=noise_update)
+                               decay_policy=decay_policy)
     ada_noise = ada_noise(grad)
     print('ada noise: ', ada_noise)
 
@@ -174,13 +174,13 @@ def test_pynative_exponential():
     norm_bound = 1.0
     initial_noise_multiplier = 0.1
     alpha = 0.5
-    noise_update = 'Exp'
+    decay_policy = 'Exp'
     factory = NoiseMechanismsFactory()
     ada_noise = factory.create('AdaGaussian',
                                norm_bound,
                                initial_noise_multiplier,
                                noise_decay_rate=alpha,
-                               noise_update=noise_update)
+                               decay_policy=decay_policy)
     ada_noise = ada_noise(grad)
     print('ada noise: ', ada_noise)
 
diff --git a/tests/ut/python/diff_privacy/test_model_train.py b/tests/ut/python/diff_privacy/test_model_train.py
index 068ed0e700543bf7ba3f4fefb3b4205661cf0db6..e27858d4a36aa56ebbaacd49ef6fe9f11f5b90fd 100644
--- a/tests/ut/python/diff_privacy/test_model_train.py
+++ b/tests/ut/python/diff_privacy/test_model_train.py
@@ -136,7 +136,7 @@ def test_dp_model_with_graph_mode_ada_gaussian():
                                               norm_bound=norm_bound,
                                               initial_noise_multiplier=initial_noise_multiplier,
                                               noise_decay_rate=alpha,
-                                              noise_update='Exp')
+                                              decay_policy='Exp')
     clip_mech = None
     net_opt = nn.Momentum(network.trainable_params(), learning_rate=0.1,
                           momentum=0.9)
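
For readers picking up the rename, the snippet below is a minimal usage sketch, not part of the change itself: it imports the factory straight from the module path touched in this diff (a shorter public re-export may also exist), and the `norm_bound`, `initial_noise_multiplier`, and `noise_decay_rate` values are illustrative, mirroring the updated unit tests.

```python
import numpy as np
from mindspore import Tensor
from mindspore import dtype as mstype

# Import path taken from the file modified above; the package may also
# re-export NoiseMechanismsFactory from a shorter public namespace.
from mindarmour.diff_privacy.mechanisms.mechanisms import NoiseMechanismsFactory

# Stand-in gradient tensor; shape and values are illustrative.
grad = Tensor(np.ones([3, 2]), mstype.float32)

factory = NoiseMechanismsFactory()

# Plain Gaussian noise: decay_policy must stay None (the constructor raises otherwise).
noise_mech = factory.create('Gaussian',
                            norm_bound=1.0,
                            initial_noise_multiplier=0.1,
                            decay_policy=None)

# Adaptive Gaussian noise: decay_policy must be one of 'Time', 'Step', 'Exp'.
ada_noise_mech = factory.create('AdaGaussian',
                                norm_bound=1.0,
                                initial_noise_multiplier=0.1,
                                noise_decay_rate=0.5,
                                decay_policy='Exp')

print(noise_mech(grad))
print(ada_noise_mech(grad))
```

Apart from the keyword rename (and the version bump in setup.py), the mechanisms are called exactly as before; only the argument name `noise_update` becomes `decay_policy`.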