Commit 33dd87c9 authored by mindspore-ci-bot and committed by Gitee

!29 Tuning the MNIST example of Differential privacy training.

Merge pull request !29 from jxlang910/master
......@@ -23,10 +23,10 @@ mnist_cfg = edict({
'lr': 0.01,
'momentum': 0.9,
'epoch_size': 10,
- 'batch_size': 32,
+ 'batch_size': 256,
'buffer_size': 1000,
'image_height': 32,
'image_width': 32,
- 'save_checkpoint_steps': 1875,
+ 'save_checkpoint_steps': 234,
'keep_checkpoint_max': 10,
})
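Side note on the new values (my reading of the tuning, not stated in the commit message): MNIST has 60,000 training samples, so with batch_size raised to 256 one epoch is about 234 steps, which appears to be where the new save_checkpoint_steps comes from. A minimal sketch of that arithmetic:

```python
# Sketch: assumed relation between the new batch_size and save_checkpoint_steps.
num_train_samples = 60000            # MNIST training set size
batch_size = 256                     # new value in lenet5_config
steps_per_epoch = num_train_samples // batch_size
print(steps_per_epoch)               # 234 -> roughly one checkpoint per epoch
```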
......@@ -38,6 +38,7 @@ from lenet5_net import LeNet5
from lenet5_config import mnist_cfg as cfg
LOGGER = LogUtil.get_instance()
LOGGER.set_level('INFO')
TAG = 'Lenet5_train'
......@@ -92,11 +93,11 @@ if __name__ == "__main__":
parser.add_argument('--data_path', type=str, default="./MNIST_unzip",
help='path where the dataset is saved')
parser.add_argument('--dataset_sink_mode', type=bool, default=False, help='dataset_sink_mode is False or True')
- parser.add_argument('--micro_batches', type=int, default=None,
+ parser.add_argument('--micro_batches', type=int, default=32,
help='optional, if use differential privacy, need to set micro_batches')
- parser.add_argument('--l2_norm_bound', type=float, default=0.1,
+ parser.add_argument('--l2_norm_bound', type=float, default=1.0,
help='optional, if use differential privacy, need to set l2_norm_bound')
- parser.add_argument('--initial_noise_multiplier', type=float, default=0.001,
+ parser.add_argument('--initial_noise_multiplier', type=float, default=1.5,
help='optional, if use differential privacy, need to set initial_noise_multiplier')
args = parser.parse_args()
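For context (an assumption about the DP-SGD style setup, not spelled out in the diff): micro_batches splits each batch into groups whose gradients are clipped to l2_norm_bound before Gaussian noise is added, so batch_size is expected to divide evenly by micro_batches. With the new defaults, 256 / 32 = 8 samples per micro-batch:

```python
# Sketch: assumed consistency check between batch_size and the new micro_batches default.
batch_size = 256       # from lenet5_config
micro_batches = 32     # new argparse default
assert batch_size % micro_batches == 0, "batch_size should split evenly into micro-batches"
print(batch_size // micro_batches)   # 8 samples per micro-batch
```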
......@@ -120,13 +121,14 @@ if __name__ == "__main__":
gaussian_mech.set_mechanisms('Gaussian',
norm_bound=args.l2_norm_bound,
initial_noise_multiplier=args.initial_noise_multiplier)
- net_opt = gaussian_mech.create('SGD')(params=network.trainable_params(),
- learning_rate=cfg.lr,
- momentum=cfg.momentum)
+ net_opt = gaussian_mech.create('Momentum')(params=network.trainable_params(),
+ learning_rate=cfg.lr,
+ momentum=cfg.momentum)
rdp_monitor = PrivacyMonitorFactory.create('rdp',
num_samples=60000,
batch_size=cfg.batch_size,
- initial_noise_multiplier=args.initial_noise_multiplier,
+ initial_noise_multiplier=args.initial_noise_multiplier*
+ args.l2_norm_bound,
per_print_times=10)
model = DPModel(micro_batches=args.micro_batches,
norm_clip=args.l2_norm_bound,
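One way to read the monitor change above (an assumption, not stated in the diff): the RDP monitor now receives the absolute standard deviation of the added Gaussian noise, initial_noise_multiplier * l2_norm_bound, rather than the bare multiplier; with the new l2_norm_bound of 1.0 the two values coincide:

```python
# Sketch: assumed meaning of the argument passed to the RDP monitor above.
initial_noise_multiplier = 1.5     # new argparse default
l2_norm_bound = 1.0                # new argparse default
noise_std = initial_noise_multiplier * l2_norm_bound
print(noise_std)                   # 1.5 -> equals the multiplier when the bound is 1.0
```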
......@@ -141,7 +143,7 @@ if __name__ == "__main__":
dataset_sink_mode=args.dataset_sink_mode)
LOGGER.info(TAG, "============== Starting Testing ==============")
- ckpt_file_name = 'trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
+ ckpt_file_name = 'trained_ckpt_file/checkpoint_lenet-10_234.ckpt'
param_dict = load_checkpoint(ckpt_file_name)
load_param_into_net(network, param_dict)
ds_eval = generate_mnist_dataset(os.path.join(args.data_path, 'test'), batch_size=cfg.batch_size)
......
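Taken together, the tuned parameters describe one DP-SGD style update: clip each micro-batch gradient to l2_norm_bound, add Gaussian noise with standard deviation initial_noise_multiplier * l2_norm_bound, then average. A generic NumPy sketch of that step (an illustration of the idea only, not the MindArmour DPModel implementation):

```python
import numpy as np

def dp_sgd_gradient(per_micro_batch_grads, l2_norm_bound=1.0, noise_multiplier=1.5):
    """Generic DP-SGD aggregation sketch: clip, add Gaussian noise, average."""
    clipped = []
    for g in per_micro_batch_grads:
        norm = np.linalg.norm(g)
        clipped.append(g * min(1.0, l2_norm_bound / (norm + 1e-12)))  # per-micro-batch clipping
    total = np.sum(clipped, axis=0)
    noise = np.random.normal(0.0, noise_multiplier * l2_norm_bound, size=total.shape)
    return (total + noise) / len(per_micro_batch_grads)

# Example: 32 micro-batch gradients for a toy 10-dimensional parameter vector.
grads = [np.random.randn(10) for _ in range(32)]
print(dp_sgd_gradient(grads).shape)
```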
......@@ -29,7 +29,7 @@ def fc_with_initialize(input_channels, out_channels):
def weight_variable():
- return TruncatedNormal(0.02)
+ return TruncatedNormal(0.05)
class LeNet5(nn.Cell):
......
......@@ -72,24 +72,24 @@ class GaussianRandom(Mechanisms):
Args:
norm_bound(float): Clipping bound for the l2 norm of the gradients.
- Default: 1.5.
+ Default: 1.0.
initial_noise_multiplier(float): Ratio of the standard deviation of
Gaussian noise divided by the norm_bound, which will be used to
- calculate privacy spent. Default: 5.0.
+ calculate privacy spent. Default: 1.5.
Returns:
Tensor, generated noise.
Examples:
>>> shape = (3, 2, 4)
- >>> norm_bound = 1.5
- >>> initial_noise_multiplier = 0.1
+ >>> norm_bound = 1.0
+ >>> initial_noise_multiplier = 1.5
>>> net = GaussianRandom(shape, norm_bound, initial_noise_multiplier)
>>> res = net(shape)
>>> print(res)
"""
- def __init__(self, norm_bound=1.5, initial_noise_multiplier=5.0):
+ def __init__(self, norm_bound=1.0, initial_noise_multiplier=1.5):
super(GaussianRandom, self).__init__()
self._norm_bound = check_value_positive('norm_bound', norm_bound)
self._initial_noise_multiplier = check_value_positive('initial_noise_multiplier',
......
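For reference (a generic illustration, not the MindArmour class itself): per the docstring above, the multiplier is the ratio of the noise standard deviation to norm_bound, so the Gaussian mechanism draws noise with standard deviation norm_bound * initial_noise_multiplier, i.e. 1.0 * 1.5 with the new defaults:

```python
import numpy as np

def gaussian_noise(shape, norm_bound=1.0, initial_noise_multiplier=1.5):
    """Generic sketch of the Gaussian mechanism's noise draw."""
    stddev = norm_bound * initial_noise_multiplier   # 1.5 with the new defaults
    return np.random.normal(0.0, stddev, size=shape)

print(gaussian_noise((3, 2, 4)).shape)
```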
......@@ -70,11 +70,11 @@ class RDPMonitor(Callback):
num_samples (int): The total number of samples in training data sets.
batch_size (int): The number of samples in a batch while training.
initial_noise_multiplier (Union[float, int]): The initial
- multiplier of added noise. Default: 0.4.
+ multiplier of added noise. Default: 1.5.
max_eps (Union[float, int, None]): The maximum acceptable epsilon
- budget for DP training. Default: 3.0.
+ budget for DP training. Default: 10.0.
target_delta (Union[float, int, None]): Target delta budget for DP
- training. Default: 1e-5.
+ training. Default: 1e-3.
max_delta (Union[float, int, None]): The maximum acceptable delta
budget for DP training. Max_delta must be less than 1 and
suggested to be less than 1e-3, otherwise overflow would be
......@@ -84,7 +84,7 @@ class RDPMonitor(Callback):
orders (Union[None, list[int, float]]): Finite orders used for
computing rdp, which must be greater than 1.
noise_decay_mode (str): Decay mode of adding noise while training,
- which can be 'no_decay', 'time' or 'step'. Default: 'step'.
+ which can be 'no_decay', 'Time' or 'Step'. Default: 'Time'.
noise_decay_rate (Union[float, None]): Decay rate of noise while
training. Default: 6e-4.
per_print_times (int): The interval steps of computing and printing
......@@ -92,7 +92,7 @@ class RDPMonitor(Callback):
Examples:
>>> rdp = PrivacyMonitorFactory.create(policy='rdp',
- >>> num_samples=60000, batch_size=32)
+ >>> num_samples=60000, batch_size=256)
>>> network = Net()
>>> net_loss = nn.SoftmaxCrossEntropyWithLogits()
>>> net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
......@@ -100,9 +100,9 @@ class RDPMonitor(Callback):
>>> model.train(epochs, ds, callbacks=[rdp], dataset_sink_mode=False)
"""
- def __init__(self, num_samples, batch_size, initial_noise_multiplier=0.4,
- max_eps=3.0, target_delta=1e-5, max_delta=None,
- target_eps=None, orders=None, noise_decay_mode='step',
+ def __init__(self, num_samples, batch_size, initial_noise_multiplier=1.5,
+ max_eps=10.0, target_delta=1e-3, max_delta=None,
+ target_eps=None, orders=None, noise_decay_mode='Time',
noise_decay_rate=6e-4, per_print_times=50):
super(RDPMonitor, self).__init__()
check_int_positive('num_samples', num_samples)
......@@ -132,8 +132,8 @@ class RDPMonitor(Callback):
msg = 'orders must be greater than 1'
LOGGER.error(TAG, msg)
raise ValueError(msg)
- if noise_decay_mode not in ('no_decay', 'step', 'time'):
- msg = 'Noise decay mode must be in (no_decay, step, time)'
+ if noise_decay_mode not in ('no_decay', 'Step', 'Time'):
+ msg = "Noise decay mode must be in ('no_decay', 'Step', 'Time')"
LOGGER.error(TAG, msg)
raise ValueError(msg)
if noise_decay_rate is not None:
......@@ -256,11 +256,11 @@ class RDPMonitor(Callback):
LOGGER.error(TAG, msg)
raise ValueError(msg)
- if self._noise_decay_mode == 'time':
+ if self._noise_decay_mode == 'Time':
noise_step = [self._initial_noise_multiplier / (
1 + self._noise_decay_rate * step) for step in steps]
- elif self._noise_decay_mode == 'step':
+ elif self._noise_decay_mode == 'Step':
noise_step = [self._initial_noise_multiplier * (
1 - self._noise_decay_rate) ** step for step in steps]
self._rdp += sum(
......
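The two decay schedules touched in this hunk can be checked in isolation; a standalone sketch of the same formulas with the new defaults (schedule shapes only, not the monitor's accounting logic):

```python
initial_noise_multiplier = 1.5   # new default
noise_decay_rate = 6e-4          # default decay rate from the docstring
steps = range(0, 1000, 234)      # roughly one value per epoch at batch_size 256

time_decay = [initial_noise_multiplier / (1 + noise_decay_rate * s) for s in steps]
step_decay = [initial_noise_multiplier * (1 - noise_decay_rate) ** s for s in steps]
print(time_decay)   # 'Time' mode: hyperbolic decay
print(step_decay)   # 'Step' mode: exponential decay
```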
......@@ -34,8 +34,8 @@ class DPOptimizerClassFactory:
Examples:
>>> GaussianSGD = DPOptimizerClassFactory(micro_batches=2)
- >>> GaussianSGD.set_mechanisms('Gaussian', norm_bound=1.5, initial_noise_multiplier=5.0)
- >>> net_opt = GaussianSGD.create('SGD')(params=network.trainable_params(),
+ >>> GaussianSGD.set_mechanisms('Gaussian', norm_bound=1.0, initial_noise_multiplier=1.5)
+ >>> net_opt = GaussianSGD.create('Momentum')(params=network.trainable_params(),
>>> learning_rate=cfg.lr,
>>> momentum=cfg.momentum)
"""
......
......@@ -91,7 +91,7 @@ class DPModel(Model):
>>>
>>> net = Net()
>>> loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
- >>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
+ >>> optim = Momentum(params=net.trainable_params(), learning_rate=0.01, momentum=0.9)
>>> gaussian_mech = DPOptimizerClassFactory()
>>> gaussian_mech.set_mechanisms('Gaussian',
>>> norm_bound=args.l2_norm_bound,
......