diff --git a/tutorials/source_en/advanced_use/differential_privacy.md b/tutorials/source_en/advanced_use/differential_privacy.md index 072a5139e41c8e1f4c6751f106722175cf7cbe4a..8d26d34e2c8c13018d8b25c9feb8301fec229e4c 100644 --- a/tutorials/source_en/advanced_use/differential_privacy.md +++ b/tutorials/source_en/advanced_use/differential_privacy.md @@ -72,7 +72,7 @@ import mindspore.common.dtype as mstype from mindarmour.diff_privacy import DPModel from mindarmour.diff_privacy import PrivacyMonitorFactory -from mindarmour.diff_privacy import NoiseMechanismsFacotry +from mindarmour.diff_privacy import NoiseMechanismsFactory from mindarmour.diff_privacy import ClipMechanismsFactory from mindarmour.utils.logger import LogUtil from lenet5_net import LeNet5 @@ -85,7 +85,7 @@ TAG = 'Lenet5_train' ### Configuring Parameters -1. Set the running environment, dataset path, model training parameters, checkpoint storage parameters, and differential privacy parameters. Replace 'data_path' with you data path. +1. Set the running environment, dataset path, model training parameters, checkpoint storage parameters, and differential privacy parameters. Replace 'data_path' with your data path. For more configurations, see the MindArmour documentation. 
```python cfg = edict({ @@ -101,9 +101,9 @@ TAG = 'Lenet5_train' 'device_target': 'Ascend', # device used 'data_path': './MNIST_unzip', # the path of training and testing data set 'dataset_sink_mode': False, # whether deliver all training data to device one time - 'micro_batches': 16, # the number of small batches split from an original batch + 'micro_batches': 32, # the number of small batches split from an original batch 'norm_bound': 1.0, # the clip bound of the gradients of model's training parameters - 'initial_noise_multiplier': 1.0, # the initial multiplication coefficient of the noise added to training + 'initial_noise_multiplier': 0.05, # the initial multiplication coefficient of the noise added to training # parameters' gradients 'noise_mechanisms': 'Gaussian', # the method of adding noise in gradients while training 'clip_mechanisms': 'Gaussian', # the method of adaptive clipping gradients while training diff --git a/tutorials/source_zh_cn/advanced_use/differential_privacy.md b/tutorials/source_zh_cn/advanced_use/differential_privacy.md index 06b6482ea2f266caac5fc6eaa8a8be9024c954db..41a2cdb82d05448c28eafe1cda6c4e2fb08e62df 100644 --- a/tutorials/source_zh_cn/advanced_use/differential_privacy.md +++ b/tutorials/source_zh_cn/advanced_use/differential_privacy.md @@ -57,7 +57,7 @@ from mindspore.dataset.transforms.vision import Inter import mindspore.common.dtype as mstype from mindarmour.diff_privacy import DPModel -from mindarmour.diff_privacy import NoiseMechanismsFacotry +from mindarmour.diff_privacy import NoiseMechanismsFactory from mindarmour.diff_privacy import ClipMechanismsFactory from mindarmour.diff_privacy import PrivacyMonitorFactory from mindarmour.utils.logger import LogUtil @@ -71,7 +71,7 @@ TAG = 'Lenet5_train' ### 参数配置 -1. 设置运行环境、数据集路径、模型训练参数、checkpoint存储参数、差分隐私参数,`data_path`数据路径替换成你的数据集所在路径。 +1. 
设置运行环境、数据集路径、模型训练参数、checkpoint存储参数、差分隐私参数,`data_path`数据路径替换成你的数据集所在路径。更多配置可以参考MindArmour文档。 ```python cfg = edict({ @@ -87,9 +87,9 @@ TAG = 'Lenet5_train' 'device_target': 'Ascend', # device used 'data_path': './MNIST_unzip', # the path of training and testing data set 'dataset_sink_mode': False, # whether deliver all training data to device one time - 'micro_batches': 16, # the number of small batches split from an original batch + 'micro_batches': 32, # the number of small batches split from an original batch 'norm_bound': 1.0, # the clip bound of the gradients of model's training parameters - 'initial_noise_multiplier': 1.0, # the initial multiplication coefficient of the noise added to training + 'initial_noise_multiplier': 0.05, # the initial multiplication coefficient of the noise added to training # parameters' gradients 'noise_mechanisms': 'Gaussian', # the method of adding noise in gradients while training 'clip_mechanisms': 'Gaussian', # the method of adaptive clipping gradients while training