diff --git a/tutorials/source_en/advanced_use/differential_privacy.md b/tutorials/source_en/advanced_use/differential_privacy.md
index 20c1383d744781679816fbbfbf9ec56eea81d4cc..0185b04c67e8cfcf538dd6ddcea2e0acb9a84a7f 100644
--- a/tutorials/source_en/advanced_use/differential_privacy.md
+++ b/tutorials/source_en/advanced_use/differential_privacy.md
@@ -86,25 +86,25 @@ TAG = 'Lenet5_train'
 
     ```python
     cfg = edict({
-        'device_target': 'Ascend',  # device used
-        'data_path': './MNIST_unzip',  # the path of training and testing data set
-        'dataset_sink_mode': False,  # whether deliver all training data to device one time
-        'num_classes': 10,  # the number of classes of model's output
-        'lr': 0.01,  # the learning rate of model's optimizer
-        'momentum': 0.9,  # the momentum value of model's optimizer
-        'epoch_size': 10,  # training epochs
-        'batch_size': 256,  # batch size for training
-        'image_height': 32,  # the height of training samples
-        'image_width': 32,  # the width of training samples
-        'save_checkpoint_steps': 234,  # the interval steps for saving checkpoint file of the model
-        'keep_checkpoint_max': 10,  # the maximum number of checkpoint files would be saved
-        'micro_batches': 32,  # the number of small batches split from an original batch
-        'l2_norm_bound': 1.0,  # the clip bound of the gradients of model's training parameters
-        'initial_noise_multiplier': 1.5,  # the initial multiplication coefficient of the noise added to training
-                                          # parameters' gradients
-        'mechanisms': 'AdaGaussian',  # the method of adding noise in gradients while training
-        'optimizer': 'Momentum'  # the base optimizer used for Differential privacy training
-    })
+        'num_classes': 10,  # the number of classes of model's output
+        'lr': 0.1,  # the learning rate of model's optimizer
+        'momentum': 0.9,  # the momentum value of model's optimizer
+        'epoch_size': 10,  # training epochs
+        'batch_size': 256,  # batch size for training
+        'image_height': 32,  # the height of training samples
+        'image_width': 32,  # the width of training samples
+        'save_checkpoint_steps': 234,  # the interval steps for saving checkpoint file of the model
+        'keep_checkpoint_max': 10,  # the maximum number of checkpoint files would be saved
+        'device_target': 'Ascend',  # device used
+        'data_path': './MNIST_unzip',  # the path of training and testing data set
+        'dataset_sink_mode': False,  # whether deliver all training data to device one time
+        'micro_batches': 16,  # the number of small batches split from an original batch
+        'norm_clip': 1.0,  # the clip bound of the gradients of model's training parameters
+        'initial_noise_multiplier': 1.5,  # the initial multiplication coefficient of the noise added to training
+                                          # parameters' gradients
+        'mechanisms': 'AdaGaussian',  # the method of adding noise in gradients while training
+        'optimizer': 'Momentum'  # the base optimizer used for Differential privacy training
+    })
     ```
 
 2. Configure necessary information, including the environment information and execution mode.
@@ -320,13 +320,13 @@ ds_train = generate_mnist_dataset(os.path.join(cfg.data_path, "train"),
 
 5. Display the result.
 
-    The accuracy of the LeNet model without differential privacy is 99%, and the accuracy of the LeNet model with adaptive differential privacy AdaDP is 91%.
+    The accuracy of the LeNet model without differential privacy is 99%, and the accuracy of the LeNet model with adaptive differential privacy AdaDP is 98%.
    ```
    ============== Starting Training ==============
    ...
    ============== Starting Testing ==============
    ...
-   ============== Accuracy: 0.9115 ==============
+   ============== Accuracy: 0.9879 ==============
    ```
 
 ### References
diff --git a/tutorials/source_zh_cn/advanced_use/differential_privacy.md b/tutorials/source_zh_cn/advanced_use/differential_privacy.md
index 0b5e00fc0b67f49e67021aa24d0be69052504135..1c7ad0c175f92593bb3595ffce20992f31849f1d 100644
--- a/tutorials/source_zh_cn/advanced_use/differential_privacy.md
+++ b/tutorials/source_zh_cn/advanced_use/differential_privacy.md
@@ -72,25 +72,25 @@ TAG = 'Lenet5_train'
 
     ```python
     cfg = edict({
-        'device_target': 'Ascend',  # device used
-        'data_path': './MNIST_unzip',  # the path of training and testing data set
-        'dataset_sink_mode': False,  # whether deliver all training data to device one time
-        'num_classes': 10,  # the number of classes of model's output
-        'lr': 0.01,  # the learning rate of model's optimizer
-        'momentum': 0.9,  # the momentum value of model's optimizer
-        'epoch_size': 10,  # training epochs
-        'batch_size': 256,  # batch size for training
-        'image_height': 32,  # the height of training samples
-        'image_width': 32,  # the width of training samples
-        'save_checkpoint_steps': 234,  # the interval steps for saving checkpoint file of the model
-        'keep_checkpoint_max': 10,  # the maximum number of checkpoint files would be saved
-        'micro_batches': 32,  # the number of small batches split from an original batch
-        'l2_norm_bound': 1.0,  # the clip bound of the gradients of model's training parameters
-        'initial_noise_multiplier': 1.5,  # the initial multiplication coefficient of the noise added to training
-                                          # parameters' gradients
-        'mechanisms': 'AdaGaussian',  # the method of adding noise in gradients while training
-        'optimizer': 'Momentum'  # the base optimizer used for Differential privacy training
-    })
+        'num_classes': 10,  # the number of classes of model's output
+        'lr': 0.1,  # the learning rate of model's optimizer
+        'momentum': 0.9,  # the momentum value of model's optimizer
+        'epoch_size': 10,  # training epochs
+        'batch_size': 256,  # batch size for training
+        'image_height': 32,  # the height of training samples
+        'image_width': 32,  # the width of training samples
+        'save_checkpoint_steps': 234,  # the interval steps for saving checkpoint file of the model
+        'keep_checkpoint_max': 10,  # the maximum number of checkpoint files would be saved
+        'device_target': 'Ascend',  # device used
+        'data_path': './MNIST_unzip',  # the path of training and testing data set
+        'dataset_sink_mode': False,  # whether deliver all training data to device one time
+        'micro_batches': 16,  # the number of small batches split from an original batch
+        'norm_clip': 1.0,  # the clip bound of the gradients of model's training parameters
+        'initial_noise_multiplier': 1.5,  # the initial multiplication coefficient of the noise added to training
+                                          # parameters' gradients
+        'mechanisms': 'AdaGaussian',  # the method of adding noise in gradients while training
+        'optimizer': 'Momentum'  # the base optimizer used for Differential privacy training
+    })
     ```
 
 2. 配置必要的信息,包括环境信息、执行的模式。
@@ -306,13 +306,13 @@ ds_train = generate_mnist_dataset(os.path.join(cfg.data_path, "train"),
 
 5. 结果展示。
 
-    不加差分隐私的LeNet模型精度稳定在99%,加了自适应差分隐私AdaDP的LeNet模型收敛,精度稳定在91%。
+    不加差分隐私的LeNet模型精度稳定在99%,加了自适应差分隐私AdaDP的LeNet模型收敛,精度稳定在98%。
    ```
    ============== Starting Training ==============
    ...
    ============== Starting Testing ==============
    ...
-   ============== Accuracy: 0.9115 ==============
+   ============== Accuracy: 0.9879 ==============
    ```
 
 ### 引用
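The renamed and retuned keys in the new configuration map onto the DP-SGD style update the tutorial trains with: `micro_batches` controls how the 256-sample batch is split (256 / 16 = 16 samples per micro-batch), `norm_clip` bounds the L2 norm of each micro-batch gradient, and `initial_noise_multiplier` scales the Gaussian noise added to the clipped gradients. The snippet below is a minimal numpy sketch of that perturbation step for illustration only; the function name, array shapes, and the exact noise-scaling convention are assumptions, not MindArmour's actual `DPModel` internals.

```python
import numpy as np

def dp_perturbed_gradient(per_sample_grads, micro_batches=16, norm_clip=1.0,
                          noise_multiplier=1.5, rng=None):
    """Clip per-micro-batch gradients and add Gaussian noise (DP-SGD style sketch).

    per_sample_grads: array of shape (batch_size, num_params); batch_size must be
    divisible by micro_batches, e.g. 256 / 16 = 16 samples per micro-batch.
    """
    rng = np.random.default_rng() if rng is None else rng
    noisy_grads = []
    for group in np.split(per_sample_grads, micro_batches):    # split the batch into micro-batches
        grad = group.mean(axis=0)                               # micro-batch gradient
        l2 = np.linalg.norm(grad)
        grad = grad * min(1.0, norm_clip / (l2 + 1e-12))        # clip the gradient norm to norm_clip
        noise = rng.normal(0.0, noise_multiplier * norm_clip, size=grad.shape)
        noisy_grads.append(grad + noise)                        # perturb each clipped micro-batch gradient
    return np.mean(noisy_grads, axis=0)                         # averaged noisy gradient used for the update

# Example with the values from the updated configuration (shapes are hypothetical)
grads = np.random.randn(256, 10)   # stand-in for per-sample gradients of 10 parameters
update = dp_perturbed_gradient(grads, micro_batches=16, norm_clip=1.0, noise_multiplier=1.5)
```

With `'mechanisms': 'AdaGaussian'`, the noise multiplier does not stay at its initial value but is adjusted adaptively over the course of training, which is the adaptive differential privacy (AdaDP) behaviour both tutorials refer to when reporting the 0.9879 accuracy above.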