diff --git a/example/membership_inference_demo/eval.py b/example/membership_inference_demo/eval.py
index 0b735ac87bd28ce53e8d18d2542f153ba381fde5..a56113b17d879837d6da51f0d17fcee7da20b6b3 100644
--- a/example/membership_inference_demo/eval.py
+++ b/example/membership_inference_demo/eval.py
@@ -116,7 +116,7 @@ def test(cloud_args=None):
     net = vgg16(num_classes=args.num_classes, args=args)
     opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()),
                    0.01, args.momentum, weight_decay=args.weight_decay)
-    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean', is_grad=False)
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
     model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'})
 
     param_dict = load_checkpoint(args.pre_trained)
diff --git a/example/membership_inference_demo/main.py b/example/membership_inference_demo/main.py
index d87a3c6130f999fe999325ebdad7939b70b335e5..d3b876b57a7e776a04a16f31ce347ee044ed38ff 100644
--- a/example/membership_inference_demo/main.py
+++ b/example/membership_inference_demo/main.py
@@ -59,7 +59,7 @@ if __name__ == "__main__":
 
     # load the pretrained model
     net = vgg16(args.num_classes, args)
-    loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
     opt = nn.Momentum(params=get_param_groups(net), learning_rate=0.1, momentum=0.9,
                       weight_decay=args.weight_decay, loss_scale=args.loss_scale)
     load_param_into_net(net, load_checkpoint(args.pre_trained))
diff --git a/example/membership_inference_demo/train.py b/example/membership_inference_demo/train.py
index f711448b147a1e266794377031149d9c5d677ad3..315684c1257a94be3b644acc4616934d5f0e4efc 100644
--- a/example/membership_inference_demo/train.py
+++ b/example/membership_inference_demo/train.py
@@ -182,7 +182,7 @@ if __name__ == '__main__':
                       weight_decay=args.weight_decay,
                       loss_scale=args.loss_scale)
 
-    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean', is_grad=False)
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
     model = Model(network, loss_fn=loss, optimizer=opt, metrics={'acc'},
                   amp_level="O2", keep_batchnorm_fp32=False,
                   loss_scale_manager=None)
diff --git a/example/mnist_demo/lenet5_dp.py b/example/mnist_demo/lenet5_dp.py
index 65aa63ce4b73a0fd0c7dc254963880d502ab17d0..ee2f711263023eb024a82c2da3e63dbaae737927 100644
--- a/example/mnist_demo/lenet5_dp.py
+++ b/example/mnist_demo/lenet5_dp.py
@@ -91,8 +91,7 @@ if __name__ == "__main__":
 
     context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target)
     network = LeNet5()
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True,
-                                                reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     config_ck = CheckpointConfig(
         save_checkpoint_steps=cfg.save_checkpoint_steps,
         keep_checkpoint_max=cfg.keep_checkpoint_max)
diff --git a/example/mnist_demo/lenet5_dp_ada_gaussian.py b/example/mnist_demo/lenet5_dp_ada_gaussian.py
index d2b84c4dcc882fbdd9b712d0dedc2a45a2e94269..10faa88f6388913236dc9bbaad4fa9c0aa4058e7 100644
--- a/example/mnist_demo/lenet5_dp_ada_gaussian.py
+++ b/example/mnist_demo/lenet5_dp_ada_gaussian.py
@@ -90,8 +90,7 @@ if __name__ == "__main__":
 
     context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target)
     network = LeNet5()
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True,
-                                                reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     config_ck = CheckpointConfig(
         save_checkpoint_steps=cfg.save_checkpoint_steps,
         keep_checkpoint_max=cfg.keep_checkpoint_max)
diff --git a/example/mnist_demo/lenet5_dp_ada_sgd_graph.py b/example/mnist_demo/lenet5_dp_ada_sgd_graph.py
index f72e19033d0e0c0cde3a381ba9f2ba6da25dba00..491962a76f8edd4cdffbae36667b814fc7890dc8 100644
--- a/example/mnist_demo/lenet5_dp_ada_sgd_graph.py
+++ b/example/mnist_demo/lenet5_dp_ada_sgd_graph.py
@@ -90,8 +90,7 @@ if __name__ == "__main__":
 
     context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target)
     network = LeNet5()
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True,
-                                                reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     config_ck = CheckpointConfig(
         save_checkpoint_steps=cfg.save_checkpoint_steps,
         keep_checkpoint_max=cfg.keep_checkpoint_max)
diff --git a/example/mnist_demo/lenet5_dp_optimizer.py b/example/mnist_demo/lenet5_dp_optimizer.py
index 122ad8b180f8704d13e50ce8e76eb4d24eb0256f..2181251b01f4615345c934eeeade30510ee2d247 100644
--- a/example/mnist_demo/lenet5_dp_optimizer.py
+++ b/example/mnist_demo/lenet5_dp_optimizer.py
@@ -89,7 +89,7 @@ def generate_mnist_dataset(data_path, batch_size=32, repeat_size=1,
 if __name__ == "__main__":
     context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target)
     network = LeNet5()
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps,
                                  keep_checkpoint_max=cfg.keep_checkpoint_max)
     ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet",
diff --git a/example/mnist_demo/lenet5_mnist_coverage.py b/example/mnist_demo/lenet5_mnist_coverage.py
index c497af698ce6140a8c0841a6c2bf21f24268bde9..35830ea135a47ea1a926193d556b4bcfc28ee3c9 100644
--- a/example/mnist_demo/lenet5_mnist_coverage.py
+++ b/example/mnist_demo/lenet5_mnist_coverage.py
@@ -73,7 +73,7 @@ def test_lenet_mnist_coverage():
     LOGGER.info(TAG, 'SNAC of this test is : %s', model_fuzz_test.get_snac())
 
     # generate adv_data
-    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
     attack = FastGradientSignMethod(net, eps=0.3, loss_fn=loss)
     adv_data = attack.batch_generate(test_images, test_labels, batch_size=32)
     model_fuzz_test.calculate_coverage(adv_data, bias_coefficient=0.5)
diff --git a/example/mnist_demo/mnist_attack_fgsm.py b/example/mnist_demo/mnist_attack_fgsm.py
index 6b624c26a31f12f1b8252e0b31ef748a1872e994..090b563a8c2cda578a56234c25b73f5a2d7659b7 100644
--- a/example/mnist_demo/mnist_attack_fgsm.py
+++ b/example/mnist_demo/mnist_attack_fgsm.py
@@ -75,7 +75,7 @@ def test_fast_gradient_sign_method():
     LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)
 
     # attacking
-    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
     attack = FastGradientSignMethod(net, eps=0.3, loss_fn=loss)
     start_time = time.clock()
     adv_data = attack.batch_generate(np.concatenate(test_images),
diff --git a/example/mnist_demo/mnist_attack_lbfgs.py b/example/mnist_demo/mnist_attack_lbfgs.py
index 43629dfd19c086ba1d4b03ac11113bc001db3543..dace1dd2a257164b5dd852f521ada6583c8cfad2 100644
--- a/example/mnist_demo/mnist_attack_lbfgs.py
+++ b/example/mnist_demo/mnist_attack_lbfgs.py
@@ -83,7 +83,7 @@ def test_lbfgs_attack():
             targeted_labels[i] = (targeted_labels[i] + 1) % 10
     else:
         targeted_labels = true_labels.astype(np.int32)
-    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
     attack = LBFGS(net, is_targeted=is_targeted, loss_fn=loss)
     start_time = time.clock()
     adv_data = attack.batch_generate(np.concatenate(test_images),
diff --git a/example/mnist_demo/mnist_attack_mdi2fgsm.py b/example/mnist_demo/mnist_attack_mdi2fgsm.py
index e441235bf94c20290f10f59d243058f48c348138..c717c0834ce308acdac50caeda97e3e69362111e 100644
--- a/example/mnist_demo/mnist_attack_mdi2fgsm.py
+++ b/example/mnist_demo/mnist_attack_mdi2fgsm.py
@@ -77,7 +77,7 @@ def test_momentum_diverse_input_iterative_method():
     LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)
 
     # attacking
-    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
     attack = MomentumDiverseInputIterativeMethod(net, loss_fn=loss)
     start_time = time.clock()
     adv_data = attack.batch_generate(np.concatenate(test_images),
diff --git a/example/mnist_demo/mnist_attack_pgd.py b/example/mnist_demo/mnist_attack_pgd.py
index e3659d1410b5f6ff650ae62e7188c9f3ba63b458..d64aa9cbb50ebf43c17b3dd34ef1250dc221b916 100644
--- a/example/mnist_demo/mnist_attack_pgd.py
+++ b/example/mnist_demo/mnist_attack_pgd.py
@@ -75,7 +75,7 @@ def test_projected_gradient_descent_method():
     LOGGER.info(TAG, "prediction accuracy before attacking is : %s", accuracy)
 
     # attacking
-    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
     attack = ProjectedGradientDescent(net, eps=0.3, loss_fn=loss)
     start_time = time.clock()
     adv_data = attack.batch_generate(np.concatenate(test_images),
diff --git a/example/mnist_demo/mnist_defense_nad.py b/example/mnist_demo/mnist_defense_nad.py
index 0a68ecbb6e98e6f24f8d747cc6101a17e1ac932a..62fb458e825fe8d39db47a23a4850f2e0332f39a 100644
--- a/example/mnist_demo/mnist_defense_nad.py
+++ b/example/mnist_demo/mnist_defense_nad.py
@@ -48,7 +48,7 @@ def test_nad_method():
     ds_train = generate_mnist_dataset(os.path.join(mnist_path, "train"),
                                       batch_size=batch_size, repeat_size=1)
     net = LeNet5()
-    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
     opt = nn.Momentum(net.trainable_params(), 0.01, 0.09)
     model = Model(net, loss, opt, metrics=None)
     model.train(10, ds_train, callbacks=[LossMonitor()],
diff --git a/example/mnist_demo/mnist_evaluation.py b/example/mnist_demo/mnist_evaluation.py
index 389e80d58c175c5b1714e30ea720dd990fb5874a..38d75a710c035d0831133cbfa115437cff7653bd 100644
--- a/example/mnist_demo/mnist_evaluation.py
+++ b/example/mnist_demo/mnist_evaluation.py
@@ -164,7 +164,7 @@ def test_black_defense():
     wb_model = ModelToBeAttacked(wb_net)
 
     # gen white-box adversarial examples of test data
-    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
     wb_attack = FastGradientSignMethod(wb_net, eps=0.3, loss_fn=loss)
     wb_adv_sample = wb_attack.generate(attacked_sample,
                                        attacked_true_label)
diff --git a/example/mnist_demo/mnist_train.py b/example/mnist_demo/mnist_train.py
index d44fe481fc4bf9a878b8199734d8b5bf6139ec7f..ea86c4f37bea31a63c75efb712b42cf7ab9e055b 100644
--- a/example/mnist_demo/mnist_train.py
+++ b/example/mnist_demo/mnist_train.py
@@ -38,8 +38,7 @@ def mnist_train(epoch_size, batch_size, lr, momentum):
                                       batch_size=batch_size, repeat_size=1)
 
     network = LeNet5()
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True,
-                                                reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     net_opt = nn.Momentum(network.trainable_params(), lr, momentum)
     config_ck = CheckpointConfig(save_checkpoint_steps=1875,
                                  keep_checkpoint_max=10)
diff --git a/mindarmour/attacks/gradient_method.py b/mindarmour/attacks/gradient_method.py
index da01df6ae2223f60b9c54b173329ad1729db1c88..7188c8a977d7fccd55dd147722384e6a1bd47411 100644
--- a/mindarmour/attacks/gradient_method.py
+++ b/mindarmour/attacks/gradient_method.py
@@ -73,8 +73,7 @@ class GradientMethod(Attack):
         else:
             self._alpha = alpha
         if loss_fn is None:
-            loss_fn = SoftmaxCrossEntropyWithLogits(is_grad=False,
-                                                    sparse=False)
+            loss_fn = SoftmaxCrossEntropyWithLogits(sparse=False)
         with_loss_cell = WithLossCell(self._network, loss_fn)
         self._grad_all = GradWrapWithLoss(with_loss_cell)
         self._grad_all.set_train()
diff --git a/mindarmour/attacks/iterative_gradient_method.py b/mindarmour/attacks/iterative_gradient_method.py
index 00fa5d8093b309659c22c675e034da8ba83ed129..30116036dea8e64bcc68a30fe7431f509906d7f0 100644
--- a/mindarmour/attacks/iterative_gradient_method.py
+++ b/mindarmour/attacks/iterative_gradient_method.py
@@ -129,7 +129,7 @@ class IterativeGradientMethod(Attack):
         for b in self._bounds:
             _ = check_param_multi_types('bound', b, [int, float])
         if loss_fn is None:
-            loss_fn = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=False)
+            loss_fn = SoftmaxCrossEntropyWithLogits(sparse=False)
         self._loss_grad = GradWrapWithLoss(WithLossCell(self._network,
                                                         loss_fn))
         self._loss_grad.set_train()
diff --git a/mindarmour/attacks/lbfgs.py b/mindarmour/attacks/lbfgs.py
index 790e5ea9bfaf9cca2846cccdd69527fe79e1a922..fb739610fe63eff6e1e1c801e968ab5218874dc3 100644
--- a/mindarmour/attacks/lbfgs.py
+++ b/mindarmour/attacks/lbfgs.py
@@ -66,7 +66,7 @@ class LBFGS(Attack):
         self._nb_iter = check_int_positive('nb_iter', nb_iter)
         self._search_iters = check_int_positive('search_iters', search_iters)
         if loss_fn is None:
-            loss_fn = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=False)
+            loss_fn = SoftmaxCrossEntropyWithLogits(sparse=False)
         with_loss_cell = WithLossCell(self._network, loss_fn)
         self._grad_all = GradWrapWithLoss(with_loss_cell)
         self._dtype = None
diff --git a/mindarmour/defenses/adversarial_defense.py b/mindarmour/defenses/adversarial_defense.py
index 5d9c29c6dc6521bb7eec4a7d6bd265075e7bc16f..7c066e7aeb9f1ba06a9822669ded4a0f6eaea022 100644
--- a/mindarmour/defenses/adversarial_defense.py
+++ b/mindarmour/defenses/adversarial_defense.py
@@ -58,7 +58,7 @@ class AdversarialDefense(Defense):
         >>> net = Net()
         >>> lr = 0.0001
         >>> momentum = 0.9
-        >>> loss_fn = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+        >>> loss_fn = SoftmaxCrossEntropyWithLogits(sparse=True)
         >>> optimizer = Momentum(net.trainable_params(), lr, momentum)
         >>> adv_defense = AdversarialDefense(net, loss_fn, optimizer)
         >>> inputs = np.random.rand(32, 1, 28, 28).astype(np.float32)
@@ -70,7 +70,7 @@ class AdversarialDefense(Defense):
         super(AdversarialDefense, self).__init__(network)
         network = check_model('network', network, Cell)
         if loss_fn is None:
-            loss_fn = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+            loss_fn = SoftmaxCrossEntropyWithLogits(sparse=True)
 
         if optimizer is None:
             optimizer = Momentum(
diff --git a/mindarmour/diff_privacy/mechanisms/mechanisms.py b/mindarmour/diff_privacy/mechanisms/mechanisms.py
index bfe3a582fb3857d5e8cda13459fe938b31b5bbb1..06157e65bc557a706249952cdb04b15152b7b5fd 100644
--- a/mindarmour/diff_privacy/mechanisms/mechanisms.py
+++ b/mindarmour/diff_privacy/mechanisms/mechanisms.py
@@ -123,7 +123,7 @@ class NoiseMechanismsFactory:
         >>> batch_size = 32
         >>> batches = 128
         >>> epochs = 1
-        >>> loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+        >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
         >>> noise_mech = NoiseMechanismsFactory().create('Gaussian',
         >>>                                              norm_bound=norm_bound,
         >>>                                              initial_noise_multiplier=initial_noise_multiplier)
diff --git a/mindarmour/diff_privacy/train/model.py b/mindarmour/diff_privacy/train/model.py
index 4d43650dd5f25bcba9a036078bf390c93e345a73..e2ad470e2a490aa6fc52099ed5d4071cee32797e 100644
--- a/mindarmour/diff_privacy/train/model.py
+++ b/mindarmour/diff_privacy/train/model.py
@@ -39,7 +39,7 @@ from mindspore.ops.operations import NPUClearFloatStatus
 from mindspore.ops.operations import ReduceSum
 from mindspore.ops.operations import LessEqual
 from mindspore.ops.operations import ControlDepend
-from mindspore.parallel._utils import _get_mirror_mean
+from mindspore.parallel._utils import _get_gradients_mean
 from mindspore.parallel._utils import _get_device_num
 from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
 from mindspore.common.parameter import Parameter
@@ -93,7 +93,7 @@ class DPModel(Model):
         >>> batches = 128
         >>> epochs = 1
         >>> micro_batches = 2
-        >>> loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+        >>> loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
         >>> factory_opt = DPOptimizerClassFactory(micro_batches=micro_batches)
         >>> factory_opt.set_mechanisms('Gaussian',
         >>>                            norm_bound=norm_bound,
@@ -405,7 +405,7 @@ class _TrainOneStepWithLossScaleCell(Cell):
         self.reducer_flag = self.parallel_mode in [ParallelMode.DATA_PARALLEL,
                                                    ParallelMode.HYBRID_PARALLEL]
         if self.reducer_flag:
-            mean = _get_mirror_mean()
+            mean = _get_gradients_mean()
             degree = _get_device_num()
             self.grad_reducer = DistributedGradReducer(optimizer.parameters,
                                                        mean, degree)
@@ -611,7 +611,7 @@ class _TrainOneStepCell(Cell):
                 ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL):
             self.reducer_flag = True
         if self.reducer_flag:
-            mean = _get_mirror_mean()
+            mean = _get_gradients_mean()
             degree = _get_device_num()
             self.grad_reducer = DistributedGradReducer(optimizer.parameters,
                                                        mean, degree)
diff --git a/tests/ut/python/attacks/test_gradient_method.py b/tests/ut/python/attacks/test_gradient_method.py
index 65c748f2cb711019fb5cc9a406d2999a058af077..075617e1994a51823c65ea965f4b578b876f396e 100644
--- a/tests/ut/python/attacks/test_gradient_method.py
+++ b/tests/ut/python/attacks/test_gradient_method.py
@@ -111,7 +111,7 @@ def test_fast_gradient_method_cpu():
 
     input_np = np.asarray([[0.1, 0.2, 0.7]], np.float32)
     label = np.asarray([2], np.int32)
-    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
     attack = FastGradientMethod(Net(), loss_fn=loss)
     ms_adv_x = attack.generate(input_np, label)
 
diff --git a/tests/ut/python/defenses/mock_net.py b/tests/ut/python/defenses/mock_net.py
index d9ad42d1615d39342ea05fee69907373c43e1ae6..a67609ab4f9c2e51a240d736d2803f49cbe1c755 100644
--- a/tests/ut/python/defenses/mock_net.py
+++ b/tests/ut/python/defenses/mock_net.py
@@ -95,7 +95,7 @@ if __name__ == '__main__':
     attack.generate(inputs_np, labels_np)
 
     # test train ops
-    loss_fn = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=sparse)
+    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=sparse)
     optimizer = Momentum(filter(lambda x: x.requires_grad,
                                 net.get_parameters()), 0.01, 0.9)
     loss_net = WithLossCell(net, loss_fn)
diff --git a/tests/ut/python/defenses/test_ad.py b/tests/ut/python/defenses/test_ad.py
index d726c361d8db5b8c14e85a69faa497b96d402919..1c3383d24c8eed3e6e6157732071bbe4f3b96fa0 100644
--- a/tests/ut/python/defenses/test_ad.py
+++ b/tests/ut/python/defenses/test_ad.py
@@ -52,7 +52,7 @@ def test_ad():
         labels = np.eye(num_classes)[labels].astype(np.float32)
 
     net = Net()
-    loss_fn = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=sparse)
+    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=sparse)
     optimizer = Momentum(learning_rate=Tensor(np.array([0.001], np.float32)),
                          momentum=0.9,
                          params=net.trainable_params())
diff --git a/tests/ut/python/defenses/test_ead.py b/tests/ut/python/defenses/test_ead.py
index f44cc412bd35bff87f511b7d95d2f99c79fd5c5d..a5a205c17dc3c33fec9f70c2ede33eefa539eaf4 100644
--- a/tests/ut/python/defenses/test_ead.py
+++ b/tests/ut/python/defenses/test_ead.py
@@ -54,7 +54,7 @@ def test_ead():
         labels = np.eye(num_classes)[labels].astype(np.float32)
 
     net = Net()
-    loss_fn = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=sparse)
+    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=sparse)
     optimizer = Momentum(net.trainable_params(), 0.001, 0.9)
 
     net = Net()
diff --git a/tests/ut/python/defenses/test_nad.py b/tests/ut/python/defenses/test_nad.py
index f0d89812c8b03cf54e61a772c0c121fb2ca567e8..ab9d59c6e7f03cf65814a3542d8f997d7036a5a0 100644
--- a/tests/ut/python/defenses/test_nad.py
+++ b/tests/ut/python/defenses/test_nad.py
@@ -52,7 +52,7 @@ def test_nad():
         labels = np.eye(num_classes)[labels].astype(np.float32)
 
     net = Net()
-    loss_fn = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=sparse)
+    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=sparse)
     optimizer = Momentum(net.trainable_params(), 0.001, 0.9)
 
     # defense
diff --git a/tests/ut/python/defenses/test_pad.py b/tests/ut/python/defenses/test_pad.py
index e92d7cef6a03edb828ba77fb2e770207961a7bea..b38a01e0ec4b0bba90f0118632bf9e4a9f051db8 100644
--- a/tests/ut/python/defenses/test_pad.py
+++ b/tests/ut/python/defenses/test_pad.py
@@ -53,7 +53,7 @@ def test_pad():
 
     # construct network
     net = Net()
-    loss_fn = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=sparse)
+    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=sparse)
     optimizer = Momentum(net.trainable_params(), 0.001, 0.9)
 
     # defense
diff --git a/tests/ut/python/diff_privacy/test_membership_inference.py b/tests/ut/python/diff_privacy/test_membership_inference.py
index c11d45a4c46b2d74eac4f15a02e8f6f9958bb079..f465b84235037ab88f29134a0bbe2eb4d0dacc6a 100644
--- a/tests/ut/python/diff_privacy/test_membership_inference.py
+++ b/tests/ut/python/diff_privacy/test_membership_inference.py
@@ -48,7 +48,7 @@ def dataset_generator(batch_size, batches):
 @pytest.mark.component_mindarmour
 def test_get_membership_inference_object():
     net = Net()
-    loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
     opt = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
     model = Model(network=net, loss_fn=loss, optimizer=opt)
     inference_model = MembershipInference(model)
@@ -62,7 +62,7 @@ def test_membership_inference_object_train():
     net = Net()
-    loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
     opt = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
     model = Model(network=net, loss_fn=loss, optimizer=opt)
     inference_model = MembershipInference(model)
@@ -92,7 +92,7 @@ def test_membership_inference_object_train():
 @pytest.mark.component_mindarmour
 def test_membership_inference_eval():
     net = Net()
-    loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
     opt = nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
     model = Model(network=net, loss_fn=loss, optimizer=opt)
     inference_model = MembershipInference(model)
diff --git a/tests/ut/python/diff_privacy/test_model_train.py b/tests/ut/python/diff_privacy/test_model_train.py
index e27858d4a36aa56ebbaacd49ef6fe9f11f5b90fd..bdfde277f8bba0df16389843818ab2753ec25c0b 100644
--- a/tests/ut/python/diff_privacy/test_model_train.py
+++ b/tests/ut/python/diff_privacy/test_model_train.py
@@ -53,7 +53,7 @@ def test_dp_model_with_pynative_mode():
     batches = 128
     epochs = 1
     micro_batches = 2
-    loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
     factory_opt = DPOptimizerClassFactory(micro_batches=micro_batches)
     factory_opt.set_mechanisms('Gaussian',
                                norm_bound=norm_bound,
@@ -92,7 +92,7 @@ def test_dp_model_with_graph_mode():
     batch_size = 32
     batches = 128
     epochs = 1
-    loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
     noise_mech = NoiseMechanismsFactory().create('Gaussian',
                                                  norm_bound=norm_bound,
                                                  initial_noise_multiplier=initial_noise_multiplier)
@@ -131,7 +131,7 @@ def test_dp_model_with_graph_mode_ada_gaussian():
     batches = 128
     epochs = 1
     alpha = 0.8
-    loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
     noise_mech = NoiseMechanismsFactory().create('AdaGaussian',
                                                  norm_bound=norm_bound,
                                                  initial_noise_multiplier=initial_noise_multiplier,
diff --git a/tests/ut/python/diff_privacy/test_monitor.py b/tests/ut/python/diff_privacy/test_monitor.py
index 9169b273543b2007f7bf2f537aac683b2b4e5b92..4f9dcdd40a0d814ef34f3f5a0b163de9802bd1b0 100644
--- a/tests/ut/python/diff_privacy/test_monitor.py
+++ b/tests/ut/python/diff_privacy/test_monitor.py
@@ -58,8 +58,7 @@ def test_dp_monitor():
     LOGGER.info(TAG, 'The recommended maximum training epochs is: %s',
                 suggest_epoch)
     network = LeNet5()
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True,
-                                                reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
 
     model = Model(network, net_loss, net_opt)
@@ -88,8 +87,7 @@ def test_dp_monitor_gpu():
     LOGGER.info(TAG, 'The recommended maximum training epochs is: %s',
                 suggest_epoch)
     network = LeNet5()
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True,
-                                                reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
 
     model = Model(network, net_loss, net_opt)
@@ -118,8 +116,7 @@ def test_dp_monitor_cpu():
     LOGGER.info(TAG, 'The recommended maximum training epochs is: %s',
                 suggest_epoch)
     network = LeNet5()
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True,
-                                                reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
 
     model = Model(network, net_loss, net_opt)
@@ -149,8 +146,7 @@ def test_dp_monitor_zcdp():
     LOGGER.info(TAG, 'The recommended maximum training epochs is: %s',
                 suggest_epoch)
     network = LeNet5()
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True,
-                                                reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
 
     model = Model(network, net_loss, net_opt)
@@ -179,8 +175,7 @@ def test_dp_monitor_zcdp_gpu():
     LOGGER.info(TAG, 'The recommended maximum training epochs is: %s',
                 suggest_epoch)
     network = LeNet5()
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True,
-                                                reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
 
     model = Model(network, net_loss, net_opt)
@@ -209,8 +204,7 @@ def test_dp_monitor_zcdp_cpu():
     LOGGER.info(TAG, 'The recommended maximum training epochs is: %s',
                 suggest_epoch)
     network = LeNet5()
-    net_loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True,
-                                                reduction="mean")
+    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
     net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
 
     model = Model(network, net_loss, net_opt)
diff --git a/tests/ut/python/fuzzing/test_coverage_metrics.py b/tests/ut/python/fuzzing/test_coverage_metrics.py
index 129080921a9785ecdb7bc3435863f9ff2f168e9e..de2e7278e01d203f931868ae21e2884ded02c134 100644
--- a/tests/ut/python/fuzzing/test_coverage_metrics.py
+++ b/tests/ut/python/fuzzing/test_coverage_metrics.py
@@ -83,7 +83,7 @@ def test_lenet_mnist_coverage_cpu():
     LOGGER.info(TAG, 'SNAC of this test is : %s', model_fuzz_test.get_snac())
 
     # generate adv_data
-    loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
+    loss = SoftmaxCrossEntropyWithLogits(sparse=True)
     attack = FastGradientSignMethod(net, eps=0.3, loss_fn=loss)
     adv_data = attack.batch_generate(test_data, test_labels, batch_size=32)
     model_fuzz_test.calculate_coverage(adv_data, bias_coefficient=0.5)