Commit b4d85e56 authored by jin-xiulang

Fix issues.

Parent 01d40e16
......@@ -49,8 +49,8 @@ class Attack:
numpy.ndarray, generated adversarial examples
Examples:
- >>> inputs = Tensor([[0.2, 0.4, 0.5, 0.2], [0.7, 0.2, 0.4, 0.3]])
- >>> labels = [3, 0]
+ >>> inputs = np.array([[0.2, 0.4, 0.5, 0.2], [0.7, 0.2, 0.4, 0.3]])
+ >>> labels = np.array([3, 0])
>>> advs = attack.batch_generate(inputs, labels, batch_size=2)
"""
arr_x, arr_y = check_pair_numpy_param('inputs', inputs, 'labels', labels)
......
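For reference, a minimal sketch of the corrected usage, assuming `attack` is an instance of any Attack subclass built on a trained network (the surrounding setup is not part of this commit):

import numpy as np

# batch_generate now documents plain numpy arrays rather than Tensors,
# matching the check_pair_numpy_param validation in the method body.
inputs = np.array([[0.2, 0.4, 0.5, 0.2], [0.7, 0.2, 0.4, 0.3]]).astype(np.float32)
labels = np.array([3, 0])
advs = attack.batch_generate(inputs, labels, batch_size=2)
print(advs.shape)  # adversarial examples come back as a numpy.ndarray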
......@@ -107,6 +107,7 @@ class CarliniWagnerL2Attack(Attack):
LOGGER.info(TAG, "init CW object.")
super(CarliniWagnerL2Attack, self).__init__()
self._network = check_model('network', network, Cell)
+ self._network.set_grad(True)
self._num_classes = check_int_positive('num_classes', num_classes)
self._min = check_param_type('box_min', box_min, float)
self._max = check_param_type('box_max', box_max, float)
......
......@@ -61,6 +61,7 @@ class DeepFool(Attack):
norm_level=2, bounds=None, sparse=True):
super(DeepFool, self).__init__()
self._network = check_model('network', network, Cell)
+ self._network.set_grad(True)
self._max_iters = check_int_positive('max_iters', max_iters)
self._overshoot = check_value_positive('overshoot', overshoot)
self._norm_level = check_norm_level(norm_level)
......
......@@ -65,6 +65,7 @@ class JSMAAttack(Attack):
super(JSMAAttack).__init__()
LOGGER.debug(TAG, "init jsma class.")
self._network = check_model('network', network, Cell)
+ self._network.set_grad(True)
self._min = check_value_non_negative('box_min', box_min)
self._max = check_value_non_negative('box_max', box_max)
self._num_classes = check_int_positive('num_classes', num_classes)
......
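The same one-line change, `self._network.set_grad(True)`, is applied to CarliniWagnerL2Attack, DeepFool and JSMAAttack. All three attacks differentiate the target network with respect to its inputs, and enabling gradient mode in the constructor means callers no longer have to do it on the network themselves. A usage sketch, assuming a trained 10-class classifier `net`; the import path is an assumption and not part of this commit:

import numpy as np
from mindarmour.attacks import DeepFool  # import path is an assumption

inputs = np.random.rand(2, 1, 32, 32).astype(np.float32)
labels = np.array([3, 0])
# DeepFool's constructor now switches the network into gradient mode itself.
attack = DeepFool(net, num_classes=10, norm_level=2)
advs = attack.generate(inputs, labels)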
......@@ -74,8 +74,8 @@ class SimilarityDetector(Detector):
Examples:
>>> detector = SimilarityDetector(model)
- >>> detector.fit(Tensor(ori), Tensor(labels))
- >>> adv_ids = detector.detect(Tensor(adv))
+ >>> detector.fit(ori, labels)
+ >>> adv_ids = detector.detect(adv)
"""
def __init__(self, trans_model, max_k_neighbor=1000, chunk_size=1000,
......
......@@ -53,8 +53,8 @@ class RegionBasedDetector(Detector):
Examples:
>>> detector = RegionBasedDetector(model)
- >>> detector.fit(Tensor(ori), Tensor(labels))
- >>> adv_ids = detector.detect(Tensor(adv))
+ >>> detector.fit(ori, labels)
+ >>> adv_ids = detector.detect(adv)
"""
def __init__(self, model, number_points=10, initial_radius=0.0,
......
......@@ -50,8 +50,8 @@ class SpatialSmoothing(Detector):
Examples:
>>> detector = SpatialSmoothing(model)
- >>> detector.fit(Tensor(ori), Tensor(labels))
- >>> adv_ids = detector.detect(Tensor(adv))
+ >>> detector.fit(ori, labels)
+ >>> adv_ids = detector.detect(adv)
"""
def __init__(self, model, ksize=3, is_local_smooth=True,
......
......@@ -112,14 +112,15 @@ class RDPMonitor(Callback):
>>> net_loss = nn.SoftmaxCrossEntropyWithLogits()
>>> epochs = 2
>>> norm_clip = 1.0
- >>> initial_noise_multiplier = 0.01
- >>> mech = MechanismsFactory().create('Gaussian',
+ >>> initial_noise_multiplier = 1.5
+ >>> mech = NoiseMechanismsFactory().create('AdaGaussian',
>>> norm_bound=norm_clip, initial_noise_multiplier=initial_noise_multiplier)
>>> net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
>>> model = DPModel(micro_batches=2, norm_clip=norm_clip,
>>> mech=mech, network=network, loss_fn=loss, optimizer=net_opt, metrics=None)
>>> rdp = PrivacyMonitorFactory.create(policy='rdp',
- >>> num_samples=60000, batch_size=256)
+ >>> num_samples=60000, batch_size=256,
+ >>> initial_noise_multiplier=initial_noise_multiplier)
>>> model.train(epochs, ds, callbacks=[rdp], dataset_sink_mode=False)
"""
......@@ -392,15 +393,16 @@ class ZCDPMonitor(Callback):
>>> net_loss = nn.SoftmaxCrossEntropyWithLogits()
>>> epochs = 2
>>> norm_clip = 1.0
- >>> initial_noise_multiplier = 0.01
- >>> mech = MechanismsFactory().create('Gaussian',
+ >>> initial_noise_multiplier = 1.5
+ >>> mech = NoiseMechanismsFactory().create('AdaGaussian',
>>> norm_bound=norm_clip, initial_noise_multiplier=initial_noise_multiplier)
>>> net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
>>> model = DPModel(micro_batches=2, norm_clip=norm_clip,
>>> mech=mech, network=network, loss_fn=loss, optimizer=net_opt, metrics=None)
- >>> rdp = PrivacyMonitorFactory.create(policy='rdp',
- >>> num_samples=60000, batch_size=256)
- >>> model.train(epochs, ds, callbacks=[rdp], dataset_sink_mode=False)
+ >>> zcdp = PrivacyMonitorFactory.create(policy='zcdp',
+ >>> num_samples=60000, batch_size=256,
+ >>> initial_noise_multiplier=initial_noise_multiplier)
+ >>> model.train(epochs, ds, callbacks=[zcdp], dataset_sink_mode=False)
"""
def __init__(self, num_samples, batch_size, initial_noise_multiplier=1.5,
......
......@@ -70,7 +70,7 @@ class BlackDefenseEvaluate:
>>> def_detection_counts = np.array([1, 5, 10])
>>> true_labels = np.array([3, 1, 0])
>>> max_queries = 100
- >>> def_eval = BlackDefenseEvaluat(raw_preds,
+ >>> def_eval = BlackDefenseEvaluate(raw_preds,
>>> def_preds,
>>> raw_query_counts,
>>> def_query_counts,
......
......@@ -22,6 +22,10 @@ from mindspore import Model
from mindarmour.utils._check_param import check_model, check_numpy_param, \
check_int_positive
from mindarmour.utils.logger import LogUtil
+ LOGGER = LogUtil.get_instance()
+ TAG = 'ModelCoverageMetrics'
class ModelCoverageMetrics:
......@@ -59,6 +63,10 @@ class ModelCoverageMetrics:
self._model = check_model('model', model, Model)
self._segmented_num = check_int_positive('segmented_num', segmented_num)
self._neuron_num = check_int_positive('neuron_num', neuron_num)
+ if self._neuron_num > 1e+10:
+     msg = 'neuron_num should be less than 1e+10, otherwise a MemoryError ' \
+           'would occur'
+     LOGGER.error(TAG, msg)
train_dataset = check_numpy_param('train_dataset', train_dataset)
self._lower_bounds = [np.inf]*neuron_num
self._upper_bounds = [-np.inf]*neuron_num
......
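The added guard is motivated by the bound buffers initialized just below it: `_lower_bounds` and `_upper_bounds` each store one float per neuron, so a huge `neuron_num` exhausts memory before any coverage is computed. A rough, illustrative estimate:

# Each bounds container holds neuron_num float entries; at 8 or more bytes per
# entry, neuron_num = 1e+10 already needs on the order of 75 GiB per container.
neuron_num = 1e+10
print(neuron_num * 8 / 2**30, 'GiB at 8 bytes per entry')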