Commit a233fed9 authored by mindspore-ci-bot, committed by Gitee

!20 solve DI [MS][MindArmour][Doc] some examples of mindarmour need to be added and usage made clear https://gitee.com/mindspore/dashboard?issue_id=I1GSTN
Merge pull request !20 from ZhidanLiu/master
......@@ -47,6 +47,12 @@ class GradientMethod(Attack):
bounds (tuple): Upper and lower bounds of data, indicating the data range.
In form of (clip_min, clip_max). Default: None.
loss_fn (Loss): Loss function for optimization. Default: None.
Examples:
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
>>> attack = FastGradientMethod(network)
>>> adv_x = attack.generate(inputs, labels)
"""
def __init__(self, network, eps=0.07, alpha=None, bounds=None,
......@@ -84,11 +90,6 @@ class GradientMethod(Attack):
Returns:
numpy.ndarray, generated adversarial examples.
Examples:
>>> adv_x = attack.generate([[0.1, 0.2, 0.6], [0.3, 0, 0.4]],
...                         [[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
...                          [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]])
"""
inputs, labels = check_pair_numpy_param('inputs', inputs,
'labels', labels)
......@@ -154,7 +155,10 @@ class FastGradientMethod(GradientMethod):
loss_fn (Loss): Loss function for optimization. Default: None.
Examples:
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
>>> attack = FastGradientMethod(network)
>>> adv_x = attack.generate(inputs, labels)
"""
def __init__(self, network, eps=0.07, alpha=None, bounds=(0.0, 1.0),
......@@ -178,10 +182,6 @@ class FastGradientMethod(GradientMethod):
Returns:
numpy.ndarray, gradient of inputs.
Examples:
>>> grad = self._gradient([[0.2, 0.3, 0.4]],
...                       [[0, 1, 0, 0, 0, 0, 0, 0, 0, 0]])
"""
out_grad = self._grad_all(Tensor(inputs), Tensor(labels))
if isinstance(out_grad, tuple):
......@@ -219,7 +219,10 @@ class RandomFastGradientMethod(FastGradientMethod):
ValueError: eps is smaller than alpha!
Examples:
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
>>> attack = RandomFastGradientMethod(network)
>>> adv_x = attack.generate(inputs, labels)
"""
def __init__(self, network, eps=0.07, alpha=0.035, bounds=(0.0, 1.0),
......@@ -257,7 +260,10 @@ class FastGradientSignMethod(GradientMethod):
loss_fn (Loss): Loss function for optimization. Default: None.
Examples:
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
>>> attack = FastGradientSignMethod(network)
>>> adv_x = attack.generate(inputs, labels)
"""
def __init__(self, network, eps=0.07, alpha=None, bounds=(0.0, 1.0),
......@@ -280,10 +286,6 @@ class FastGradientSignMethod(GradientMethod):
Returns:
numpy.ndarray, gradient of inputs.
Examples:
>>> grad = self._gradient([[0.2, 0.3, 0.4]],
...                       [[0, 1, 0, 0, 0, 0, 0, 0, 0, 0]])
"""
out_grad = self._grad_all(Tensor(inputs), Tensor(labels))
if isinstance(out_grad, tuple):
......@@ -318,7 +320,10 @@ class RandomFastGradientSignMethod(FastGradientSignMethod):
ValueError: eps is smaller than alpha!
Examples:
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
>>> attack = RandomFastGradientSignMethod(network)
>>> adv_x = attack.generate(inputs, labels)
"""
def __init__(self, network, eps=0.07, alpha=0.035, bounds=(0.0, 1.0),
......@@ -351,7 +356,10 @@ class LeastLikelyClassMethod(FastGradientSignMethod):
loss_fn (Loss): Loss function for optimization. Default: None.
Examples:
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
>>> attack = LeastLikelyClassMethod(network)
>>> adv_x = attack.generate(inputs, labels)
"""
def __init__(self, network, eps=0.07, alpha=None, bounds=(0.0, 1.0),
......@@ -385,7 +393,10 @@ class RandomLeastLikelyClassMethod(FastGradientSignMethod):
ValueError: eps is smaller than alpha!
Examples:
>>> inputs = np.array([[0.1, 0.2, 0.6], [0.3, 0, 0.4]])
>>> labels = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 0, 0]])
>>> attack = RandomLeastLikelyClassMethod(network)
>>> adv_x = attack.generate(inputs, labels)
"""
def __init__(self, network, eps=0.07, alpha=0.035, bounds=(0.0, 1.0),
......
......@@ -47,6 +47,16 @@ class ErrorBasedDetector(Detector):
Default: 0.01.
bounds (tuple): (clip_min, clip_max). Default: (0.0, 1.0).
Examples:
>>> np.random.seed(5)
>>> ori = np.random.rand(4, 4, 4).astype(np.float32)
>>> np.random.seed(6)
>>> adv = np.random.rand(4, 4, 4).astype(np.float32)
>>> model = Model(Net())
>>> detector = ErrorBasedDetector(model)
>>> detector.fit(ori)
>>> detected_res = detector.detect(adv)
>>> adv_trans = detector.transform(adv)
"""
def __init__(self, auto_encoder, false_positive_rate=0.01,
......@@ -159,6 +169,19 @@ class DivergenceBasedDetector(ErrorBasedDetector):
t (int): Temperature used to overcome numerical problem. Default: 1.
bounds (tuple): Upper and lower bounds of data.
In form of (clip_min, clip_max). Default: (0.0, 1.0).
Examples:
>>> np.random.seed(5)
>>> ori = np.random.rand(4, 4, 4).astype(np.float32)
>>> np.random.seed(6)
>>> adv = np.random.rand(4, 4, 4).astype(np.float32)
>>> encoder = Model(Net())
>>> model = Model(PredNet())
>>> detector = DivergenceBasedDetector(encoder, model)
>>> threshold = detector.fit(ori)
>>> detector.set_threshold(threshold)
>>> detected_res = detector.detect(adv)
>>> adv_trans = detector.transform(adv)
"""
def __init__(self, auto_encoder, model, option="jsd",
......
......@@ -37,6 +37,16 @@ class ModelCoverageMetrics:
n (int): The number of testing neurons.
train_dataset (numpy.ndarray): Training dataset used to determine
the neurons' output boundaries.
Examples:
>>> train_images = np.random.random((10000, 128)).astype(np.float32)
>>> test_images = np.random.random((5000, 128)).astype(np.float32)
>>> model = Model(net)
>>> model_fuzz_test = ModelCoverageMetrics(model, 10000, 10, train_images)
>>> model_fuzz_test.test_adequacy_coverage_calculate(test_images)
>>> print('KMNC of this test is : %s' % model_fuzz_test.get_kmnc())
>>> print('NBC of this test is : %s' % model_fuzz_test.get_nbc())
>>> print('SNAC of this test is : %s' % model_fuzz_test.get_snac())
"""
def __init__(self, model, k, n, train_dataset):
......@@ -163,7 +173,7 @@ class ModelCoverageMetrics:
Get the metric of 'strong neuron activation coverage'.
Returns:
float, the metric of 'strong neuron activation coverage'.
Examples:
>>> model_fuzz_test.get_snac()
......
......@@ -92,6 +92,18 @@ class GradWrapWithLoss(Cell):
"""
Construct a network to compute the gradient of the loss function with
respect to the input space, weighted by `weight`.
Args:
network (Cell): The target network to wrap.
Examples:
>>> data = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32)*0.01)
>>> label = Tensor(np.ones([1, 10]).astype(np.float32))
>>> net = NET()
>>> loss_fn = nn.SoftmaxCrossEntropyWithLogits()
>>> loss_net = WithLossCell(net, loss_fn)
>>> grad_all = GradWrapWithLoss(loss_net)
>>> out_grad = grad_all(data, label)
"""
def __init__(self, network):
......@@ -120,6 +132,19 @@ class GradWrap(Cell):
"""
Construct a network to compute the gradient of the network outputs with
respect to the input space, weighted by `weight` and expressed as a
Jacobian matrix.
Args:
network (Cell): The target network to wrap.
Examples:
>>> data = Tensor(np.ones([1, 1, 32, 32]).astype(np.float32)*0.01)
>>> label = Tensor(np.ones([1, 10]).astype(np.float32))
>>> num_classes = 10
>>> sens = np.zeros((data.shape[0], num_classes)).astype(np.float32)
>>> sens[:, 1] = 1.0
>>> net = NET()
>>> wrap_net = GradWrap(net)
>>> wrap_net(data, Tensor(sens))
"""
def __init__(self, network):
......