Commit eaf2d555 authored by wgzqz

Remove batch predict. We'll add a new batch_predict function if needed.

Parent 2bf99129
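In practice this commit narrows the model interface from batch-style to single-sample calls: `predict` now takes a bare numpy array and `gradient` takes the array plus an integer label. A minimal before/after sketch of the calling convention; `_ToyModel`, `model`, and `image` are illustrative stand-ins, not code from this repository:

import numpy as np

class _ToyModel(object):
    """Stand-in implementing the new single-sample interface (illustrative only)."""

    def predict(self, data):
        # pretend class scores for a 3-class problem
        return np.array([0.1, 0.7, 0.2])

    def gradient(self, data, label):
        # pretend gradient with the same shape as the input sample
        return np.zeros_like(data)

model = _ToyModel()
image = np.zeros((28, 28, 1), dtype='float32')

# Old interface (removed by this commit): a one-element list of (data, label)
# tuples, with a dummy label 0 just to get a prediction:
#     probs = model.predict([(image, 0)])
#     grad  = model.gradient([(image, label)])
# New interface:
probs = model.predict(image)            # pass the sample directly
label = int(np.argmax(probs))           # predicted label
grad = model.gradient(image, label)     # gradient, same shape as image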
@@ -54,7 +54,7 @@ class Attack(object):
         """
         if adversary.original_label is None:
             adversary.original_label = np.argmax(
-                self.model.predict([(adversary.original, 0)]))
+                self.model.predict(adversary.original))
         if adversary.is_targeted_attack and adversary.target_label is None:
             if adversary.target is None:
                 raise ValueError(
@@ -62,7 +62,8 @@ class Attack(object):
                     'adversary.target_label or adversary.target must be set.')
             else:
                 adversary.target_label_label = np.argmax(
-                    self.model.predict([(adversary.target_label, 0)]))
+                    self.model.predict(
+                        self.model.scale_input(adversary.target)))

         logging.info('adversary:\noriginal_label: {}'
                      '\n target_lable: {}'
...
@@ -33,7 +33,7 @@ class DeepFoolAttack(Attack):
         pre_label = adversary.original_label
         min_, max_ = self.model.bounds()

-        f = self.model.predict([(adversary.original, 0)])
+        f = self.model.predict(adversary.original)
         if adversary.is_targeted_attack:
             labels = [adversary.target_label]
         else:
@@ -44,7 +44,7 @@ class DeepFoolAttack(Attack):
             else:
                 labels = np.arange(class_count)

-        gradient = self.model.gradient([(adversary.original, pre_label)])
+        gradient = self.model.gradient(adversary.original, pre_label)
         x = adversary.original
         for iteration in xrange(iterations):
             w = np.inf
@@ -53,7 +53,7 @@ class DeepFoolAttack(Attack):
             for k in labels:
                 if k == pre_label:
                     continue
-                gradient_k = self.model.gradient([(x, k)])
+                gradient_k = self.model.gradient(x, k)
                 w_k = gradient_k - gradient
                 f_k = f[k] - f[pre_label]
                 w_k_norm = np.linalg.norm(w_k) + 1e-8
@@ -67,8 +67,8 @@ class DeepFoolAttack(Attack):
             x = x + (1 + overshoot) * r_i
             x = np.clip(x, min_, max_)
-            f = self.model.predict([(x, 0)])
-            gradient = self.model.gradient([(x, pre_label)])
+            f = self.model.predict(x)
+            gradient = self.model.gradient(x, pre_label)
             adv_label = np.argmax(f)
             logging.info('iteration = {}, f = {}, pre_label = {}'
                          ', adv_label={}'.format(iteration, f[pre_label],
...
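The DeepFool hunks above elide the selection of the perturbation `r_i`; only the working variables (`w_k`, `f_k`, `w_k_norm`, `overshoot`) are visible. For reference only, the textbook untargeted L2 DeepFool step those variables correspond to looks roughly like this; `deepfool_step` and `grads_k` are illustrative names, not code from this repository:

import numpy as np

def deepfool_step(f, gradient, grads_k, labels, pre_label, overshoot=0.02):
    """One textbook DeepFool update: pick the closest linearized decision
    boundary among `labels` and step (1 + overshoot) past it.

    f: 1-D class scores; gradient: gradient w.r.t. pre_label;
    grads_k: dict mapping label k -> gradient w.r.t. class k.
    """
    w, pert = None, np.inf
    for k in labels:
        if k == pre_label:
            continue
        w_k = grads_k[k] - gradient                 # linearized boundary normal
        f_k = f[k] - f[pre_label]                   # score gap to class k
        w_k_norm = np.linalg.norm(w_k) + 1e-8
        pert_k = abs(f_k) / w_k_norm                # distance to boundary k
        if pert_k < pert:
            pert, w = pert_k, w_k
    r_i = pert * w / (np.linalg.norm(w) + 1e-8)     # minimal L2 step
    return (1 + overshoot) * r_i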
@@ -37,18 +37,18 @@ class GradientSignAttack(Attack):
         min_, max_ = self.model.bounds()
         if adversary.is_targeted_attack:
-            gradient = self.model.gradient([(adversary.original,
-                                             adversary.target_label)])
+            gradient = self.model.gradient(adversary.original,
+                                           adversary.target_label)
             gradient_sign = -np.sign(gradient) * (max_ - min_)
         else:
-            gradient = self.model.gradient([(adversary.original,
-                                             adversary.original_label)])
+            gradient = self.model.gradient(adversary.original,
+                                           adversary.original_label)
             gradient_sign = np.sign(gradient) * (max_ - min_)

         for epsilon in epsilons:
             adv_img = adversary.original + epsilon * gradient_sign
             adv_img = np.clip(adv_img, min_, max_)
-            adv_label = np.argmax(self.model.predict([(adv_img, 0)]))
+            adv_label = np.argmax(self.model.predict(adv_img))
             logging.info('epsilon = {:.3f}, pre_label = {}, adv_label={}'.
                          format(epsilon, pre_label, adv_label))
             if adversary.try_accept_the_example(adv_img, adv_label):
...
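Pulled out of the hunk above, a single untargeted FGSM step under the new interface reads as follows. This is a minimal sketch assembled from the lines shown in the diff; `fgsm_step` is an illustrative name, not a function in the repository:

import numpy as np

def fgsm_step(model, image, label, epsilon):
    """One untargeted fast-gradient-sign step with the single-sample API.

    Mirrors the GradientSignAttack hunk: take the gradient for one image and
    an integer label, step along its sign, and clip to the model's bounds.
    """
    min_, max_ = model.bounds()
    gradient = model.gradient(image, label)
    gradient_sign = np.sign(gradient) * (max_ - min_)
    adv_img = np.clip(image + epsilon * gradient_sign, min_, max_)
    adv_label = int(np.argmax(model.predict(adv_img)))
    return adv_img, adv_label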
@@ -38,16 +38,16 @@ class IteratorGradientSignAttack(Attack):
         adv_img = adversary.original
         for _ in range(steps):
             if adversary.is_targeted_attack:
-                gradient = self.model.gradient([(adversary.original,
-                                                 adversary.target_label)])
+                gradient = self.model.gradient(adversary.original,
+                                               adversary.target_label)
                 gradient_sign = -np.sign(gradient) * (max_ - min_)
             else:
-                gradient = self.model.gradient([(adversary.original,
-                                                 adversary.original_label)])
+                gradient = self.model.gradient(adversary.original,
+                                               adversary.original_label)
                 gradient_sign = np.sign(gradient) * (max_ - min_)
             adv_img = adv_img + gradient_sign * epsilon
             adv_img = np.clip(adv_img, min_, max_)
-            adv_label = np.argmax(self.model.predict([(adv_img, 0)]))
+            adv_label = np.argmax(self.model.predict(adv_img))
             logging.info('epsilon = {:.3f}, pre_label = {}, adv_label={}'.
                          format(epsilon, pre_label, adv_label))
             if adversary.try_accept_the_example(adv_img, adv_label):
...
@@ -43,26 +43,31 @@ class Model(object):
         return self._channel_axis

     def _process_input(self, input_):
-        res = input_
+        res = None
         sub, div = self._preprocess
         if np.any(sub != 0):
             res = input_ - sub
         assert np.any(div != 0)
         if np.any(div != 1):
-            res /= div
+            if res is None:  # "res = input_ - sub" is not executed!
+                res = input_ / div
+            else:
+                res /= div
+        if res is None:  # "res = (input_ - sub)/ div" is not executed!
+            return input_
         return res

     @abstractmethod
-    def predict(self, image_batch):
+    def predict(self, data):
         """
-        Calculate the prediction of the image batch.
+        Calculate the prediction of the data.

         Args:
-            image_batch(numpy.ndarray): image batch of shape (batch_size,
+            data(numpy.ndarray): input data with shape (size,
                 height, width, channels).

         Return:
-            numpy.ndarray: predictions of the images with shape (batch_size,
+            numpy.ndarray: predictions of the data with shape (batch_size,
                 num_of_classes).
         """
         raise NotImplementedError
@@ -78,12 +83,14 @@ class Model(object):
         raise NotImplementedError

     @abstractmethod
-    def gradient(self, image_batch):
+    def gradient(self, data, label):
         """
         Calculate the gradient of the cross-entropy loss w.r.t the image.

         Args:
-            image_batch(list): The image and label tuple list.
+            data(numpy.ndarray): input data with shape (size, height, width,
+                channels).
+            label(int): Label used to calculate the gradient.

         Return:
             numpy.ndarray: gradient of the cross-entropy loss w.r.t the image
...
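The `_process_input` rewrite above is more than a cosmetic change: in the old code, when `sub` was all zeros, `res` still aliased `input_`, so the in-place `res /= div` silently scaled the caller's original array (the comments added in the new code point at exactly this case). A minimal sketch of the pitfall with plain numpy; the names `old_process` and `img` are illustrative:

import numpy as np

def old_process(input_, sub, div):
    # Old behaviour: res may alias input_, so "res /= div" mutates the
    # caller's array whenever sub is all zeros.
    res = input_
    if np.any(sub != 0):
        res = input_ - sub
    if np.any(div != 1):
        res /= div
    return res

img = np.ones((2, 2), dtype=np.float32)
_ = old_process(img, sub=np.float32(0), div=np.float32(255))
print(img)  # no longer all ones: the original image was scaled in place

The new version computes `input_ / div` into a fresh array in that branch and returns `input_` untouched when no preprocessing applies.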
@@ -3,6 +3,7 @@ Paddle model
 """
 from __future__ import absolute_import

+import numpy as np
 import paddle.v2.fluid as fluid

 from .base import Model
@@ -54,24 +55,28 @@ class PaddleModel(Model):
         self._gradient = filter(lambda p: p[0].name == self._input_name,
                                 param_grads)[0][1]

-    def predict(self, image_batch):
+    def predict(self, data):
         """
-        Predict the label of the image_batch.
+        Calculate the prediction of the data.

         Args:
-            image_batch(list): The image and label tuple list.
+            data(numpy.ndarray): input data with shape (size,
+                height, width, channels).

         Return:
-            numpy.ndarray: predictions of the images with shape (batch_size,
+            numpy.ndarray: predictions of the data with shape (batch_size,
                 num_of_classes).
         """
+        scaled_data = self._process_input(data)
         feeder = fluid.DataFeeder(
             feed_list=[self._input_name, self._logits_name],
             place=self._place,
             program=self._program)
         predict_var = self._program.block(0).var(self._predict_name)
         predict = self._exe.run(self._program,
-                                feed=feeder.feed(image_batch),
+                                feed=feeder.feed([(scaled_data, 0)]),
                                 fetch_list=[predict_var])
+        predict = np.squeeze(predict, axis=0)
         return predict

     def num_classes(self):
@@ -85,21 +90,27 @@ class PaddleModel(Model):
         assert len(predict_var.shape) == 2
         return predict_var.shape[1]

-    def gradient(self, image_batch):
+    def gradient(self, data, label):
         """
-        Calculate the gradient of the loss w.r.t the input.
+        Calculate the gradient of the cross-entropy loss w.r.t the image.

         Args:
-            image_batch(list): The image and label tuple list.
+            data(numpy.ndarray): input data with shape (size, height, width,
+                channels).
+            label(int): Label used to calculate the gradient.

         Return:
-            list: The list of the gradient of the image.
+            numpy.ndarray: gradient of the cross-entropy loss w.r.t the image
+                with the shape (height, width, channel).
         """
+        scaled_data = self._process_input(data)
         feeder = fluid.DataFeeder(
             feed_list=[self._input_name, self._logits_name],
             place=self._place,
             program=self._program)
         grad, = self._exe.run(self._program,
-                              feed=feeder.feed(image_batch),
+                              feed=feeder.feed([(scaled_data, label)]),
                               fetch_list=[self._gradient])
-        return grad.reshape(image_batch[0][0].shape)
+        return grad.reshape(data.shape)
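With the batch-style interface gone, callers now hand `PaddleModel` one raw (height, width, channels) sample; `_process_input` scaling and the `[(scaled_data, label)]` wrapping for `fluid.DataFeeder` happen inside the model. A rough round-trip sketch using only the methods shown in this diff; `predict_and_grad` is an illustrative helper, not part of the repository:

import numpy as np

def predict_and_grad(model, image):
    """Single-sample round trip through the new interface.

    `model` is any Model subclass (e.g. an already-built PaddleModel);
    `image` is one sample in (height, width, channels) layout.
    """
    probs = model.predict(image)            # class scores for this one image
    label = int(np.argmax(probs))           # predicted label
    grad = model.gradient(image, label)     # gradient, reshaped to image.shape
    assert grad.shape == image.shape
    return label, grad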