Commit 97724c2a authored by gx_wind

fix bugs and modify func param name

Parent bf1e0372
@@ -18,22 +18,22 @@ class Attack(object):
     def __init__(self, model):
         self.model = model

-    def __call__(self, image_batch):
+    def __call__(self, image_label):
         """
         Generate the adversarial sample.

         Args:
-            image_batch(list): The image and label tuple list.
+            image_label(list): The image and label tuple list with one element.
         """
-        adv_img = self._apply(image_batch)
+        adv_img = self._apply(image_label)
         return adv_img

     @abstractmethod
-    def _apply(self, image_batch):
+    def _apply(self, image_label):
         """
         Search an adversarial example.

         Args:
-            image_batch(list): The image and label tuple list.
+            image_batch(list): The image and label tuple list with one element.
         """
         raise NotImplementedError
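As the updated docstrings state, an attack is now called with a one-element list of `(image, label)` tuples rather than a general batch. A hypothetical usage sketch under that assumption (`GradientSignAttack` and the `model` wrapper come from the other files touched by this commit; `image` and `label` are placeholders):

```python
# Hypothetical usage; `model` is assumed to wrap a network and expose
# predict/gradient/bounds as in the Model interface changed below.
attack = GradientSignAttack(model)
adv_img = attack([(image, label)])  # exactly one (image, label) tuple
```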
@@ -15,18 +15,19 @@ class GradientSignAttack(Attack):
     Paper link: https://arxiv.org/abs/1412.6572
     """

-    def _apply(self, image_batch, epsilons=1000):
-        pre_label = np.argmax(self.model.predict(image_batch))
+    def _apply(self, image_label, epsilons=1000):
+        assert len(image_label) == 1
+        pre_label = np.argmax(self.model.predict(image_label))

         min_, max_ = self.model.bounds()
-        gradient = self.model.gradient(image_batch)
+        gradient = self.model.gradient(image_label)
         gradient_sign = np.sign(gradient) * (max_ - min_)

         if not isinstance(epsilons, Iterable):
             epsilons = np.linspace(0, 1, num=epsilons + 1)

         for epsilon in epsilons:
-            adv_img = image_batch[0][0].reshape(
+            adv_img = image_label[0][0].reshape(
                 gradient_sign.shape) + epsilon * gradient_sign
             adv_img = np.clip(adv_img, min_, max_)
             adv_label = np.argmax(self.model.predict([(adv_img, 0)]))
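The `_apply` method above implements the fast gradient sign method from the linked paper: it records the model's prediction on the clean image, perturbs the image along the sign of the loss gradient, and sweeps epsilon from 0 to 1, presumably stopping (in lines below the visible part of the hunk) once the predicted label changes. A minimal NumPy sketch of that search; `predict_fn` and `gradient_fn` are hypothetical stand-ins for the model interface:

```python
import numpy as np

def fgsm_search(image, label, predict_fn, gradient_fn, bounds, steps=1000):
    """Return the first adversarial image found along the gradient sign.

    predict_fn/gradient_fn are hypothetical callables standing in for
    Model.predict and Model.gradient in the diff above.
    """
    min_, max_ = bounds
    grad_sign = np.sign(gradient_fn(image, label)) * (max_ - min_)
    for eps in np.linspace(0, 1, num=steps + 1):
        adv = np.clip(image + eps * grad_sign, min_, max_)
        if np.argmax(predict_fn(adv)) != label:
            return adv  # smallest epsilon in the sweep that flips the label
    return None  # no adversarial example found within the epsilon range
```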
@@ -81,8 +81,7 @@ class Model(object):
         Calculate the gradient of the cross-entropy loss w.r.t the image.

         Args:
-            image(numpy.ndarray): image with shape (height, width, channel)
-            label(int): image label used to cal gradient.
+            image_batch(list): The image and label tuple list.

         Return:
             numpy.ndarray: gradient of the cross-entropy loss w.r.t the image with
@@ -49,7 +49,7 @@ class PaddleModel(Model):
         loss = self._program.block(0).var(self._cost_name)
         param_grads = fluid.backward.append_backward(
             loss, parameter_list=[self._input_name])
-        self._gradient = param_grads[0][1]
+        self._gradient = dict(param_grads)[self._input_name]

     def predict(self, image_batch):
         """
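The one-line change above replaces positional indexing into `param_grads` with a lookup keyed by `self._input_name`, so the gradient that is fetched no longer depends on the order in which `append_backward` returns its (parameter, gradient) pairs. A generic illustration of the design choice in plain Python (toy names, not the fluid API):

```python
# Toy (parameter, gradient) pairs with made-up names; in general their
# order is not guaranteed to put the input variable first.
param_grads = [("conv2d_0.w_0", "conv2d_0.w_0@GRAD"), ("img", "img@GRAD")]

fragile = param_grads[0][1]        # "conv2d_0.w_0@GRAD" -- the wrong entry here
robust = dict(param_grads)["img"]  # "img@GRAD" regardless of ordering
```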
@@ -15,7 +15,6 @@ def mnist_cnn_model(img):
     Returns:
         Variable: the label prediction
     """
-    #conv1 = fluid.nets.conv2d()
     conv_pool_1 = fluid.nets.simple_img_conv_pool(
         input=img,
         num_filters=20,
@@ -73,19 +72,15 @@ def main():
             pass_acc = accuracy.eval(exe)
             print("pass_id=" + str(pass_id) + " acc=" + str(acc) + " pass_acc="
                   + str(pass_acc))
-            # print loss, acc
             if loss < LOSS_THRESHOLD and pass_acc > ACC_THRESHOLD:
-                # if avg cost less than 10.0 and accuracy is larger than 0.9, we think our code is good.
                 break
-        # exit(0)

         pass_acc = accuracy.eval(exe)
         print("pass_id=" + str(pass_id) + " pass_acc=" + str(pass_acc))
     fluid.io.save_params(
         exe, dirname='./mnist', main_program=fluid.default_main_program())
     print('train mnist done')
-    exit(1)


 if __name__ == '__main__':
     main()
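After training, the parameters are saved to `./mnist` with `fluid.io.save_params`; the attack scripts in this repo presumably load them back before running the attacks. A hedged sketch of what that loading step looks like, assuming the same program and an executor set up as at training time:

```python
# Hypothetical loading of the saved MNIST parameters for the attack stage.
import paddle.v2.fluid as fluid  # assumes the fluid import style of this repo

exe = fluid.Executor(fluid.CPUPlace())
fluid.io.load_params(
    exe, dirname='./mnist', main_program=fluid.default_main_program())
```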