提交 8e8e5a89 编写于 作者: G gx_wind

fix coding standard

上级 bbb03fce
# Advbox
Advbox is a Python toolbox to create adversarial examples that fool neural networks. It requires Python and paddle.
\ No newline at end of file
Advbox is a Python toolbox to create adversarial examples that fool neural networks. It requires Python and paddle.
## How to use
1. train a model and save its parameters. (like fluid_mnist.py)
2. load the parameters trained in step 1, then reconstruct the model. (like mnist_tutorial_fgsm.py)
3. use Advbox to generate adversarial samples.
......@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A set of tools for generating adversarial example on paddle platform
"""
......@@ -7,6 +7,7 @@ import abc
abstractmethod = abc.abstractmethod
class Attack(object):
"""
Abstract base class for adversarial attacks. `Attack` represent an adversarial attack
......
......@@ -5,7 +5,8 @@ from __future__ import division
import numpy as np
from collections import Iterable
from .base import Attack
class GradientSignAttack(Attack):
"""
This attack was originally implemented by Goodfellow et al. (2015) with the
......@@ -22,10 +23,11 @@ class GradientSignAttack(Attack):
gradient_sign = np.sign(gradient) * (max_ - min_)
if not isinstance(epsilons, Iterable):
epsilons = np.linspace(0, 1, num = epsilons + 1)
epsilons = np.linspace(0, 1, num=epsilons + 1)
for epsilon in epsilons:
adv_img = image_batch[0][0].reshape(gradient_sign.shape) + epsilon * gradient_sign
adv_img = image_batch[0][0].reshape(
gradient_sign.shape) + epsilon * gradient_sign
adv_img = np.clip(adv_img, min_, max_)
adv_label = np.argmax(self.model.predict([(adv_img, 0)]))
#print("pre_label="+str(pre_label)+ " adv_label="+str(adv_label))
......@@ -33,4 +35,5 @@ class GradientSignAttack(Attack):
#print(epsilon, pre_label, adv_label)
return adv_img
FGSM = GradientSignAttack
......@@ -6,8 +6,8 @@ import abc
abstractmethod = abc.abstractmethod
class Model(object):
class Model(object):
"""
Base class of model to provide attack.
......
......@@ -7,6 +7,7 @@ from paddle.v2.fluid.framework import program_guard
from .base import Model
class PaddleModel(Model):
"""
Create a PaddleModel instance.
......@@ -30,9 +31,7 @@ class PaddleModel(Model):
channel_axis=3,
preprocess=None):
super(PaddleModel, self).__init__(
bounds=bounds,
channel_axis=channel_axis,
preprocess=preprocess)
bounds=bounds, channel_axis=channel_axis, preprocess=preprocess)
if preprocess is None:
preprocess = (0, 1)
......@@ -48,7 +47,8 @@ class PaddleModel(Model):
# gradient
loss = self._program.block(0).var(self._cost_name)
param_grads = fluid.backward.append_backward(loss, parameter_list=[self._input_name])
param_grads = fluid.backward.append_backward(
loss, parameter_list=[self._input_name])
self._gradient = param_grads[0][1]
def predict(self, image_batch):
......@@ -61,16 +61,13 @@ class PaddleModel(Model):
numpy.ndarray: predictions of the images with shape (batch_size, num_of_classes).
"""
feeder = fluid.DataFeeder(
feed_list=[self._input_name, self._logits_name],
place=self._place,
program=self._program
)
feed_list=[self._input_name, self._logits_name],
place=self._place,
program=self._program)
predict_var = self._program.block(0).var(self._predict_name)
predict = self._exe.run(
self._program,
feed=feeder.feed(image_batch),
fetch_list=[predict_var]
)
predict = self._exe.run(self._program,
feed=feeder.feed(image_batch),
fetch_list=[predict_var])
return predict
def num_classes(self):
......@@ -95,12 +92,10 @@ class PaddleModel(Model):
"""
feeder = fluid.DataFeeder(
feed_list=[self._input_name, self._logits_name],
place=self._place,
program=self._program
)
grad, = self._exe.run(
self._program,
feed=feeder.feed(image_batch),
fetch_list=[self._gradient])
place=self._place,
program=self._program)
grad, = self._exe.run(self._program,
feed=feeder.feed(image_batch),
fetch_list=[self._gradient])
return grad
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册