Commit 8e8e5a89 authored by gx_wind

fix coding standard

Parent bbb03fce
# Advbox

Advbox is a Python toolbox to create adversarial examples that fool neural networks. It requires Python and PaddlePaddle.

## How to use

1. Train a model and save its parameters (see fluid_mnist.py).
2. Load the parameters trained in step 1 and reconstruct the same model (see mnist_tutorial_fgsm.py).
3. Use advbox to generate adversarial examples, as in the sketch below.
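To make the three steps concrete, here is a minimal end-to-end sketch. It assumes the `PaddleModel` and `GradientSignAttack` classes touched by this commit; the module paths, the network definition, the parameter directory `./mnist_params/`, and the constructor argument order are illustrative assumptions rather than verbatim code from fluid_mnist.py or mnist_tutorial_fgsm.py.

```python
# Hypothetical sketch of steps 1-3 (Python 2 / paddle.v2.fluid era); names and
# argument order marked below are assumptions, not verbatim tutorial code.
import paddle.v2 as paddle
import paddle.v2.fluid as fluid

from advbox.models.paddle import PaddleModel                 # assumed module path
from advbox.attacks.gradientsign import GradientSignAttack   # assumed module path

# Step 2: rebuild the network that fluid_mnist.py trained, then load its parameters.
img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
logits = fluid.layers.fc(input=img, size=10, act='softmax')
cost = fluid.layers.mean(x=fluid.layers.cross_entropy(input=logits, label=label))

place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
fluid.io.load_params(exe, './mnist_params/')                 # directory is illustrative

# Wrap the program so advbox can query predictions and input gradients.
model = PaddleModel(
    fluid.default_main_program(), 'img', 'label',
    logits.name, cost.name, bounds=(0.0, 1.0))               # argument order assumed

# Step 3: run FGSM on one test sample; the __call__ interface is assumed.
attack = GradientSignAttack(model)
sample = next(paddle.dataset.mnist.test()())                 # one (image, label) pair
adv_img = attack([sample], epsilons=[0.01, 0.1, 0.3])
```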
@@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A set of tools for generating adversarial examples on the Paddle platform.
"""
@@ -7,6 +7,7 @@ import abc
abstractmethod = abc.abstractmethod


class Attack(object):
    """
    Abstract base class for adversarial attacks. `Attack` represents an adversarial attack
...
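The rest of base.py is collapsed in this view. For orientation only, the shape of such an abstract attack base class might look like the sketch below; the `__call__`/`_apply` split and the `model` attribute are assumptions inferred from how `GradientSignAttack` uses them, not the file's verbatim contents.

```python
# Illustrative sketch of the assumed Attack base-class pattern; not the real base.py.
import abc

abstractmethod = abc.abstractmethod


class Attack(object):
    """Abstract base class for adversarial attacks (assumed interface)."""

    def __init__(self, model):
        # The FGSM subclass reads self.model, so the model is stored here.
        self.model = model

    def __call__(self, image_batch, **kwargs):
        # Delegate to the subclass-specific attack logic.
        return self._apply(image_batch, **kwargs)

    @abstractmethod
    def _apply(self, image_batch, **kwargs):
        """Return an adversarial version of the input batch."""
        raise NotImplementedError
```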
@@ -6,6 +6,7 @@ import numpy as np
from collections import Iterable
from .base import Attack


class GradientSignAttack(Attack):
    """
    This attack was originally implemented by Goodfellow et al. (2015) with the
@@ -22,10 +23,11 @@ class GradientSignAttack(Attack):
        gradient_sign = np.sign(gradient) * (max_ - min_)
        if not isinstance(epsilons, Iterable):
-            epsilons = np.linspace(0, 1, num = epsilons + 1)
+            epsilons = np.linspace(0, 1, num=epsilons + 1)
        for epsilon in epsilons:
-            adv_img = image_batch[0][0].reshape(gradient_sign.shape) + epsilon * gradient_sign
+            adv_img = image_batch[0][0].reshape(
+                gradient_sign.shape) + epsilon * gradient_sign
            adv_img = np.clip(adv_img, min_, max_)
            adv_label = np.argmax(self.model.predict([(adv_img, 0)]))
            #print("pre_label="+str(pre_label)+ " adv_label="+str(adv_label))
@@ -33,4 +35,5 @@ class GradientSignAttack(Attack):
            #print(epsilon, pre_label, adv_label)
        return adv_img


FGSM = GradientSignAttack
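The loop above is the fast gradient sign method: perturb the input by epsilon times the sign of the loss gradient (scaled to the value range), clip back into bounds, and stop once the predicted label changes. A self-contained NumPy illustration of the same update, with placeholder data instead of a Paddle model, looks like this:

```python
# Minimal NumPy illustration of the FGSM update; image and gradient are
# random placeholders, not values from the commit.
import numpy as np

min_, max_ = 0.0, 1.0                     # valid pixel range
image = np.random.rand(28, 28)            # placeholder input image
gradient = np.random.randn(28, 28)        # placeholder dLoss/dInput

gradient_sign = np.sign(gradient) * (max_ - min_)
for epsilon in np.linspace(0, 1, num=11)[1:]:
    adv_img = np.clip(image + epsilon * gradient_sign, min_, max_)
    # The real attack calls model.predict() here and returns adv_img as soon
    # as the predicted label differs from the original one.
```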
@@ -6,8 +6,8 @@ import abc
abstractmethod = abc.abstractmethod


class Model(object):
    """
    Base class of the models used by the attacks.
...
@@ -7,6 +7,7 @@ from paddle.v2.fluid.framework import program_guard
from .base import Model


class PaddleModel(Model):
    """
    Create a PaddleModel instance.
@@ -30,9 +31,7 @@ class PaddleModel(Model):
                 channel_axis=3,
                 preprocess=None):
        super(PaddleModel, self).__init__(
-            bounds=bounds,
-            channel_axis=channel_axis,
-            preprocess=preprocess)
+            bounds=bounds, channel_axis=channel_axis, preprocess=preprocess)
        if preprocess is None:
            preprocess = (0, 1)
@@ -48,7 +47,8 @@ class PaddleModel(Model):
        # gradient
        loss = self._program.block(0).var(self._cost_name)
-        param_grads = fluid.backward.append_backward(loss, parameter_list=[self._input_name])
+        param_grads = fluid.backward.append_backward(
+            loss, parameter_list=[self._input_name])
        self._gradient = param_grads[0][1]

    def predict(self, image_batch):
@@ -63,14 +63,11 @@ class PaddleModel(Model):
        feeder = fluid.DataFeeder(
            feed_list=[self._input_name, self._logits_name],
            place=self._place,
-            program=self._program
-        )
+            program=self._program)
        predict_var = self._program.block(0).var(self._predict_name)
-        predict = self._exe.run(
-            self._program,
+        predict = self._exe.run(self._program,
            feed=feeder.feed(image_batch),
-            fetch_list=[predict_var]
-        )
+            fetch_list=[predict_var])
        return predict

    def num_classes(self):
@@ -96,11 +93,9 @@ class PaddleModel(Model):
        feeder = fluid.DataFeeder(
            feed_list=[self._input_name, self._logits_name],
            place=self._place,
-            program=self._program
-        )
+            program=self._program)
-        grad, = self._exe.run(
-            self._program,
+        grad, = self._exe.run(self._program,
            feed=feeder.feed(image_batch),
            fetch_list=[self._gradient])
        return grad
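Taken together, the wrapper exposes the two operations the attacks need: `predict()` runs the program forward to get class scores, and the method whose body appears in the last hunk fetches the gradient of the loss with respect to the input, which `append_backward` registered in the constructor. Below is a hedged sketch of how an attack might consume this interface; the method name `gradient` and the `(image, label)` batch layout are assumptions based on the FGSM code above, not confirmed by the visible hunks.

```python
# Sketch only: `model` is a PaddleModel-like wrapper; the gradient() name and
# batch layout are assumptions, not taken verbatim from this commit.
import numpy as np


def fgsm_step(model, image, label, epsilon=0.1, bounds=(0.0, 1.0)):
    """One FGSM step driven by the wrapper's predict()/gradient() interface."""
    min_, max_ = bounds
    batch = [(image, label)]                  # layout inferred from the FGSM diff
    grad = model.gradient(batch)              # dLoss/dInput set up by append_backward
    adv = np.clip(
        image.reshape(grad.shape) + epsilon * np.sign(grad), min_, max_)
    adv_label = np.argmax(model.predict([(adv, 0)]))
    return adv, adv_label
```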