diff --git a/.copyright.hook b/.copyright.hook
index e28e88e2664b25b5aecce6660470c485b20a1a6e..de97ce90ac4a80d8d0532ea8898385b2e5781ce0 100644
--- a/.copyright.hook
+++ b/.copyright.hook
@@ -90,7 +90,7 @@ def main(argv=None):
     retv = 0
     for filename in args.filenames:
         first_line = io.open(filename).readline()
-        if "Copyright" in first_line: continue
+        if "COPYRIGHT" in first_line.upper(): continue
         original_contents = io.open(filename).read()
         new_contents = generate_copyright(
             COPYRIGHT, lang_type(filename)) + original_contents
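Note: the new check upper-cases the first line before searching, so an existing notice is recognized regardless of capitalization. A tiny standalone sketch of the comparison; the sample first lines below are made up for illustration, not taken from the repository:

```python
# Stand-alone illustration of the case-insensitive first-line check.
samples = [
    '# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.',
    '// COPYRIGHT 2018, some third-party header',
    'import paddle.v2 as paddle',  # no notice at all
]
for first_line in samples:
    print("COPYRIGHT" in first_line.upper())  # True, True, False
```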
diff --git a/adversarial/advbox/attacks/gradientsign.py b/adversarial/advbox/attacks/gradientsign.py
index 77d93bd793936abbaae0302050e2bcf714adfa1a..cc26ffb69020a87f559c537f03de84f7c2bea2de 100644
--- a/adversarial/advbox/attacks/gradientsign.py
+++ b/adversarial/advbox/attacks/gradientsign.py
@@ -49,3 +49,39 @@ class GradientSignAttack(Attack):
FGSM = GradientSignAttack
+
+
+class IteratorGradientSignAttack(Attack):
+    """
+    This attack was originally implemented by Alexey Kurakin (Google Brain).
+    Paper link: https://arxiv.org/pdf/1607.02533.pdf
+    """
+
+    def _apply(self, image_label, epsilons=100, steps=10):
+        """
+        Apply the iterative gradient sign attack.
+        Args:
+            image_label(list): The image and label tuple list with one element.
+            epsilons(list|tuple|int): The epsilon (input variation parameter).
+            steps(int): The number of iteration steps.
+        Return:
+            numpy.ndarray: The adversarial sample generated by the algorithm.
+        """
+        assert len(image_label) == 1
+        pre_label = np.argmax(self.model.predict(image_label))
+        gradient = self.model.gradient(image_label)
+        min_, max_ = self.model.bounds()
+
+        if not isinstance(epsilons, Iterable):
+            epsilons = np.linspace(0, 1, num=epsilons + 1)
+
+        for epsilon in epsilons:
+            adv_img = image_label[0][0].reshape(gradient.shape)
+            for _ in range(steps):
+                gradient = self.model.gradient([(adv_img, image_label[0][1])])
+                gradient_sign = np.sign(gradient) * (max_ - min_)
+                adv_img = adv_img + epsilon * gradient_sign
+                adv_img = np.clip(adv_img, min_, max_)
+                adv_label = np.argmax(self.model.predict([(adv_img, 0)]))
+                if pre_label != adv_label:
+                    return adv_img
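The inner loop above is plain iterative FGSM: step the image along the sign of the gradient, then clip back into the model's bounds. A rough, self-contained sketch of that single update follows; the helper name is ours (not part of advbox), and random arrays stand in for a real image and model gradient:

```python
import numpy as np

def iterative_fgsm_step(adv_img, gradient, epsilon, bounds):
    """One inner-loop update: move along the gradient sign, then clip."""
    min_, max_ = bounds
    gradient_sign = np.sign(gradient) * (max_ - min_)
    return np.clip(adv_img + epsilon * gradient_sign, min_, max_)

# Toy data standing in for a real image and model gradient.
rng = np.random.RandomState(0)
img = rng.rand(1, 28, 28).astype("float32")
grad = rng.randn(1, 28, 28).astype("float32")
adv = iterative_fgsm_step(img, grad, epsilon=0.05, bounds=(0.0, 1.0))
print(adv.min() >= 0.0, adv.max() <= 1.0)  # the result stays inside the bounds
```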
diff --git a/benchmark/tensorflow/image/googlenet_multi_gpu.py b/benchmark/tensorflow/image/googlenet_multi_gpu.py
index 31466faa37c47c66e4fe4628e28c867875e89f2e..44de3800a8a42d90debae2c567795789f3eb0a7d 100644
--- a/benchmark/tensorflow/image/googlenet_multi_gpu.py
+++ b/benchmark/tensorflow/image/googlenet_multi_gpu.py
@@ -1,3 +1,16 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
from six.moves import xrange # pylint: disable=redefined-builtin
from datetime import datetime
import math
diff --git a/doc/getstarted/concepts/src/infer.py b/doc/getstarted/concepts/src/infer.py
index 4cc58dfee0bd6dade0340b4fd0ee1adb49ffebf6..ee71cd7a9a4fbddb93fa3aa2d9349f01f3673982 100644
--- a/doc/getstarted/concepts/src/infer.py
+++ b/doc/getstarted/concepts/src/infer.py
@@ -1,3 +1,16 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
import paddle.v2 as paddle
import numpy as np
diff --git a/paddle/operators/op_documentation/batch_norm_op.md b/paddle/operators/op_documentation/batch_norm_op.md
index 80948adf2b9047a9685dbdd90b2296b5a955f9c1..d1392619c42d9206bf4bddcd33ad11b033e6cbdb 100644
--- a/paddle/operators/op_documentation/batch_norm_op.md
+++ b/paddle/operators/op_documentation/batch_norm_op.md
@@ -66,7 +66,7 @@ As most C++ operators do, `batch_norm_op` is defined by inputs, outputs, attribu
The following graph shows the training computational process of `batch_norm_op`:
-
+
cuDNN provides APIs that cover this whole series of computations, so we can use them in our GPU kernel.
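Since the graph itself is an image, here is a rough NumPy sketch of the training-time computation it depicts, following the usual batch-norm formulation; the variable names are illustrative, not the operator's actual input/output names:

```python
import numpy as np

def batch_norm_train(x, scale, bias, moving_mean, moving_var,
                     momentum=0.9, epsilon=1e-5):
    """Training-mode batch norm over an (N, C) mini-batch: normalize with the
    batch statistics and update the moving averages used later at inference."""
    batch_mean = x.mean(axis=0)
    batch_var = x.var(axis=0)
    x_hat = (x - batch_mean) / np.sqrt(batch_var + epsilon)
    y = scale * x_hat + bias
    moving_mean = momentum * moving_mean + (1 - momentum) * batch_mean
    moving_var = momentum * moving_var + (1 - momentum) * batch_var
    return y, moving_mean, moving_var

x = np.random.randn(8, 4).astype("float32")
y, mm, mv = batch_norm_train(x, np.ones(4), np.zeros(4), np.zeros(4), np.ones(4))
print(y.shape, mm.shape, mv.shape)
```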
@@ -124,7 +124,7 @@ for pass_id in range(PASS_NUM):
`is_infer` is an attribute. Once an operator is created, its attributes cannot be changed. This suggests that we maintain two `batch_norm_op`s in the model, one whose `is_infer` is `True` (we call it `infer_batch_norm_op`) and one whose `is_infer` is `False` (we call it `train_batch_norm_op`). They share all parameters and variables but are placed in two different branches. That is to say, if a network contains a `batch_norm_op`, it forks into two branches, one going through `train_batch_norm_op` and the other going through `infer_batch_norm_op`:
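To make the fork concrete, here is a minimal sketch of the idea, not Paddle's actual graph-building API: both ops refer to the same parameters and variables, and only the `is_infer` attribute differs. Names such as `conv1.out` and `bn1.scale` are made up for illustration.

```python
# Hypothetical op descriptions; both ops reference the same shared inputs.
shared_inputs = {
    "X": "conv1.out",
    "Scale": "bn1.scale",
    "Bias": "bn1.bias",
    "Mean": "bn1.moving_mean",
    "Variance": "bn1.moving_var",
}
train_batch_norm_op = {"type": "batch_norm", "attrs": {"is_infer": False},
                       "inputs": shared_inputs}
infer_batch_norm_op = {"type": "batch_norm", "attrs": {"is_infer": True},
                       "inputs": shared_inputs}
# The network forks here: the training branch runs train_batch_norm_op and
# updates the moving statistics; the inference branch runs infer_batch_norm_op
# and only reads them.
print(train_batch_norm_op["inputs"] is infer_batch_norm_op["inputs"])  # True: shared
```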