#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np

import paddle.fluid as fluid
import paddle
import paddle.fluid.layers as layers
import paddle.fluid.core as core
import gradient_checker
import paddle.nn.functional as F
from paddle.fluid.framework import _test_eager_guard

from decorator_helper import prog_scope
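
# These tests run finite-difference double- and triple-gradient checks
# (via gradient_checker) on activation ops in static-graph mode:
# sigmoid, tanh, relu, leaky_relu, elu, celu, sqrt, rsqrt, square, abs, log.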


class TestSigmoidTripleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 3, 7, 9]
        eps = 0.0005
        dtype = np.float64
        x = layers.data('x', shape, False, dtype=dtype)
        x.persistable = True
        y = layers.sigmoid(x)
        x_arr = np.random.random(shape).astype(dtype)
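        # Values too close to zero make the finite-difference (numeric)
        # gradient inaccurate, so nudge them away from zero.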
        x_arr[np.abs(x_arr) < 0.005] = 0.002
        gradient_checker.triple_grad_check(
            [x], y, x_init=x_arr, place=place, eps=eps)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestSigmoidDoubleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 3, 7, 9]
        eps = 0.0005
        dtype = np.float64
        x = layers.data('x', shape, False, dtype=dtype)
        x.persistable = True
        y = layers.sigmoid(x)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        x_arr[np.abs(x_arr) < 0.005] = 0.002
        gradient_checker.double_grad_check(
            [x], y, x_init=x_arr, place=place, eps=eps)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestTanhTripleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 3, 7, 9]
        eps = 0.0005
        dtype = np.float64
        x = layers.data('x', shape, False, dtype=dtype)
        x.persistable = True
        y = layers.tanh(x)
        x_arr = np.random.random(shape).astype(dtype)
        x_arr[np.abs(x_arr) < 0.005] = 0.002
        gradient_checker.triple_grad_check(
            [x], y, x_init=x_arr, place=place, eps=eps)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestTanhDoubleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 3, 7, 9]
        eps = 0.0005
        dtype = np.float64
        x = layers.data('x', shape, False, dtype=dtype)
        x.persistable = True
        y = paddle.tanh(x)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        x_arr[np.abs(x_arr) < 0.005] = 0.002
        gradient_checker.double_grad_check(
            [x], y, x_init=x_arr, place=place, eps=eps)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestReluDoubleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 3, 7, 9]
        eps = 0.005
        dtype = np.float64

        x = layers.data('x', shape, False, dtype)
        x.persistable = True
        y = layers.relu(x)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        x_arr[np.abs(x_arr) < 0.005] = 0.02

        gradient_checker.double_grad_check(
            [x], y, x_init=x_arr, place=place, eps=eps)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestLeakyReluDoubleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 3, 7, 9]
        eps = 0.005
        alpha = 0.2
        dtype = np.float64

        x = layers.data('x', shape, False, dtype)
        x.persistable = True

        y = layers.leaky_relu(x, alpha=alpha)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        x_arr[np.abs(x_arr) < 0.005] = 0.02

        gradient_checker.double_grad_check(
            [x], y, x_init=x_arr, place=place, eps=eps)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places = [fluid.CUDAPlace(0)]
        for p in places:
            self.func(p)


class TestELUDoubleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 4, 4, 4]
        eps = 1e-6
        alpha = 0.2
        dtype = np.float64
        SEED = 0

        x = layers.data('x', shape, False, dtype)
        x.persistable = True

        y = layers.elu(x, alpha=alpha)
        np.random.seed(SEED)  # seed NumPy's global RNG so x_arr is reproducible
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        gradient_checker.double_grad_check(
            [x], y, x_init=x_arr, place=place, eps=eps)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestCELUDoubleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 4, 4, 4]
        eps = 1e-6
        alpha = 0.2
        dtype = np.float64
        SEED = 0

        x = layers.data('x', shape, False, dtype)
        x.persistable = True

        y = F.celu(x, alpha=alpha)
        np.random.seed(SEED)  # seed NumPy's global RNG so x_arr is reproducible
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        gradient_checker.double_grad_check(
            [x], y, x_init=x_arr, place=place, eps=eps)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestSqrtDoubleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 3, 7, 9]
        eps = 0.0001
        dtype = np.float64

        x = layers.data('x', shape, False, dtype)
        x.persistable = True

        y = layers.sqrt(x)
        x_arr = np.random.uniform(0.1, 1, shape).astype(dtype)

        gradient_checker.double_grad_check(
            [x], y, x_init=x_arr, place=place, eps=eps)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places = [fluid.CUDAPlace(0)]
        for p in places:
            self.func(p)


class TestRsqrtDoubleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        shape = [2, 3, 7, 9]
        eps = 0.0001
        dtype = np.float64

        x = layers.data('x', shape, False, dtype)
        x.persistable = True

        y = layers.rsqrt(x)
        x_arr = np.random.uniform(0.1, 1, shape).astype(dtype)

        gradient_checker.double_grad_check(
            [x], y, x_init=x_arr, place=place, eps=eps)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places = [fluid.CUDAPlace(0)]
        for p in places:
            self.func(p)


class TestSquareDoubleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        # The shape of the input variable should be specified explicitly and must not include -1.
        shape = [2, 3, 7, 9]
        eps = 0.005
        dtype = np.float64

        x = layers.data('x', shape, False, dtype)
        x.persistable = True
        y = layers.square(x)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        gradient_checker.double_grad_check(
            [x], y, x_init=x_arr, place=place, eps=eps)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestAbsDoubleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        # The shape of the input variable should be specified explicitly and must not include -1.
        shape = [2, 3, 7, 9]
        eps = 1e-6
        dtype = np.float64

        x = layers.data('x', shape, False, dtype)
        x.persistable = True
        y = layers.abs(x)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        # Because delta = 0.005 is used when computing the numeric gradient,
        # the numeric gradient is inaccurate if x is too close to zero,
        # so such values are nudged away from zero below.
        x_arr[np.abs(x_arr) < 0.005] = 0.02

        gradient_checker.double_grad_check(
            [x], y, x_init=x_arr, place=place, eps=eps)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


class TestLogDoubleGradCheck(unittest.TestCase):
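    # Wrapper passed to double_grad_check_for_dygraph so the same op can be
    # rebuilt and checked in dygraph (eager) mode.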
    def log_wrapper(self, x):
        return paddle.log(x[0])

    @prog_scope()
    def func(self, place):
        shape = [2, 3, 7, 9]
        eps = 1e-6
        dtype = np.float64

        x = layers.data('x', shape, False, dtype)
        x.persistable = True
        y = layers.log(x)

        x_arr = np.random.uniform(0.1, 1, shape).astype(dtype)

        gradient_checker.double_grad_check(
            [x], y, x_init=x_arr, place=place, eps=eps)
        gradient_checker.double_grad_check_for_dygraph(
            self.log_wrapper, [x], y, x_init=x_arr, place=place)

    def test_grad(self):
        paddle.enable_static()
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)


if __name__ == "__main__":
    unittest.main()