#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from scipy.special import expit, erf


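# Each test below follows the same pattern: setUp names the operator via
# self.op_type and builds a NumPy reference output for a random input;
# OpTest.check_output compares the op's output against that reference and
# OpTest.check_grad checks its gradient numerically. init_dtype and
# init_kernel_type are hooks that the cuDNN / float16 class factories at the
# bottom of this file override.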
class TestActivation(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.dtype = np.float32
        self.init_dtype()
        self.init_kernel_type()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        self.dtype = np.float32

    def init_kernel_type(self):
        pass


class TestSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLogSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestTanh(TestActivation):
    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestAtan(TestActivation):
    def setUp(self):
        self.op_type = "atan"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestTanhShrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [10, 17]).astype(self.dtype)
        out = x - np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestHardShrink(TestActivation):
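    # hard_shrink reference: elements with |x| <= lambda (the 'lambda' attr,
    # 0.5 here) are zeroed; everything else passes through unchanged.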
    def setUp(self):
        self.op_type = "hard_shrink"
        self.init_dtype()

        threshold = 0.5
        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.copy(x)
        out[(out >= -threshold) & (out <= threshold)] = 0

        self.attrs = {'lambda': threshold}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.005)


class TestSoftShrink(TestActivation):
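    # softshrink reference: x - lambda for x > lambda, x + lambda for
    # x < -lambda, and 0 in between; sampling x in [0.25, 10] keeps it away
    # from the kinks at +/-lambda.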
    def setUp(self):
        self.op_type = "softshrink"
        self.init_dtype()

        lambda_val = 0.1
        x = np.random.uniform(0.25, 10, [4, 4]).astype(self.dtype)
        out = np.copy(x)
        out = (out < -lambda_val) * (out + lambda_val) + (out > lambda_val) * (
            out - lambda_val)

        self.attrs = {'lambda': lambda_val}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSqrt(TestActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestRsqrt(TestActivation):
    def setUp(self):
        self.op_type = "rsqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [2, 3]).astype(self.dtype)
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.0005)


class TestAbs(TestActivation):
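    # check_grad estimates the gradient with a central difference,
    #     grad ~= (f(x + delta) - f(x - delta)) / (2 * delta),  delta = 0.005,
    # which straddles the non-differentiable point of abs() at 0 when |x| is
    # tiny; setUp therefore nudges such values away from 0.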
    def setUp(self):
        self.op_type = "abs"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        # Because delta = 0.005 is used when computing the numeric gradient,
        # an x that is too small (such as 0.002) gives x_neg = -0.003 and
        # x_pos = 0.007, which makes the numeric gradient inaccurate,
        # so we avoid such values.
        x[np.abs(x) < 0.005] = 0.02
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestCeil(TestActivation):
    def setUp(self):
        self.op_type = "ceil"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The same reason as TestFloor below: the gradient check is skipped.
    def test_check_grad(self):
        pass


class TestFloor(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The gradient of floor (and of ceil and round) is undefined at integer
    # points; the op returns zero as the gradient while numpy would return
    # nan, so the gradient check is skipped.
    def test_check_grad(self):
        pass


class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSin(TestActivation):
    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        pass


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason as TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestLeakyRelu(TestActivation):
    def setUp(self):
        self.op_type = "leaky_relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason as TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0.02 * x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestGelu(TestActivation):
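    # Reference uses the exact GELU, 0.5 * x * (1 + erf(x / sqrt(2))),
    # computed with scipy's erf rather than the tanh approximation.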
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 0.5 * x * (1.0 + erf(x / np.sqrt(2.0)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same reason as TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 10]).astype(self.dtype)
        threshold = 6.0
        # The same reason as TestAbs
        x[np.abs(x) < 0.005] = 0.02
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        out = np.minimum(np.maximum(x, 0), threshold)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestHardSwish(TestActivation):
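    # hard_swish reference: out = x * min(max(x + offset, 0), threshold) / scale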
    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()

        x = np.random.uniform(-6, 6, [4, 4]).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        # The same reason as TestAbs
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
        out = x * np.minimum(np.maximum(x + offset, 0), threshold) / scale

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason as TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold + 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        alpha = 1.
        out = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
        # Note: unlike the other ReLU extensions, the standard ELU (alpha = 1)
        # is differentiable at 0, so no adjustment such as
        # x[np.abs(x) < 0.005] = 0.02 is needed here.
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSTanh(TestActivation):
    def setUp(self):
        self.op_type = "stanh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        scale_a = 2.0 / 3.0
        scale_b = 1.7159
        out = scale_b * np.tanh(x * scale_a)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()
        self.dtype = np.float64

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 + np.exp(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.divide(x, 1 + np.abs(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestThresholdedRelu(TestActivation):
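    # thresholded_relu reference: out = x when x > threshold (0.25 here),
    # otherwise 0.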
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()

        threshold = 0.25
        self.relative_error = 0.005
        X = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)

        # Same reason as TestAbs
        X[np.abs(X - threshold) < self.relative_error] = threshold + 0.2
        out = (X > threshold) * X

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=self.relative_error)


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.init_dtype()

        self.relative_error = 0.002

        X = np.random.uniform(-5, 5, [2, 2]).astype("float32")
        slope = 0.2
        offset = 0.5
        lower_threshold = -offset / slope
        upper_threshold = (1 - offset) / slope

        # Same reason as TestAbs
        X[np.abs(X - lower_threshold) < self.relative_error] = \
            lower_threshold + 0.2
        X[np.abs(X - upper_threshold) < self.relative_error] = \
            upper_threshold - 0.2

        temp = X * slope + offset
        out = np.maximum(0.0, np.minimum(1.0, temp))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.002)


class TestSwish(TestActivation):
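    # swish reference: out = x * sigmoid(beta * x), with the sigmoid taken
    # from scipy.special.expit.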
    def setUp(self):
        self.op_type = "swish"
        self.init_dtype()

        X = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        beta = 2.3
        out = X * expit(beta * X)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'beta': beta}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


#------------------ Test Cudnn Activation----------------------
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
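    # Dynamically builds a cuDNN variant of `parent`: init_kernel_type is
    # overridden so the op runs with attrs["use_cudnn"] = True, and the new
    # class is registered in globals() as "<Parent>_cudnn" so unittest can
    # discover it. atol and grad_atol are currently unused by this factory.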
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)
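# The calls above register TestRelu_cudnn, TestRelu6_cudnn, TestSigmoid_cudnn
# and TestTanh_cudnn. Covering another activation would only need one more
# call here, e.g. (hypothetically) create_test_act_cudnn_class(TestAbs),
# assuming that op has a cuDNN kernel.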


#------------------ Test Fp16 ----------------------
def create_test_act_fp16_class(parent,
                               atol=1e-3,
                               grad_check=True,
                               grad_atol=0.80):
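    # Builds a float16 variant of `parent`: the checks run only when the CUDA
    # device reports float16 support, the output/gradient tolerances are
    # loosened to atol / grad_atol for the reduced precision, and the class is
    # registered in globals() as "<Parent>_fp16".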
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place, ['X'], 'Out', max_relative_error=grad_atol)

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhShrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftShrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish)
create_test_act_fp16_class(TestHardSwish)

if __name__ == "__main__":
    unittest.main()