#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from scipy.special import expit, erf


class TestActivation(OpTest):
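    # Base test: checks the "exp" op end to end. Subclasses override setUp with
    # their own op_type and reference computation; init_dtype and
    # init_kernel_type are hooks used by the fp16 and cuDNN test factories
    # defined at the bottom of this file.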
    def setUp(self):
        self.op_type = "exp"
        self.dtype = np.float32
        self.init_dtype()
        self.init_kernel_type()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        self.dtype = np.float32

    def init_kernel_type(self):
        pass


class TestSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLogSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestTanh(TestActivation):
    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestAtan(TestActivation):
    def setUp(self):
        self.op_type = "atan"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestTanhShrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [10, 17]).astype(self.dtype)
        out = x - np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestHardShrink(TestActivation):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.init_dtype()

        threshold = 0.5
        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.copy(x)
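        # Reference hard_shrink: zero out entries with |x| <= threshold, keep the rest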
        out[(out >= -threshold) & (out <= threshold)] = 0

        self.attrs = {'lambda': threshold}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.005)


class TestSoftShrink(TestActivation):
    def setUp(self):
        self.op_type = "softshrink"
        self.init_dtype()

        lambda_val = 0.1
        x = np.random.uniform(0.25, 10, [4, 4]).astype(self.dtype)
        out = np.copy(x)
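        # Reference softshrink: values inside [-lambda, lambda] become zero,
        # values outside are shifted toward zero by lambda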
        out = (out < -lambda_val) * (out + lambda_val) + (out > lambda_val) * (
            out - lambda_val)

        self.attrs = {'lambda': lambda_val}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSqrt(TestActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestRsqrt(TestActivation):
    def setUp(self):
        self.op_type = "rsqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [2, 3]).astype(self.dtype)
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.0005)


class TestAbs(TestActivation):
    def setUp(self):
        self.op_type = "abs"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        # Because we set delta = 0.005 when calculating the numeric gradient,
        # if x is too small (e.g. 0.002), x_neg will be -0.003 and x_pos will
        # be 0.007, so the numeric gradient is inaccurate.
        # We avoid this by pushing such values away from zero.
        x[np.abs(x) < 0.005] = 0.02
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestCeil(TestActivation):
    def setUp(self):
        self.op_type = "ceil"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The same reason as in TestFloor
    def test_check_grad(self):
        pass


class TestFloor(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The gradient of floor, ceil, and round is undefined;
    # the op returns zero as the gradient, while numpy returns nan.
    def test_check_grad(self):
        pass


class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSin(TestActivation):
    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        pass


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestLeakyRelu(TestActivation):
    def setUp(self):
        self.op_type = "leaky_relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0.02 * x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 0.5 * x * (1.0 + erf(x / np.sqrt(2.0)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same reason as in TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
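        # Reference brelu: clip x to the interval [t_min, t_max]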
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 10]).astype(self.dtype)
        threshold = 6.0
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        out = np.minimum(np.maximum(x, 0), threshold)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestHardSwish(TestActivation):
    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()

        x = np.random.uniform(-6, 6, [4, 4]).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        # The same reason as in TestAbs
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
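        # Reference hard_swish: x * clip(x + offset, 0, threshold) / scale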
        out = x * np.minimum(np.maximum(x + offset, 0), threshold) / scale

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason as in TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold + 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
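        # Reference soft_relu: log(1 + exp(clip(x, -threshold, threshold)))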
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        alpha = 1.
        out = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
        # Note: unlike other ReLU extensions, the standard ELU (i.e. alpha = 1)
        # is differentiable at 0, so we can skip modifications like
        # x[np.abs(x) < 0.005] = 0.02 here
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestPow_factor_tensor(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype("float32")
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)

    def test_api(self):
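        # Exercise the Python layer: `factor` may be a plain float or a
        # 1-element tensor, and both results should match numpy's power.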
        import paddle.fluid as fluid

        input = np.random.uniform(1, 2, [11, 17]).astype("float32")
        x = fluid.layers.data(
            name="x", shape=[11, 17], append_batch_size=False, dtype="float32")

        factor_1 = 2.0
        factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
        out_1 = fluid.layers.pow(x, factor=factor_1)
        out_2 = fluid.layers.pow(x, factor=factor_2)

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2 = exe.run(fluid.default_main_program(),
                               feed={"x": input},
                               fetch_list=[out_1, out_2])

        assert np.array_equal(res_1, np.power(input, 2))
        assert np.array_equal(res_2, np.power(input, 3))


class TestSTanh(TestActivation):
    def setUp(self):
        self.op_type = "stanh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        scale_a = 2.0 / 3.0
        scale_b = 1.7159
        out = scale_b * np.tanh(x * scale_a)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()
        self.dtype = np.float64

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 + np.exp(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.divide(x, 1 + np.abs(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()

        threshold = 0.25
        self.relative_error = 0.005
        X = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)

        # Same reason as TestAbs
        X[np.abs(X - threshold) < self.relative_error] = threshold + 0.2
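        # Reference thresholded_relu: pass x through where x > threshold, else 0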
        out = (X > threshold) * X

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=self.relative_error)


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.init_dtype()

        self.relative_error = 0.002

        X = np.random.uniform(-5, 5, [2, 2]).astype("float32")
        slope = 0.2
        offset = 0.5
        lower_threshold = -offset / slope
        upper_threshold = (1 - offset) / slope

        # Same reason as TestAbs
        X[np.abs(X - lower_threshold) < self.relative_error] = \
            lower_threshold + 0.2
        X[np.abs(X - upper_threshold) < self.relative_error] = \
            upper_threshold - 0.2

        temp = X * slope + offset
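        # Reference hard_sigmoid: clip(slope * x + offset, 0, 1)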
        out = np.maximum(0.0, np.minimum(1.0, temp))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.002)


class TestSwish(TestActivation):
    def setUp(self):
        self.op_type = "swish"
        self.init_dtype()

        X = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        beta = 2.3
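        # Reference swish: x * sigmoid(beta * x); expit is scipy's logistic sigmoid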
        out = X * expit(beta * X)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'beta': beta}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


#------------------ Test Cudnn Activation----------------------
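# Factory: derive a cuDNN variant of an existing activation test by overriding
# init_kernel_type to request the cuDNN kernel, then register the generated
# class in the module namespace so unittest discovers it.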
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


#------------------ Test Fp16 ----------------------
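# Factory: derive a float16 variant of each activation test. The generated class
# forces the dtype to fp16 and runs the output (and, optionally, gradient) checks
# only on CUDA devices that report fp16 support.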
def create_test_act_fp16_class(parent,
                               atol=1e-3,
                               grad_check=True,
                               grad_atol=0.80):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place, ['X'], 'Out', max_relative_error=grad_atol)

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhShrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftShrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestPow_factor_tensor, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish)
create_test_act_fp16_class(TestHardSwish)

if __name__ == "__main__":
    unittest.main()