#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from scipy.special import expit, erf


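# Each test below builds a NumPy reference for one activation, feeds the same
# random input through the Paddle op via OpTest, and compares the forward
# output and the gradient. TestActivation (exp) is the base class; subclasses
# override setUp, while the init_dtype/init_kernel_type hooks let the
# generated fp16 and cuDNN variants switch the dtype and kernel attributes.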
class TestActivation(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.dtype = np.float32
        self.init_dtype()
        self.init_kernel_type()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
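        # float16 gradients are checked separately on GPU by the generated
        # *_fp16 classes (see create_test_act_fp16_class below), so skip here.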
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        self.dtype = np.float32

    def init_kernel_type(self):
        pass


class TestSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLogSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestTanh(TestActivation):
    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestAtan(TestActivation):
    def setUp(self):
        self.op_type = "atan"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestTanhShrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [10, 17]).astype(self.dtype)
        out = x - np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestHardShrink(TestActivation):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.init_dtype()

        threshold = 0.5
        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.copy(x)
        out[(out >= -threshold) & (out <= threshold)] = 0

        self.attrs = {'lambda': threshold}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.005)


class TestSoftShrink(TestActivation):
    def setUp(self):
        self.op_type = "softshrink"
        self.init_dtype()

        lambda_val = 0.1
        x = np.random.uniform(0.25, 10, [4, 4]).astype(self.dtype)
        out = np.copy(x)
        out = (out < -lambda_val) * (out + lambda_val) + (out > lambda_val) * (
            out - lambda_val)

        self.attrs = {'lambda': lambda_val}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSqrt(TestActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestAbs(TestActivation):
    def setUp(self):
        self.op_type = "abs"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        # Because we set delta = 0.005 when computing the numeric gradient,
        # if x is too small (e.g. 0.002), x_neg will be -0.003 and x_pos
        # will be 0.007, so the numeric gradient is inaccurate.
        # We avoid this by moving such values away from zero.
        x[np.abs(x) < 0.005] = 0.02
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestCeil(TestActivation):
    def setUp(self):
        self.op_type = "ceil"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The gradient of ceil is undefined, so skip the check (see TestFloor).
    def test_check_grad(self):
        pass


class TestFloor(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The gradient of floor, ceil and round is undefined;
    # the op returns zero as the gradient, while numpy returns nan.
    def test_check_grad(self):
        pass


class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSin(TestActivation):
    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        pass


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 0.5 * x * (1.0 + erf(x / np.sqrt(2.0)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same reason as in TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 10]).astype(self.dtype)
        threshold = 6.0
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        out = np.minimum(np.maximum(x, 0), threshold)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason as in TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold + 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        alpha = 1.
        out = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
        # Note: unlike other ReLU extensions, the standard ELU (alpha = 1) is
        # differentiable at 0, so we can skip modifications like
        # x[np.abs(x) < 0.005] = 0.02 here.
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSTanh(TestActivation):
    def setUp(self):
        self.op_type = "stanh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        scale_a = 2.0 / 3.0
        scale_b = 1.7159
        out = scale_b * np.tanh(x * scale_a)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()
        self.dtype = np.float64

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 + np.exp(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.divide(x, 1 + np.abs(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()

        threshold = 0.25
        self.relative_error = 0.005
        X = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)

        # Same reason as TestAbs
        X[np.abs(X - threshold) < self.relative_error] = threshold + 0.2
        out = (X > threshold) * X

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=self.relative_error)


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.init_dtype()

        self.relative_error = 0.002

        X = np.random.uniform(-5, 5, [2, 2]).astype("float32")
        slope = 0.2
        offset = 0.5
        lower_threshold = -offset / slope
        upper_threshold = (1 - offset) / slope

        # Same reason as TestAbs
        X[np.abs(X - lower_threshold) < self.relative_error] = \
            lower_threshold + 0.2
        X[np.abs(X - upper_threshold) < self.relative_error] = \
            upper_threshold - 0.2

        temp = X * slope + offset
        out = np.maximum(0.0, np.minimum(1.0, temp))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.002)


class TestSwish(TestActivation):
    def setUp(self):
        self.op_type = "swish"
        self.init_dtype()

        X = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        beta = 2.3
        out = X * expit(beta * X)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'beta': beta}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


#------------------ Test Cudnn Activation----------------------
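# create_test_act_cudnn_class derives a "<Parent>_cudnn" test class that
# re-runs the parent's checks with attrs={"use_cudnn": True}; registering the
# class in globals() lets the unittest loader discover it.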
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


#------------------ Test Fp16 ----------------------
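# create_test_act_fp16_class derives a "<Parent>_fp16" test class that runs on
# CUDAPlace(0) only when float16 is supported, with a looser atol/grad_atol
# and an optional grad_check switch for ops whose gradient is not checked.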
def create_test_act_fp16_class(parent,
                               atol=1e-3,
                               grad_check=True,
                               grad_atol=0.80):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place, ['X'], 'Out', max_relative_error=grad_atol)

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhShrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftShrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish)

if __name__ == "__main__":
    unittest.main()