#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from scipy.special import expit, erf


class TestActivation(OpTest):
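    """Base case for the activation op tests.

    setUp builds a NumPy reference for the "exp" op; subclasses override
    setUp with their own op_type and reference output, while init_dtype and
    init_kernel_type are hooks overridden by the fp16 and cuDNN test
    factories at the bottom of this file.
    """
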
    def setUp(self):
        self.op_type = "exp"
        self.dtype = np.float32
        self.init_dtype()
        self.init_kernel_type()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        self.dtype = np.float32

    def init_kernel_type(self):
        pass


class TestSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLogSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestTanh(TestActivation):
    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestTanhShrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [10, 17]).astype(self.dtype)
        out = x - np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestHardShrink(TestActivation):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.init_dtype()

        threshold = 0.5
        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.copy(x)
        out[(out >= -threshold) & (out <= threshold)] = 0

        self.attrs = {'lambda': threshold}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.005)


class TestSoftShrink(TestActivation):
    def setUp(self):
        self.op_type = "softshrink"
        self.init_dtype()

        lambda_val = 0.1
        x = np.random.uniform(0.25, 10, [4, 4]).astype(self.dtype)
        out = np.copy(x)
        out = (out < -lambda_val) * (out + lambda_val) + (out > lambda_val) * (
            out - lambda_val)

        self.attrs = {'lambda': lambda_val}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSqrt(TestActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestAbs(TestActivation):
    def setUp(self):
        self.op_type = "abs"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        # Because delta = 0.005 is used when computing the numeric gradient,
        # an x too close to zero (e.g. 0.002) yields x_neg = -0.003 and
        # x_pos = 0.007, which straddle the non-differentiable point and make
        # the numeric gradient inaccurate, so such values are avoided.
        x[np.abs(x) < 0.005] = 0.02
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestCeil(TestActivation):
    def setUp(self):
        self.op_type = "ceil"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The gradient of ceil is undefined; see the note in TestFloor.
    def test_check_grad(self):
        pass


class TestFloor(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The gradient of floor, ceil, and round is undefined: the op returns zero
    # as the gradient while numpy would return nan, so gradient checking is
    # skipped for these ops.
    def test_check_grad(self):
        pass


class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSin(TestActivation):
    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The gradient of round is undefined; see the note in TestFloor.
    def test_check_grad(self):
        pass


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason with TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 0.5 * x * (1.0 + erf(x / np.sqrt(2.0)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same with TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 10]).astype(self.dtype)
        threshold = 6.0
        # The same with TestAbs
        x[np.abs(x) < 0.005] = 0.02
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        out = np.minimum(np.maximum(x, 0), threshold)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason with TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold + 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        alpha = 1.
        out = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
        # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1)
        # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSTanh(TestActivation):
    def setUp(self):
        self.op_type = "stanh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        scale_a = 2.0 / 3.0
        scale_b = 1.7159
        out = scale_b * np.tanh(x * scale_a)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()
        self.dtype = np.float64

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 + np.exp(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.divide(x, 1 + np.abs(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()

        threshold = 0.25
        self.relative_error = 0.005
        X = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)

        # Same reason as TestAbs
        X[np.abs(X - threshold) < self.relative_error] = threshold + 0.2
        out = (X > threshold) * X

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=self.relative_error)


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.init_dtype()

        self.relative_error = 0.002

        X = np.random.uniform(-5, 5, [2, 2]).astype(self.dtype)
        slope = 0.2
        offset = 0.5
        lower_threshold = -offset / slope
        upper_threshold = (1 - offset) / slope

        # Same reason as TestAbs
        X[np.abs(X - lower_threshold) < self.relative_error] = \
            lower_threshold + 0.2
        X[np.abs(X - upper_threshold) < self.relative_error] = \
            upper_threshold - 0.2

        temp = X * slope + offset
        out = np.maximum(0.0, np.minimum(1.0, temp))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.002)


class TestSwish(TestActivation):
    def setUp(self):
        self.op_type = "swish"
        self.init_dtype()

        X = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        beta = 2.3
        out = X * expit(beta * X)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'beta': beta}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


#------------------ Test Cudnn Activation----------------------
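# create_test_act_cudnn_class dynamically derives a cuDNN variant of an
# existing activation test: the subclass sets the "use_cudnn" attribute via
# init_kernel_type, is skipped when Paddle is not built with CUDA, and is
# registered in globals() under a "<Parent>_cudnn" name so that unittest
# discovery picks it up.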
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


#------------------ Test Fp16 ----------------------
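# create_test_act_fp16_class builds a float16 variant of an activation test:
# init_dtype is overridden to np.float16, output and gradient checks run on
# CUDAPlace(0) only when float16 is supported there, tolerances are relaxed
# via atol / grad_atol, and grad_check=False skips gradient checking for ops
# whose gradient is undefined (ceil, floor, round).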
def create_test_act_fp16_class(parent,
                               atol=1e-3,
                               grad_check=True,
                               grad_atol=0.80):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place, ['X'], 'Out', max_relative_error=grad_atol)

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhShrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftShrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish)

if __name__ == "__main__":
    unittest.main()