#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from scipy.special import expit, erf
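
# Each test case below builds a NumPy reference output for one activation op;
# OpTest then compares the op's forward result against that reference and
# checks the gradient with a numeric (finite-difference) approximation.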


class TestActivation(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        self.dtype = np.float32


class TestSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLogSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestTanh(TestActivation):
    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestTanhShrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [10, 17]).astype(self.dtype)
        out = x - np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestHardShrink(TestActivation):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.init_dtype()

        threshold = 0.5
        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.copy(x)
        out[(out >= -threshold) & (out <= threshold)] = 0

        self.attrs = {'lambda': threshold}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.005)


class TestSoftShrink(TestActivation):
    def setUp(self):
        self.op_type = "softshrink"
        self.init_dtype()

        lambda_val = 0.1
        x = np.random.uniform(0.25, 10, [4, 4]).astype(self.dtype)
        out = np.copy(x)
        out = (out < -lambda_val) * (out + lambda_val) + (out > lambda_val) * (
            out - lambda_val)

        self.attrs = {'lambda': lambda_val}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSqrt(TestActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestAbs(TestActivation):
    def setUp(self):
        self.op_type = "abs"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        # Because we set delta = 0.005 in calculating numeric gradient,
        # if x is too small, such as 0.002, x_neg will be -0.003
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
        # we should avoid this
        x[np.abs(x) < 0.005] = 0.02
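        # (The numeric gradient here is essentially a central difference,
        #  roughly (f(x + delta) - f(x - delta)) / (2 * delta), so if the kink
        #  of abs() at 0 lies between x - delta and x + delta the estimate is
        #  meaningless; the nudge above keeps x away from that region.)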
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestCeil(TestActivation):
    def setUp(self):
        self.op_type = "ceil"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The same reason as in TestFloor: the gradient is undefined,
    # so the gradient check is skipped.
    def test_check_grad(self):
        pass


class TestFloor(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The gradient of floor, ceil and round is undefined.
    # The op returns zero as the gradient, while numpy would give nan,
    # so the gradient check is skipped.
    def test_check_grad(self):
        pass


class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSin(TestActivation):
    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        pass


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
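        # Exact GELU via the Gaussian CDF: gelu(x) = x * Phi(x)
        # = 0.5 * x * (1 + erf(x / sqrt(2))), i.e. the erf form rather than
        # the tanh approximation.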
        out = 0.5 * x * (1.0 + erf(x / np.sqrt(2.0)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same reason as in TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 10]).astype(self.dtype)
        threshold = 6.0
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        out = np.minimum(np.maximum(x, 0), threshold)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason as in TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold + 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log(np.exp(t) + 1)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        alpha = 1.
        out = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
        # Note: unlike other ReLU variants, the standard ELU (i.e. alpha = 1)
        # is differentiable at 0, so we can skip adjustments such as
        # x[np.abs(x) < 0.005] = 0.02 here.
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSTanh(TestActivation):
    def setUp(self):
        self.op_type = "stanh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        scale_a = 2.0 / 3.0
        scale_b = 1.7159
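        # stanh(x) = scale_b * tanh(scale_a * x); these defaults appear to be
        # the classic LeCun scaled-tanh constants (a = 2/3, b = 1.7159).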
        out = scale_b * np.tanh(x * scale_a)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()
        self.dtype = np.float64

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 + np.exp(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.divide(x, 1 + np.abs(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()

        threshold = 0.25
        self.relative_error = 0.005
        X = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)

        # The same reason as in TestAbs
        X[np.abs(X - threshold) < self.relative_error] = threshold + 0.2
        out = (X > threshold) * X

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=self.relative_error)


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.init_dtype()

        self.relative_error = 0.002

        X = np.random.uniform(-5, 5, [2, 2]).astype("float32")
        slope = 0.2
        offset = 0.5
        lower_threshold = -offset / slope
        upper_threshold = (1 - offset) / slope
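        # hard_sigmoid(x) = clip(slope * x + offset, 0, 1); the thresholds are
        # the x values at which the clip saturates at 0 and 1 respectively.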

        # Same reason as TestAbs
        X[np.abs(X - lower_threshold) < self.relative_error] = \
            lower_threshold + 0.2
        X[np.abs(X - upper_threshold) < self.relative_error] = \
            upper_threshold - 0.2

        temp = X * slope + offset
        out = np.maximum(0.0, np.minimum(1.0, temp))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.002)


class TestSwish(TestActivation):
    def setUp(self):
        self.op_type = "swish"
        self.init_dtype()

        X = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        beta = 2.3
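        # swish(x) = x * sigmoid(beta * x); scipy's expit is the logistic
        # sigmoid, so it serves as the reference here.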
        out = X * expit(beta * X)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'beta': beta}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


#------------------ Test Fp16 ----------------------
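# For each activation test above, derive an FP16 variant dynamically. The
# generated class only overrides init_dtype(); it gets a unique __name__ and
# is registered in globals() so that unittest discovery picks it up.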
def create_test_act_fp16_class(parent,
                               atol=1e-3,
                               grad_check=True,
                               grad_atol=0.80):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place, ['X'], 'Out', max_relative_error=grad_atol)

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhShrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftShrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish)

if __name__ == "__main__":
    unittest.main()