#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from scipy.special import expit
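# Each TestXxx case below subclasses TestActivation: setUp() builds a random
# input and the corresponding NumPy reference output for one activation op,
# and the inherited OpTest machinery compares the operator's forward result
# against that reference and numerically checks the gradient of 'Out' with
# respect to 'X'.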


class TestActivation(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        self.dtype = np.float32


class TestSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLogSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestTanh(TestActivation):
    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestTanhShrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [10, 17]).astype(self.dtype)
        out = x - np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestHardShrink(TestActivation):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.init_dtype()

        threshold = 0.5
        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.copy(x)
        out[(out >= -threshold) & (out <= threshold)] = 0

        self.attrs = {'lambda': threshold}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.005)


class TestSoftShrink(TestActivation):
    def setUp(self):
        self.op_type = "softshrink"
        self.init_dtype()

        lambda_val = 0.1
        x = np.random.uniform(0.25, 10, [4, 4]).astype(self.dtype)
        out = np.copy(x)
        out = (out < -lambda_val) * (out + lambda_val) + (out > lambda_val) * (
            out - lambda_val)

        self.attrs = {'lambda': lambda_val}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSqrt(TestActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestAbs(TestActivation):
    def setUp(self):
        self.op_type = "abs"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        # Because we set delta = 0.005 in calculating the numeric gradient,
        # if x is too small, such as 0.002, x_neg will be -0.003 and
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
        # We should avoid this.
        x[np.abs(x) < 0.005] = 0.02
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestCeil(TestActivation):
    def setUp(self):
        self.op_type = "ceil"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The same reason as TestFloor
    def test_check_grad(self):
        pass


class TestFloor(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The gradient of floor, ceil and round is undefined.
    # The op returns zero as the gradient, while numpy would give nan,
    # so the gradient check is skipped.
    def test_check_grad(self):
        pass


class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSin(TestActivation):
    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        pass


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason as TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same reason as TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 10]).astype(self.dtype)
        threshold = 6.0
        # The same reason as TestAbs
        x[np.abs(x) < 0.005] = 0.02
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        out = np.minimum(np.maximum(x, 0), threshold)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason as TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold + 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        alpha = 1.
        out = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
        # Note: unlike other ReLU extensions, the standard ELU (i.e. alpha = 1)
        # is differentiable at 0, so we can skip adjustments such as
        # x[np.abs(x) < 0.005] = 0.02 here.
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSTanh(TestActivation):
    def setUp(self):
        self.op_type = "stanh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        scale_a = 2.0 / 3.0
        scale_b = 1.7159
        out = scale_b * np.tanh(x * scale_a)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()
        self.dtype = np.float64

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 + np.exp(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.divide(x, 1 + np.abs(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()

        threshold = 0.25
        self.relative_error = 0.005
        X = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)

        # Same reason as TestAbs
        X[np.abs(X - threshold) < self.relative_error] = threshold + 0.2
        out = (X > threshold) * X

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=self.relative_error)


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.init_dtype()

        self.relative_error = 0.002

        X = np.random.uniform(-5, 5, [2, 2]).astype("float32")
        slope = 0.2
        offset = 0.5
        lower_threshold = -offset / slope
        upper_threshold = (1 - offset) / slope

        # Same reason as TestAbs
        X[np.abs(X - lower_threshold) < self.relative_error] = \
            lower_threshold + 0.2
        X[np.abs(X - upper_threshold) < self.relative_error] = \
            upper_threshold - 0.2

        temp = X * slope + offset
        out = np.maximum(0.0, np.minimum(1.0, temp))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.002)


class TestSwish(TestActivation):
    def setUp(self):
        self.op_type = "swish"
        self.init_dtype()

        X = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        beta = 2.3
        out = X * expit(beta * X)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'beta': beta}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


#------------------ Test Fp16 ----------------------
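# create_test_act_fp16_class dynamically derives an fp16 variant from each
# TestXxx case above: it overrides init_dtype() to np.float16, runs the
# checks only when a CUDA place with fp16 support is available, and registers
# the generated class in globals() under the name "<Parent>_fp16" so that
# unittest discovers it.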
def create_test_act_fp16_class(parent,
                               atol=1e-3,
                               grad_check=True,
                               grad_atol=0.80):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place, ['X'], 'Out', max_relative_error=grad_atol)

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhShrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftShrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish)

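# Running this file directly executes every test case defined above,
# including the dynamically generated *_fp16 variants.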
if __name__ == "__main__":
    unittest.main()