#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from scipy.special import expit, erf
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard

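# Tests for PaddlePaddle activation operators.
#
# TestActivation is the shared base: its setUp builds a NumPy reference
# input/output pair, while the init_dtype / init_kernel_type hooks let
# subclasses change the dtype or kernel attributes. Each activation op
# overrides setUp with its own reference computation, and the
# create_test_act_cudnn_class / create_test_act_fp16_class factories at the
# bottom of the file generate cuDNN and float16 variants of these tests.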

class TestSqrtOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of sqrt op must be Variable or numpy.ndarray.
            in1 = 1
            self.assertRaises(TypeError, fluid.layers.sqrt, in1)
            # The input dtype of sqrt op must be float16, float32, float64.
            in2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.sqrt, in2)

            in3 = fluid.layers.data(
                name='input3', shape=[12, 10], dtype="float16")
            fluid.layers.sqrt(x=in3)


class TestActivation(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.init_dtype()
        self.init_kernel_type()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        self.dtype = np.float64

    def init_kernel_type(self):
        pass


class TestSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLogSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestTanh(TestActivation):
    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        # TODO: if dtype is float64, the output (Out) differs at CPUPlace
        # depending on whether inplace is used, so use float32 for now.
        self.dtype = np.float32


class TestAtan(TestActivation):
    def setUp(self):
        self.op_type = "atan"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTanhShrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [10, 17]).astype(self.dtype)
        out = x - np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardShrink(TestActivation):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.init_dtype()

        threshold = 0.5
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype) * 10
        out = np.copy(x)
        out[(out >= -threshold) & (out <= threshold)] = 0

        self.attrs = {'lambda': threshold}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftShrink(TestActivation):
    def setUp(self):
        self.op_type = "softshrink"
        self.init_dtype()

        lambda_val = 0.1
        x = np.random.uniform(0.25, 10, [10, 12]).astype(self.dtype)
        out = np.copy(x)
        out = (out < -lambda_val) * (out + lambda_val) + (out > lambda_val) * (
            out - lambda_val)

        self.attrs = {'lambda': lambda_val}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSqrt(TestActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRsqrt(TestActivation):
    def setUp(self):
        self.op_type = "rsqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype) * 10
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.0005)


class TestAbs(TestActivation):
    def setUp(self):
        self.op_type = "abs"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 25]).astype(self.dtype)
        # Because we set delta = 0.005 when computing the numeric gradient,
        # if x is too small (e.g. 0.002), x_neg will be -0.003 and x_pos will
        # be 0.007, so the numeric gradient is inaccurate. Avoid such inputs.
        x[np.abs(x) < 0.005] = 0.02
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCeil(TestActivation):
    def setUp(self):
        self.op_type = "ceil"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # Same reason as TestFloor: the gradient is undefined, so skip the check.
    def test_check_grad(self):
        pass


class TestFloor(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The gradient of floor (like ceil and round) is undefined; the op
    # returns zero as the gradient while numpy returns nan, so skip the check.
    def test_check_grad(self):
        pass


class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
        self.init_dtype()

        x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSin(TestActivation):
    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
        self.init_dtype()

        x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        pass


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason as TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestLeakyRelu(TestActivation):
    def setUp(self):
        self.op_type = "leaky_relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason as TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0.02 * x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


def gelu(x, approximate):
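    # NumPy/SciPy reference for GELU: the tanh approximation when
    # approximate=True, the exact erf-based formulation otherwise.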
    if approximate:
        y_ref = 0.5 * x * (1.0 + np.tanh(
            np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)


class TestGeluApproximate(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        approximate = True
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        approximate = False
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')
C
Clementine 已提交
457 458


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.init_dtype()

        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same reason as TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()

        x = np.random.uniform(-1, 10, [10, 12]).astype(self.dtype)
        threshold = 6.0
        # The same reason as TestAbs
        x[np.abs(x) < 0.005] = 0.02
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        out = np.minimum(np.maximum(x, 0), threshold)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardSwish(TestActivation):
    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()

        x = np.random.uniform(-6, 6, [10, 12]).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        # The same reason as TestAbs
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
        out = x * np.minimum(np.maximum(x + offset, 0), threshold) / scale

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason as TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [10, 12]).astype(self.dtype)
        alpha = 1.
        out = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
        # Note: unlike other ReLU extensions, the standard ELU (i.e. alpha = 1)
        # is differentiable at 0, so we can skip modifications such as
        # x[np.abs(x) < 0.005] = 0.02 here.
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestELUOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of elu_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.elu, x1)
            # The input dtype of elu_op must be float16 float32 or float64.
            x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.elu, x2)


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestPow_factor_tensor(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype("float32")
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_api(self):
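        # The op should accept both a Python float and a fill_constant Tensor
        # as the exponent; both results are compared against np.power.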
        import paddle.fluid as fluid

        input = np.random.uniform(1, 2, [11, 17]).astype("float32")
        x = fluid.layers.data(
            name="x", shape=[11, 17], append_batch_size=False, dtype="float32")

        factor_1 = 2.0
        factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
        out_1 = fluid.layers.pow(x, factor=factor_1)
        out_2 = fluid.layers.pow(x, factor=factor_2)

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2 = exe.run(fluid.default_main_program(),
                               feed={"x": input},
                               fetch_list=[out_1, out_2])

        assert np.array_equal(res_1, np.power(input, 2))
        assert np.array_equal(res_2, np.power(input, 3))


class TestSTanh(TestActivation):
    def setUp(self):
        self.op_type = "stanh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        scale_a = 2.0 / 3.0
        scale_b = 1.7159
        out = scale_b * np.tanh(x * scale_a)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()
        self.dtype = np.float64

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 + np.exp(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.divide(x, 1 + np.abs(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()

        threshold = 0.25
        self.delta = 0.005
        X = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)

        # Same reason as TestAbs
        X[np.abs(X - threshold) < self.delta] = threshold + 0.2
        out = (X > threshold) * X

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.init_dtype()

        X = np.random.uniform(-5, 5, [10, 12]).astype("float32")
        slope = 0.2
        offset = 0.5
        lower_threshold = -offset / slope
        upper_threshold = (1 - offset) / slope

        self.delta = 0.005

        # Same reason as TestAbs
        X[(X - lower_threshold) < self.delta] = lower_threshold - 0.02
        X[(X - upper_threshold) < self.delta] = upper_threshold + 0.02

        temp = X * slope + offset
        out = np.maximum(0.0, np.minimum(1.0, temp))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSwish(TestActivation):
    def setUp(self):
        self.op_type = "swish"
        self.init_dtype()

        X = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        beta = 2.3
        out = X * expit(beta * X)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'beta': beta}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


#------------------ Test Cudnn Activation----------------------
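# create_test_act_cudnn_class dynamically subclasses a given activation test,
# switches its kernel to cuDNN via the "use_cudnn" attribute, and registers
# the new class in the module namespace so unittest can discover it.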
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


#------------------ Test Fp16 ----------------------
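# create_test_act_fp16_class mirrors the cuDNN factory: the derived test
# overrides init_dtype to np.float16 and only runs its checks when the CUDA
# place reports float16 support; passing grad_check=False skips the gradient
# check entirely.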
def create_test_act_fp16_class(parent,
                               atol=1e-3,
                               grad_check=True,
                               grad_atol=0.80):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place, ['X'], 'Out', max_relative_error=grad_atol)

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhShrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftShrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestPow_factor_tensor, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish)
create_test_act_fp16_class(TestHardSwish)

if __name__ == "__main__":
    unittest.main()