#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from scipy.special import expit, erf

from op_test import OpTest, convert_float_to_uint16
import paddle
import paddle.nn.functional as F
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard

paddle.enable_static()


class TestSqrtOpError(unittest.TestCase):

    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of sqrt op must be Variable or numpy.ndarray.
            in1 = 1
            self.assertRaises(TypeError, fluid.layers.sqrt, in1)
            # The input dtype of sqrt op must be float16, float32, float64.
            in2 = fluid.layers.data(name='input2',
                                    shape=[12, 10],
                                    dtype="int32")
            self.assertRaises(TypeError, fluid.layers.sqrt, in2)

            in3 = fluid.layers.data(name='input3',
                                    shape=[12, 10],
                                    dtype="float16")
            fluid.layers.sqrt(x=in3)


class TestActivation(OpTest):

    def setUp(self):
        self.op_type = "exp"
        self.init_dtype()
        self.init_shape()
        self.init_kernel_type()
        self.check_eager = True
        self.python_api = paddle.exp

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_output(check_eager=check_eager)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)

    def init_dtype(self):
        self.dtype = np.float64

    def init_shape(self):
        self.shape = [11, 17]

    def init_kernel_type(self):
        pass
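
# Note on the structure used throughout this file: TestActivation is the common
# base case.  Subclasses normally override setUp() to set self.op_type /
# self.python_api plus the reference inputs/outputs, and the init_dtype() /
# init_shape() / init_kernel_type() hooks above to vary the dtype, the input
# shape (including the zero-dim [] case) and the kernel variant, while
# test_check_output() / test_check_grad() are inherited unchanged.
# A minimal subclass sketch (illustrative only; "my_op" is a hypothetical
# operator name, not one registered here):
#
#     class TestMyOp(TestActivation):
#         def setUp(self):
#             self.op_type = "my_op"
#             self.init_dtype()
#             self.init_shape()
#             x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
#             self.inputs = {'X': x}
#             self.outputs = {'Out': x}  # reference output computed in numpy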


class TestActivation_ZeroDim(TestActivation):

    def init_shape(self):
        self.shape = []


class TestExpm1(TestActivation):

    def setUp(self):
        self.op_type = "expm1"
        self.python_api = paddle.expm1
        self.init_dtype()
        self.init_shape()

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.expm1(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)
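
# expm1(x) computes exp(x) - 1; np.expm1 evaluates this directly, avoiding the
# cancellation error a literal exp(x) - 1 would suffer for x close to zero, so
# it is the natural numpy reference for the expm1 operator above.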


class TestExpm1_ZeroDim(TestExpm1):

    def init_shape(self):
        self.shape = []


class TestExpm1API(unittest.TestCase):

    def init_dtype(self):
        self.dtype = 'float64'
        self.shape = [11, 17]

    def setUp(self):
        self.init_dtype()
        self.x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        self.out_ref = np.expm1(self.x)

        self.place = [paddle.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.place.append(paddle.CUDAPlace(0))

    def test_static_api(self):
        paddle.enable_static()

        def run(place):
            with paddle.static.program_guard(paddle.static.Program()):
                X = paddle.fluid.data('X', self.shape, dtype=self.dtype)
                out = paddle.expm1(X)
                exe = paddle.static.Executor(place)
                res = exe.run(feed={'X': self.x})
            for r in res:
                np.testing.assert_allclose(self.out_ref, r, rtol=1e-05)

        for place in self.place:
            run(place)

    def test_dygraph_api(self):

        def run(place):
            paddle.disable_static(place)
            X = paddle.to_tensor(self.x)
            out = paddle.expm1(X)
            np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)
            paddle.enable_static()

        for place in self.place:
            run(place)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            X = paddle.fluid.data('X', self.shape, dtype='int32')
            self.assertRaises(TypeError, paddle.expm1, X)
        # The input dtype must be float16, float32, float64.


class TestParameter(object):

    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            np_x = np.array([0.1])
            data = fluid.layers.data(name="X", shape=[1])
            out = eval("paddle.%s(data, name='Y')" % self.op_type)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result, = exe.run(feed={"X": np_x}, fetch_list=[out])
            expected = eval("np.%s(np_x)" % self.op_type)
            np.testing.assert_allclose(result, expected, rtol=1e-05)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = eval("paddle.%s(x).numpy()" % self.op_type)
            z_expected = eval("np.%s(np_x)" % self.op_type)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)
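
# TestParameter is mixed into some concrete tests below (e.g. TestTanh) and
# dispatches through eval("paddle.%s(...)" % self.op_type), so the same two
# checks run against whichever paddle.<op> / np.<op> pair the host class names
# in self.op_type.  For op_type == "tanh" the eval above amounts to
# (illustrative only):
#
#     out = paddle.tanh(data, name='Y')
#     expected = np.tanh(np_x)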


class TestSigmoid(TestActivation):

    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestSigmoid_ZeroDim(TestSigmoid):

    def init_shape(self):
        self.shape = []


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSigmoidBF16(OpTest):

    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def init_shape(self):
        self.shape = [11, 17]

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out')
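
# The BF16 cases feed uint16 tensors produced by convert_float_to_uint16.
# Presumably this stores the bfloat16 bit pattern (roughly the high 16 bits of
# the float32 representation) in a uint16 array -- an assumption about the
# helper, not something verified here.  A rough sketch of that idea:
#
#     bits = np.asarray(x, np.float32).view(np.uint32)
#     bf16 = np.right_shift(bits, 16).astype(np.uint16)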


'''
class TestSigmoidBF16_ZeroDim(TestSigmoidBF16):

    def init_shape(self):
        self.shape = []
'''


class TestSilu(TestActivation):

    def setUp(self):
        self.op_type = "silu"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = x / (np.exp(-x) + 1)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSilu_ZeroDim(TestSilu):

    def init_shape(self):
        self.shape = []


class TestSiluAPI(unittest.TestCase):
    # test paddle.nn.Silu, paddle.nn.functional.silu
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.silu(x)
            m = paddle.nn.Silu()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.silu(x)
        m = paddle.nn.Silu()
        out2 = m(x)
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.silu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[11, 17],
                                        dtype='int32')
            self.assertRaises(TypeError, F.silu, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[11, 17],
                                       dtype='float16')
            F.silu(x_fp16)


class TestLogSigmoid(TestActivation):

    def setUp(self):
        self.op_type = "logsigmoid"
        self.init_dtype()
        self.init_shape()

        np.random.seed(2048)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestLogSigmoid_ZeroDim(TestLogSigmoid):

    def init_shape(self):
        self.shape = []


class TestLogSigmoidAPI(unittest.TestCase):
    # test paddle.nn.LogSigmoid, paddle.nn.functional.log_sigmoid
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.log_sigmoid(x)
            m = paddle.nn.LogSigmoid()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.log_sigmoid(x)
        m = paddle.nn.LogSigmoid()
        out2 = m(x)
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out = paddle.fluid.layers.logsigmoid(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.log_sigmoid, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[11, 17],
                                        dtype='int32')
            self.assertRaises(TypeError, F.log_sigmoid, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[11, 17],
                                       dtype='float16')
            F.log_sigmoid(x_fp16)


class TestTanh(TestActivation, TestParameter):

    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        #TODO If dtype is float64, the output (Out) has diff at CPUPlace
        # when using and not using inplace. Therefore, set dtype as float32
        # for now.
        self.dtype = np.float32


class TestTanh_ZeroDim(TestTanh):

    def init_shape(self):
        self.shape = []


class TestTanhAPI(unittest.TestCase):
    # test paddle.tanh, paddle.nn.tanh, paddle.nn.functional.tanh
    def setUp(self):
        self.dtype = 'float32'
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()
        self.executed_api()

    def executed_api(self):
        self.tanh = F.tanh

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12], self.dtype)
            out1 = self.tanh(x)
            th = paddle.nn.Tanh()
            out2 = th(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.tanh(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanh(x)
        out2 = paddle.tanh(x)
        th = paddle.nn.Tanh()
        out3 = th(x)
        out_ref = np.tanh(self.x_np)
        for r in [out1, out2, out3]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12], self.dtype)
            out = fluid.layers.tanh(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = np.tanh(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.tanh, 1)
            # The input dtype must be float16, float32.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, self.tanh, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            self.tanh(x_fp16)


class TestTanhInplaceAPI(TestTanhAPI):
    # test paddle.tanh_
    def executed_api(self):
        self.tanh = paddle.tanh_
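
# TestTanhInplaceAPI only swaps the callable returned by executed_api(), so the
# static-graph and error checks defined in TestTanhAPI above are re-run against
# the in-place variant paddle.tanh_ without duplicating the test bodies.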


class TestAtan(TestActivation, TestParameter):

    def setUp(self):
        self.op_type = "atan"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            np_x = np.array([0.1])
            data = fluid.layers.data(name="X", shape=[1])
            out = paddle.atan(data, name='Y')
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result, = exe.run(feed={"X": np_x}, fetch_list=[out])
            expected = np.arctan(np_x)
            self.assertEqual(result, expected)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.atan(x).numpy()
            z_expected = np.arctan(np_x)
            self.assertEqual(z, z_expected)


class TestAtan_ZeroDim(TestTanh):

    def init_shape(self):
        self.shape = []


class TestSinh(TestActivation):

    def setUp(self):
        self.op_type = "sinh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSinh_ZeroDim(TestSinh):

    def init_shape(self):
        self.shape = []


class TestSinhAPI(unittest.TestCase):

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = fluid.layers.sinh(x).numpy()
            z_expected = np.sinh(np_x)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            data_x = fluid.layers.data(name="data_x",
                                       shape=test_data_shape,
                                       append_batch_size=False,
                                       dtype="float32")

            pd_sinh_out = fluid.layers.sinh(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            np_sinh_res, = exe.run(fluid.default_main_program(),
                                   feed={"data_x": input_x},
                                   fetch_list=[pd_sinh_out])

        expected_res = np.sinh(input_x)
        np.testing.assert_allclose(np_sinh_res, expected_res, rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = fluid.layers.sinh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestSinhOpError(unittest.TestCase):

    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.sinh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.sinh, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.sinh(x_fp16)


class TestCosh(TestActivation):

    def setUp(self):
        self.op_type = "cosh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.cosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCosh_ZeroDim(TestCosh):

    def init_shape(self):
        self.shape = []


class TestCoshAPI(unittest.TestCase):

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = fluid.layers.cosh(x).numpy()
            z_expected = np.cosh(np_x)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            data_x = fluid.layers.data(name="data_x",
                                       shape=test_data_shape,
                                       append_batch_size=False,
                                       dtype="float32")

            pd_cosh_out = paddle.cosh(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            np_cosh_res, = exe.run(fluid.default_main_program(),
                                   feed={"data_x": input_x},
                                   fetch_list=[pd_cosh_out])

        expected_res = np.cosh(input_x)
        np.testing.assert_allclose(np_cosh_res, expected_res, rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = fluid.layers.cosh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestCoshOpError(unittest.TestCase):

    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.cosh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.cosh, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.cosh(x_fp16)


def ref_tanhshrink(x):
    out = x - np.tanh(x)
    return out
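
# tanhshrink(x) = x - tanh(x): since tanh(x) ~ x - x**3/3 near zero, the output
# is ~ x**3/3 for small inputs, and for large |x| it approaches x - sign(x).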


class TestTanhshrink(TestActivation):

    def setUp(self):
        self.op_type = "tanh_shrink"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(10, 20, self.shape).astype(self.dtype)
        out = ref_tanhshrink(x)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTanhshrink_ZeroDim(TestTanhshrink):

    def init_shape(self):
        self.shape = []


class TestTanhshrinkAPI(unittest.TestCase):
    # test paddle.nn.Tanhshrink, paddle.nn.functional.tanhshrink
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(10, 20, [10, 17]).astype(np.float64)
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.tanhshrink(x)
            tanhshrink = paddle.nn.Tanhshrink()
            out2 = tanhshrink(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_tanhshrink(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanhshrink(x)
        tanhshrink = paddle.nn.Tanhshrink()
        out2 = tanhshrink(x)
        out_ref = ref_tanhshrink(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.tanh_shrink(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_tanhshrink(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.tanhshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.tanhshrink, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.tanhshrink(x_fp16)


def ref_hardshrink(x, threshold):
    out = np.copy(x)
    out[(out >= -threshold) & (out <= threshold)] = 0
    return out
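
# Worked example for the reference above with threshold 0.5:
#     ref_hardshrink(np.array([-1.0, 0.2, 2.0]), 0.5) -> [-1.0, 0.0, 2.0]
# values inside [-threshold, threshold] are zeroed, everything else passes through.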


class TestHardShrink(TestActivation):

    def setUp(self):
        self.op_type = "hard_shrink"
        self.init_dtype()
        self.init_shape()

        self.threshold = 0.5
        self.set_attrs()
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) * 10
        out = ref_hardshrink(x, self.threshold)

        self.attrs = {'threshold': self.threshold}
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def set_attrs(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardShrink_threshold_negative(TestHardShrink):

    def set_attrs(self):
        self.threshold = -0.1


'''
class TestHardShrink_ZeroDim(TestHardShrink):

    def init_shape(self):
        self.shape = []
'''


class TestHardShrinkAPI(unittest.TestCase):
    # test paddle.nn.Hardshrink, paddle.nn.functional.hardshrink
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = F.hardshrink(x)
            hd = paddle.nn.Hardshrink()
            out2 = hd(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardshrink(x)
        hd = paddle.nn.Hardshrink()
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.hardshrink(x, 0.6)
        hd = paddle.nn.Hardshrink(0.6)
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.6)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12])
            out = fluid.layers.hard_shrink(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardshrink(self.x_np, 0.5)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.hardshrink, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.hardshrink(x_fp16)


def ref_hardtanh(x, min=-1.0, max=1.0):
    out = np.copy(x)
    out[np.abs(x - min) < 0.005] = min + 0.02
    out[np.abs(x - max) < 0.005] = max + 0.02
    out = np.minimum(np.maximum(x, min), max)
    return out
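
# The final np.minimum(np.maximum(x, min), max) rebuilds out directly from x,
# so the reference value is simply x clipped to [min, max]; the two boundary
# adjustments above it are overwritten and do not affect the result.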


class TestHardtanhAPI(unittest.TestCase):
    # test paddle.nn.Hardtanh, paddle.nn.functional.hardtanh
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = F.hardtanh(x)
            m = paddle.nn.Hardtanh()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardtanh(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardtanh(x)
        m = paddle.nn.Hardtanh()
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.hardtanh(x, -2.0, 2.0)
        m = paddle.nn.Hardtanh(-2.0, 2.0)
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np, -2.0, 2.0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardtanh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.hardtanh, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.hardtanh(x_fp16)


def ref_softshrink(x, threshold=0.5):
    out = np.copy(x)
    out = (out < -threshold) * (out + threshold) + (out > threshold) * (
        out - threshold)
    return out
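
# Worked example for the reference above with the default threshold 0.5:
#     ref_softshrink(np.array([-1.0, 0.2, 2.0])) -> [-0.5, 0.0, 1.5]
# inputs are shrunk toward zero by the threshold and zeroed inside [-0.5, 0.5].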


class TestSoftshrink(TestActivation):

    def setUp(self):
        self.op_type = "softshrink"
        self.check_eager = True
        self.python_api = paddle.nn.functional.softshrink
        self.init_dtype()
        self.init_shape()

        threshold = 0.8

        np.random.seed(1023)
        x = np.random.uniform(0.25, 10, self.shape).astype(self.dtype)
        out = ref_softshrink(x, threshold)
        self.inputs = {'X': x}
        self.attrs = {"lambda": threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSoftshrink_ZeroDim(TestSoftshrink):

    def init_shape(self):
        self.shape = []


class TestSoftshrinkAPI(unittest.TestCase):
    # test paddle.nn.Softshrink, paddle.nn.functional.softshrink
    def setUp(self):
        self.threshold = 0.8
        np.random.seed(1024)
        self.x_np = np.random.uniform(0.25, 10, [10, 12]).astype(np.float64)
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softshrink(x, self.threshold)
            softshrink = paddle.nn.Softshrink(self.threshold)
            out2 = softshrink(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softshrink(x, self.threshold)
        softshrink = paddle.nn.Softshrink(self.threshold)
        out2 = softshrink(x)
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.softshrink(x, self.threshold)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_softshrink(self.x_np, self.threshold)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.softshrink, x_int32)
            # The threshold must be no less than zero
            x_fp32 = paddle.fluid.data(name='x_fp32',
                                       shape=[12, 10],
                                       dtype='float32')
            self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.softshrink(x_fp16)


class TestSqrt(TestActivation, TestParameter):

    def setUp(self):
        self.op_type = "sqrt"
        self.python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestSqrt_ZeroDim(TestSqrt):

    def init_shape(self):
        self.shape = []


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSqrtBF16(OpTest):

    def setUp(self):
        self.op_type = "sqrt"
        self.python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(np.float32)
        out = np.sqrt(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def init_shape(self):
        self.shape = [11, 17]

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)


class TestRsqrt(TestActivation):

    def setUp(self):
        self.op_type = "rsqrt"
        self.python_api = paddle.rsqrt
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) * 10
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'],
                        'Out',
                        max_relative_error=0.0005,
                        check_eager=True)


'''
class TestRsqrt_ZeroDim(TestRsqrt):

    def init_shape(self):
        self.shape = []
'''


class TestAbs(TestActivation):

    def setUp(self):
        self.op_type = "abs"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # Because we set delta = 0.005 in calculating numeric gradient,
        # if x is too small, such as 0.002, x_neg will be -0.003
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
        # we should avoid this
        x[np.abs(x) < 0.005] = 0.02
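        # (The numeric gradient is a central difference, roughly
        # (f(x + delta) - f(x - delta)) / (2 * delta) with delta = 0.005, and a
        # point with |x| < delta straddles the kink of abs() at zero -- hence
        # the adjustment above.)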
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [4, 25]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=False)


class TestAbs_ZeroDim(TestAbs):

    def init_shape(self):
        self.shape = []


class TestCeil(TestActivation):

    def setUp(self):
        self.op_type = "ceil"
        self.check_eager = True
        self.python_api = paddle.ceil
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    # The same reason with TestFloor
    def test_check_grad(self):
        pass


class TestCeil_ZeroDim(TestCeil):

    def init_shape(self):
        self.shape = []


class TestFloor(TestActivation):

    def setUp(self):
        self.op_type = "floor"
        self.check_eager = True
        self.python_api = paddle.floor
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    # the gradient on floor, ceil, round is undefined.
    # we return zero as the gradient, but numpy returns nan
    # The same reason with TestFloor
    def test_check_grad(self):
        pass


class TestFloor_ZeroDim(TestFloor):

    def init_shape(self):
        self.shape = []


class TestCos(TestActivation):

    def setUp(self):
        self.op_type = "cos"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCos_ZeroDim(TestCos):

    def init_shape(self):
        self.shape = []


class TestTan(TestActivation):

    def setUp(self):
        np.random.seed(1024)
        self.op_type = "tan"
        self.init_dtype()
        self.init_shape()

        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

        out = np.tan(self.x_np)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x_np)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTan_ZeroDim(TestTan):

    def init_shape(self):
        self.shape = []


class TestTanAPI(unittest.TestCase):

    def setUp(self):
        np.random.seed(1024)
        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out_test = paddle.tan(x)
        out_ref = np.tan(self.x_np)
        np.testing.assert_allclose(out_ref, out_test.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', [11, 17], self.dtype)
            out = paddle.tan(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = np.tan(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1,
                                        test_data_shape).astype("float32")
            var = paddle.to_tensor(input_x)
            var.stop_gradient = False
            loss = paddle.tan(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestAcos(TestActivation):

    def setUp(self):
        self.op_type = "acos"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcos_ZeroDim(TestAcos):

    def init_shape(self):
        self.shape = []


class TestSin(TestActivation, TestParameter):

    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSin_ZeroDim(TestSin):

    def init_shape(self):
        self.shape = []


class TestAsin(TestActivation):

    def setUp(self):
        self.op_type = "asin"
        self.init_dtype()
        self.init_shape()

        np.random.seed(2048)
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsin_ZeroDim(TestAsin):

    def init_shape(self):
        self.shape = []


class TestAcosh(TestActivation):

    def setUp(self):
        self.op_type = "acosh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(2, 3, self.shape).astype(self.dtype)
        out = np.arccosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcosh_ZeroDim(TestAcosh):

    def init_shape(self):
        self.shape = []


class TestAsinh(TestActivation):

    def setUp(self):
        self.op_type = "asinh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.arcsinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

X
xiaoting 已提交
1543 1544 1545 1546 1547 1548
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


1549 1550 1551 1552 1553 1554
class TestAsinh_ZeroDim(TestAsinh):

    def init_shape(self):
        self.shape = []


class TestAtanh(TestActivation):

    def setUp(self):
        self.op_type = "atanh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(400)
        x = np.random.uniform(-0.9, 0.9, self.shape).astype(self.dtype)
        out = np.arctanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAtanh_ZeroDim(TestAtanh):

    def init_shape(self):
        self.shape = []


class TestRound(TestActivation):

    def setUp(self):
        self.op_type = "round"
        self.check_eager = True
        self.python_api = paddle.round
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        pass


class TestRound_ZeroDim(TestRound):

    def init_shape(self):
        self.shape = []


class TestRelu(TestActivation):

    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        if self.dtype == np.uint16:
            x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
            # The same reason with TestAbs
            x[np.abs(x) < 0.005] = 0.02
            out = convert_float_to_uint16(np.maximum(x, 0))
            self.inputs = {'X': convert_float_to_uint16(x)}
        else:
            x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
            # The same reason with TestAbs
            x[np.abs(x) < 0.005] = 0.02
            out = np.maximum(x, 0)
            self.inputs = {'X': x}

        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRelu_ZeroDim(TestRelu):

    def init_shape(self):
        self.shape = []


class TestReluAPI(unittest.TestCase):
    # test paddle.nn.ReLU, paddle.nn.functional.relu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()
        self.executed_api()

    def executed_api(self):
        self.relu = F.relu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.relu(x)
            m = paddle.nn.ReLU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.maximum(self.x_np, 0)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ReLU()
        out1 = m(x)
        out2 = self.relu(x)
        out_ref = np.maximum(self.x_np, 0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[10, 12],
                                        dtype='int32')
            self.assertRaises(TypeError, self.relu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[10, 12],
                                       dtype='float16')
            self.relu(x_fp16)


class TestReluInplaceAPI(TestReluAPI):
    # test paddle.nn.functional.relu_
    def executed_api(self):
        self.relu = F.relu_


def ref_leaky_relu(x, alpha=0.01):
    out = np.copy(x)
    out[out < 0] *= alpha
    return out
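# Quick orientation for the reference above: leaky_relu keeps positive inputs
# and scales negative ones by ``alpha``. Illustrative check (values picked
# here only for exposition, not used by the tests):
#   ref_leaky_relu(np.array([-1.0, 2.0]), alpha=0.1) -> array([-0.1, 2.0])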


class TestLeakyRelu(TestActivation):

    def get_alpha(self):
        return 0.02

    def setUp(self):
        self.op_type = "leaky_relu"
        self.init_dtype()
        self.init_shape()
        alpha = self.get_alpha()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # The same reason with TestAbs
        x[np.abs(x) < 0.005] = 0.05
        out = ref_leaky_relu(x, alpha)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {'alpha': alpha}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestLeakyReluAlpha1(TestLeakyRelu):

    def get_alpha(self):
        return 2


class TestLeakyReluAlpha2(TestLeakyRelu):

    def get_alpha(self):
        return -0.01


class TestLeakyReluAlpha3(TestLeakyRelu):

    def get_alpha(self):
        return -2.0


class TestLeakyRelu_ZeroDim(TestLeakyRelu):

    def init_shape(self):
        self.shape = []


class TestLeakyReluAPI(unittest.TestCase):
    # test paddle.nn.LeakyReLU, paddle.nn.functional.leaky_relu,
    # fluid.layers.leaky_relu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = F.leaky_relu(x)
            m = paddle.nn.LeakyReLU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_leaky_relu(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.leaky_relu(x)
        m = paddle.nn.LeakyReLU()
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.leaky_relu(x, 0.6)
        m = paddle.nn.LeakyReLU(0.6)
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np, 0.6)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12])
            out = fluid.layers.leaky_relu(x, 0.01)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_leaky_relu(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.leaky_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.leaky_relu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.leaky_relu(x_fp16)


def gelu(x, approximate):
    if approximate:
        y_ref = 0.5 * x * (
            1.0 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)
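# The two branches above are the standard GELU variants: the tanh
# approximation 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3))) and the exact
# erf form 0.5*x*(1 + erf(x/sqrt(2))). Illustrative values (for exposition
# only): both give roughly 0.841 at x = 1.0 and exactly 0.0 at x = 0.0.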


class TestGeluApproximate(TestActivation):

    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        self.init_shape()
        approximate = True
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu(TestActivation):

    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        self.init_shape()
        approximate = False
        np.random.seed(2048)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu_ZeroDim(TestGelu):

    def init_shape(self):
        self.shape = []


class TestGELUAPI(unittest.TestCase):
    # test paddle.nn.GELU, paddle.nn.functional.gelu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.gelu(x)
            m = paddle.nn.GELU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = gelu(self.x_np, False)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.gelu(x)
        m = paddle.nn.GELU()
        out2 = m(x)
        out_ref = gelu(self.x_np, False)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.gelu(x, True)
        m = paddle.nn.GELU(True)
        out2 = m(x)
        out_ref = gelu(self.x_np, True)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.gelu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[11, 17],
                                        dtype='int32')
            self.assertRaises(TypeError, F.gelu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[11, 17],
                                       dtype='float16')
            F.gelu(x_fp16)


class TestBRelu(TestActivation):

    def setUp(self):
        self.op_type = "brelu"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same with TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')
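# Note for exposition: the brelu op exercised above clips the input into
# [t_min, t_max], i.e. out = min(max(x, t_min), t_max). The perturbations in
# setUp keep samples away from the clipping points, where the kink makes
# numeric gradient checks unreliable.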


class TestBreluAPI(unittest.TestCase):
    # test paddle.fluid.layers.brelu
    def setUp(self):
        np.random.seed(1024)
        self.t_min = 0.
        self.t_max = 24.
        self.x_np = np.random.uniform(-1, 30, [10, 12]).astype('float32')
        self.out_ref = np.copy(self.x_np)
        self.out_ref[self.out_ref < self.t_min] = self.t_min
        self.out_ref[self.out_ref > self.t_max] = self.t_max
        self.out_ref = self.out_ref.astype('float32')
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_fluid_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', [10, 12])
            out = paddle.fluid.layers.brelu(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            np.testing.assert_allclose(self.out_ref, res[0], rtol=1e-05)

            paddle.disable_static(self.place)
            x = paddle.to_tensor(self.x_np)
            out = paddle.fluid.layers.brelu(x)
            np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)
            paddle.enable_static()

    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.brelu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.brelu, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.layers.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            fluid.layers.brelu(x_fp16)


def ref_relu6(x, threshold=6.0):
    out = np.copy(x)
    out[np.abs(x - threshold) < 0.005] = threshold + 0.02
    out = np.minimum(np.maximum(x, 0), threshold)
    return out
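# relu6 reference: clip into [0, threshold], i.e. min(max(x, 0), threshold).
# Illustrative check (exposition only, not used by the tests):
#   ref_relu6(np.array([-1., 3., 8.])) -> array([0., 3., 6.])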


class TestRelu6(TestActivation):

    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.relu6

        np.random.seed(1024)
        x = np.random.uniform(-1, 10, self.shape).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_relu6(x)

        self.inputs = {'X': x}
        self.attrs = {'threshold': 6.0}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestRelu6_ZeroDim(TestRelu6):

    def init_shape(self):
        self.shape = []


class TestRelu6API(unittest.TestCase):
    # test paddle.nn.ReLU6, paddle.nn.functional.relu6
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 10, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.relu6(x)
            relu6 = paddle.nn.ReLU6()
            out2 = relu6(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_relu6(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.relu6(x)
        relu6 = paddle.nn.ReLU6()
        out2 = relu6(x)
        out_ref = ref_relu6(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.relu6(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_relu6(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.relu6, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.relu6, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.relu6(x_fp16)
2110 2111


2112
def ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0):
Z
Zhang Ting 已提交
2113 2114 2115 2116
    x_dtype = x.dtype
    if x_dtype == 'float16':
        x_dtype = 'float16'
        x = x.astype('float32')
2117
    return (x * np.minimum(np.maximum(x + offset, 0.), threshold) /
Z
Zhang Ting 已提交
2118
            scale).astype(x_dtype)
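# hard_swish reference: x * min(max(x + offset, 0), threshold) / scale. With
# the default threshold=6, scale=6, offset=3 it is 0 for x <= -3 and equals x
# for x >= 3 (illustrative facts only, not extra test data).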


class TestHardSwish(TestActivation):

    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.hardswish

        np.random.seed(1024)
        x = np.random.uniform(-6, 6, self.shape).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        #the same with TestAbs
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
        out = ref_hardswish(x, threshold, scale, offset)

        self.inputs = {'X': x}
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestHardSwish_ZeroDim(TestHardSwish):

    def init_shape(self):
        self.shape = []


class TestHardswishAPI(unittest.TestCase):
    # test paddle.nn.Hardswish, paddle.nn.functional.hardswish
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.hardswish(x)
            m = paddle.nn.Hardswish()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardswish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor([11648., 11448.])
        out1 = F.hardswish(x)
        m = paddle.nn.Hardswish()
        out2 = m(x)
        out_ref = [11648., 11448.]
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.hard_swish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardswish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.fluid.layers.hard_swish(x)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardswish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.hardswish, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.hardswish(x_fp16)

    def test_api_eager_dygraph(self):
        with _test_eager_guard():
            self.test_dygraph_api()
            self.test_errors()


class TestSoftRelu(TestActivation):

    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        np.random.seed(4096)
        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason with TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestSoftReluOpError(unittest.TestCase):

    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.soft_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.soft_relu, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            fluid.layers.soft_relu(x_fp16)


def elu(x, alpha):
    out_ref = np.where(x > 0, x, alpha * (np.exp(x) - 1))
    return out_ref.astype(x.dtype)
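# ELU reference: x for x > 0, alpha * (exp(x) - 1) otherwise. Illustrative
# value (exposition only): elu(np.array([-1.0]), 1.0) is about -0.632.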


class TestELU(TestActivation):

    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
        alpha = self.get_alpha()
        out = elu(x, alpha)
        # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1)
        # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def get_alpha(self):
        return 1.


class TestELUAlpha(TestELU):

    def get_alpha(self):
        return -0.2


class TestELU_ZeroDim(TestELU):

    def init_shape(self):
        self.shape = []


class TestELUAPI(unittest.TestCase):
    # test paddle.nn.ELU, paddle.nn.functional.elu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()
        self.executed_api()

    def executed_api(self):
        self.elu = F.elu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.elu(x)
            m = paddle.nn.ELU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = elu(self.x_np, 1.0)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = self.elu(x)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ELU()
        out2 = m(x)
        out_ref = elu(self.x_np, 1.0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.elu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ELU(0.2)
        out2 = m(x)
        out_ref = elu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.elu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[10, 12],
                                        dtype='int32')
            self.assertRaises(TypeError, self.elu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[10, 12],
                                       dtype='float16')
            self.elu(x_fp16)


class TestELUInplaceAPI(TestELUAPI):
    # test paddle.nn.functional.elu_
    def executed_api(self):
        self.elu = F.elu_

    def test_alpha_error(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        self.assertRaises(Exception, F.elu_, x, -0.2)
        paddle.enable_static()


def celu(x, alpha):
    out_ref = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x / alpha) - 1))
    return out_ref.astype(x.dtype)
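# CELU reference: max(0, x) + min(0, alpha * (exp(x / alpha) - 1)); with
# alpha = 1 it reduces to ELU. Illustrative value (exposition only):
# celu(np.array([-1.0]), 1.0) is about -0.632.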


class TestCELU(TestActivation):

    def setUp(self):
        self.op_type = "celu"
        self.init_dtype()
        self.init_shape()

        self.python_api = paddle.nn.functional.celu
        np.random.seed(1024)
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
        alpha = 1.5
        out = celu(x, alpha)
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestCELU_ZeroDim(TestCELU):

    def init_shape(self):
        self.shape = []


class TestCELUAPI(unittest.TestCase):
    # test paddle.nn.CELU, paddle.nn.functional.celu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()
        self.executed_api()

    def executed_api(self):
        self.celu = F.celu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.celu(x, 1.5)
            m = paddle.nn.CELU(1.5)
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = celu(self.x_np, 1.5)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = self.celu(x, 1.5)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(1.5)
        out2 = m(x)
        out_ref = celu(self.x_np, 1.5)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.celu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(0.2)
        out2 = m(x)
        out_ref = celu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.celu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[10, 12],
                                        dtype='int32')
            self.assertRaises(TypeError, self.celu, x_int32)
            # The alpha must be not equal 0
            x_fp32 = paddle.fluid.data(name='x_fp32',
                                       shape=[10, 12],
                                       dtype='float32')
            self.assertRaises(ZeroDivisionError, F.celu, x_fp32, 0)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[10, 12],
                                       dtype='float16')
            self.celu(x_fp16)

    def test_api_eager_dygraph(self):
        with _test_eager_guard():
            self.test_dygraph_api()
            self.test_errors()


class TestReciprocal(TestActivation):

    def setUp(self):
        self.op_type = "reciprocal"
        self.python_api = paddle.reciprocal
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestReciprocal_ZeroDim(TestReciprocal):

    def init_shape(self):
        self.shape = []


class TestLog(TestActivation):

    def setUp(self):
        self.op_type = "log"
        self.check_eager = True
        self.python_api = paddle.log
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_error(self):
        in1 = fluid.layers.data(name="in1",
                                shape=[11, 17],
                                append_batch_size=False,
                                dtype="int32")
        in2 = fluid.layers.data(name="in2",
                                shape=[11, 17],
                                append_batch_size=False,
                                dtype="int64")

        self.assertRaises(TypeError, fluid.layers.log, in1)
        self.assertRaises(TypeError, fluid.layers.log, in2)


class TestLog_ZeroDim(TestLog):

    def init_shape(self):
        self.shape = []


class TestLog2(TestActivation):

    def setUp(self):
        self.op_type = "log2"
        self.check_eager = True
        self.python_api = paddle.log2
        self.init_dtype()
        self.init_shape()

        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log2(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_error(self):
        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

        self.assertRaises(TypeError, paddle.log2, in1)
        self.assertRaises(TypeError, paddle.log2, in2)

    def test_api(self):
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.static.data(name="data_x",
                                        shape=[11, 17],
                                        dtype="float64")

            out1 = paddle.log2(data_x)
            exe = paddle.static.Executor(place=fluid.CPUPlace())
            exe.run(paddle.static.default_startup_program())
            res1, = exe.run(paddle.static.default_main_program(),
                            feed={"data_x": input_x},
                            fetch_list=[out1])
        expected_res = np.log2(input_x)
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log2(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log2(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestLog2_ZeroDim(TestLog2):

    def init_shape(self):
        self.shape = []


class TestLog10(TestActivation):

    def setUp(self):
        self.op_type = "log10"
        self.check_eager = True
        self.python_api = paddle.log10
        self.init_dtype()
        self.init_shape()

        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log10(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestLog10_ZeroDim(TestLog10):

    def init_shape(self):
        self.shape = []


class TestLog10API(unittest.TestCase):

    def test_error(self):
        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

        self.assertRaises(TypeError, paddle.log10, in1)
        self.assertRaises(TypeError, paddle.log10, in2)

    def test_api(self):
        with paddle.static.program_guard(paddle.static.Program(),
                                         paddle.static.Program()):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.static.data(name="data_x",
                                        shape=[11, 17],
                                        dtype="float64")

            out1 = paddle.log10(data_x)
            exe = paddle.static.Executor(place=paddle.CPUPlace())
            exe.run(paddle.static.default_startup_program())
            res1, = exe.run(paddle.static.default_main_program(),
                            feed={"data_x": input_x},
                            fetch_list=[out1])
        expected_res = np.log10(input_x)
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log10(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log10(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestLog1p(TestActivation):

    def setUp(self):
        self.op_type = "log1p"
        self.check_eager = True
        self.python_api = paddle.log1p
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log1p(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestLog1p_ZeroDim(TestLog1p):

    def init_shape(self):
        self.shape = []


class TestLog1pAPI(unittest.TestCase):

    def test_api(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.layers.data(name="data_x",
                                       shape=[11, 17],
                                       append_batch_size=False,
                                       dtype="float64")

            out1 = paddle.log1p(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            res1, = exe.run(fluid.default_main_program(),
                            feed={"data_x": input_x},
                            fetch_list=[out1])
        expected_res = np.log1p(input_x)
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.dygraph.to_variable(np_x)
            z = paddle.log1p(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log1p(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestSquare(TestActivation):

    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'],
                        'Out',
                        max_relative_error=0.007,
                        check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestSquare_ZeroDim(TestSquare):

    def init_shape(self):
        self.shape = []


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSquareBF16(OpTest):

    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
        out = np.square(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'],
                                   'Out',
                                   numeric_grad_delta=0.5,
                                   check_eager=True)


class TestPow(TestActivation):

    def setUp(self):
        self.op_type = "pow"
        self.python_api = paddle.pow
        self.check_eager = True
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output(check_eager=self.check_eager)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=self.check_eager)


class TestPow_ZeroDim(TestPow):

    def init_shape(self):
        self.shape = []


class TestPow_factor_tensor(TestActivation):

    def setUp(self):
        self.op_type = "pow"
        self.check_eager = False
        self.python_api = paddle.pow
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype("float32")
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output(check_eager=self.check_eager)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=self.check_eager)

    def test_api(self):
        input = np.random.uniform(1, 2, [11, 17]).astype("float32")
        x = fluid.layers.data(name="x",
                              shape=[11, 17],
                              append_batch_size=False,
                              dtype="float32")
        res = fluid.layers.data(name="res",
                                shape=[11, 17],
                                append_batch_size=False,
                                dtype="float32")

        factor_1 = 2.0
        factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
        out_1 = fluid.layers.pow(x, factor=factor_1)
        out_2 = fluid.layers.pow(x, factor=factor_2)
        out_4 = paddle.pow(x, factor_1, name='pow_res')
        out_6 = paddle.pow(x, factor_2)
        self.assertEqual(('pow_res' in out_4.name), True)

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2, res, res_6 = exe.run(
            fluid.default_main_program(),
            feed={"x": input},
            fetch_list=[out_1, out_2, res, out_6])

        assert np.allclose(res_1, np.power(input, 2))
        assert np.allclose(res_2, np.power(input, 3))
        assert np.allclose(res_6, np.power(input, 3))

    def test_error(self):
        in1 = fluid.layers.data(name="in1",
                                shape=[11, 17],
                                append_batch_size=False,
                                dtype="int32")
        in2 = fluid.layers.data(name="in2",
                                shape=[11, 17],
                                append_batch_size=False,
                                dtype="int64")
        in3 = fluid.layers.data(name="in3",
                                shape=[11, 17],
                                append_batch_size=False,
                                dtype="float32")
        in4 = fluid.layers.data(name="in4",
                                shape=[11, 17],
                                append_batch_size=False,
                                dtype="float64")

        factor_1 = fluid.layers.fill_constant([1], "float64", 3.0)

        self.assertRaises(TypeError, fluid.layers.pow, x=in1, factor=factor_1)
        self.assertRaises(TypeError, fluid.layers.pow, x=in2, factor=factor_1)
        self.assertRaises(TypeError, fluid.layers.pow, x=in3, factor=factor_1)
        self.assertRaises(TypeError, fluid.layers.pow, x=in4, factor=factor_1)


def ref_stanh(x, scale_a=0.67, scale_b=1.7159):
    out = scale_b * np.tanh(x * scale_a)
    return out
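# stanh reference: scale_b * tanh(scale_a * x). With the default scales the
# slope at x = 0 is scale_a * scale_b (about 1.15); stated for exposition only.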


class TestSTanh(TestActivation):

    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        self.op_type = "stanh"
        self.init_dtype()
        self.init_shape()

        scale_a = self.get_scale_a()
        scale_b = self.get_scale_b()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        # The same reason with TestAbs
        out = ref_stanh(x, scale_a, scale_b)

        self.inputs = {'X': x}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSTanhScaleA(TestSTanh):

    def get_scale_a(self):
        return 2.0


class TestSTanhScaleB(TestSTanh):

    def get_scale_b(self):
        return 0.5


class TestSTanh_ZeroDim(TestSTanh):

    def init_shape(self):
        self.shape = []


2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004
class TestSTanhAPI(unittest.TestCase):
    # test paddle.nn.stanh
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.scale_a = self.get_scale_a()
        self.scale_b = self.get_scale_b()
        self.place=paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out = paddle.stanh(x, self.scale_a, self.scale_b)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.stanh(x, self.scale_a, self.scale_b)
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in [out]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12])
            out = fluid.layers.stanh(x, self.scale_a, self.scale_b)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, paddle.stanh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, paddle.stanh, x_int32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            paddle.stanh(x_fp16)


class TestSTanhAPIScaleA(TestSTanhAPI):

    def get_scale_a(self):
        return 2.0


class TestSTanhAPIScaleB(TestSTanhAPI):

    def get_scale_b(self):
        return 0.5


def ref_softplus(x, beta=1, threshold=20):
    x_beta = beta * x
    out = np.select([x_beta <= threshold, x_beta > threshold],
                    [np.log(1 + np.exp(x_beta)) / beta, x])
    return out
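# Illustrative sketch (not collected by unittest; assumes dygraph mode): the
# functional API should reproduce the reference above, including the linear
# fallback that keeps exp() from overflowing once beta * x exceeds `threshold`.
def _softplus_api_sketch(x, beta=2, threshold=15):
    # Expected to match ref_softplus(x, beta, threshold) elementwise.
    return F.softplus(paddle.to_tensor(x), beta, threshold)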


class TestSoftplus(TestActivation):

    def setUp(self):
        self.op_type = "softplus"
        self.python_api = paddle.nn.functional.softplus
        self.init_dtype()
        self.init_shape()

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': x}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': out}

        self.check_eager = True

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)


class TestSoftplus_ZeroDim(TestSoftplus):

    def init_shape(self):
        self.shape = []


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestSoftplusBF16(OpTest):

    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(np.float32)
        out = ref_softplus(x, beta, threshold)
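        # bfloat16 tensors are carried as uint16 bit patterns, so both the input
        # and the expected output are packed with convert_float_to_uint16.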
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', numeric_grad_delta=0.05)


class TestSoftplusAPI(unittest.TestCase):
    # test paddle.nn.Softplus, paddle.nn.functional.softplus
    def setUp(self):
        self.beta = 2
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softplus(x, self.beta, self.threshold)
            softplus = paddle.nn.Softplus(self.beta, self.threshold)
            out2 = softplus(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softplus(x, self.beta, self.threshold)
        softplus = paddle.nn.Softplus(self.beta, self.threshold)
        out2 = softplus(x)
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.softplus(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_softplus(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softplus, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.softplus, x_int32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.softplus(x_fp16)


def ref_softsign(x):
    out = np.divide(x, 1 + np.abs(x))
    return out
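# Reference formula: softsign(x) = x / (1 + |x|); F.softsign and paddle.nn.Softsign
# are checked against it elementwise in the API tests below.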


class TestSoftsign(TestActivation):

    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()
        self.init_shape()

        self.python_api = paddle.nn.functional.softsign

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_softsign(x)
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSoftsign_ZeroDim(TestSoftsign):

    def init_shape(self):
        self.shape = []


class TestSoftsignAPI(unittest.TestCase):
    # test paddle.nn.Softsign, paddle.nn.functional.softsign
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softsign(x)
            softsign = paddle.nn.Softsign()
            out2 = softsign(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softsign(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softsign(x)
        softsign = paddle.nn.Softsign()
        out2 = softsign(x)
        out_ref = ref_softsign(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.softsign(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_softsign(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softsign, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.softsign, x_int32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.softsign(x_fp16)


def ref_thresholded_relu(x, threshold=1.0):
    out = (x > threshold) * x
    return out
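# Reference formula: thresholded_relu keeps x where x > threshold and returns 0
# elsewhere; F.thresholded_relu(x, threshold) is checked against it below.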


class TestThresholdedRelu(TestActivation):

    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()
        self.init_shape()

        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-20, 20, self.shape).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_thresholded_relu(x, threshold)
        self.inputs = {'X': x}
        self.attrs = {"threshold": threshold}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestThresholdedRelu_ZeroDim(TestThresholdedRelu):

    def init_shape(self):
        self.shape = []


class TestThresholdedReluAPI(unittest.TestCase):
    # test paddle.nn.ThresholdedReLU, paddle.nn.functional.thresholded_relu
    def setUp(self):
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-20, 20, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.thresholded_relu(x, self.threshold)
            thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
            out2 = thresholded_relu(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.thresholded_relu(x, self.threshold)
        thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
        out2 = thresholded_relu(x)
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.thresholded_relu(x, self.threshold)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.thresholded_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.thresholded_relu, x_int32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.thresholded_relu(x_fp16)


def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5):
    return np.maximum(np.minimum(x * slope + offset, 1.), 0.).astype(x.dtype)
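# Reference formula: hard_sigmoid(x) = clip(slope * x + offset, 0, 1). The defaults
# above (slope = 1/6, offset = 0.5) match F.hardsigmoid, while the fluid-API test
# below checks fluid.layers.hard_sigmoid against slope = 0.2.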


class TestHardSigmoid(TestActivation):

    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.dtype = 'float64'
        self.slope = 0.166666666666667
        self.offset = 0.5
        self.set_attrs()
        self.init_shape()

        x = np.random.uniform(-5, 5, self.shape).astype(self.dtype)
        lower_threshold = -self.offset / self.slope
        upper_threshold = (1. - self.offset) / self.slope

        # Same reason as TestAbs
        delta = 0.005
        x[np.abs(x - lower_threshold) < delta] = lower_threshold - 0.02
        x[np.abs(x - upper_threshold) < delta] = upper_threshold - 0.02

        out = ref_hardsigmoid(x, self.slope, self.offset)

        self.attrs = {'slope': self.slope, 'offset': self.offset}
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def set_attrs(self):
        pass


class TestHardSigmoidFP32(TestHardSigmoid):

    def set_attrs(self):
        self.dtype = 'float32'


class TestHardSigmoidSlopeOffset(TestHardSigmoid):

    def set_attrs(self):
        self.slope = 0.2
        self.offset = 0.4


class TestHardSigmoid_ZeroDim(TestHardSigmoid):

    def init_shape(self):
        self.shape = []


class TestHardsigmoidAPI(unittest.TestCase):
    # test paddle.nn.Hardsigmoid, paddle.nn.functional.hardsigmoid
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.hardsigmoid(x)
            m = paddle.nn.Hardsigmoid()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardsigmoid(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardsigmoid(x)
        m = paddle.nn.Hardsigmoid()
        out2 = m(x)
        out_ref = ref_hardsigmoid(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.hard_sigmoid(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.fluid.layers.hard_sigmoid(x)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardsigmoid, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.hardsigmoid, x_int32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.hardsigmoid(x_fp16)


def ref_swish(x):
    out = x * expit(x)
    return out
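# Reference formula: swish(x) = x * sigmoid(x), with expit being SciPy's logistic
# sigmoid; the op tests below fix the 'beta' attribute to 1.0 to match it.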


class TestSwish(TestActivation):

    def setUp(self):
        self.op_type = "swish"
        self.python_api = paddle.nn.functional.swish
        self.init_dtype()
        self.init_shape()

        self.check_eager = True

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_swish(x)
        self.inputs = {'X': x}
        self.attrs = {'beta': 1.0}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)


class TestSwish_ZeroDim(TestSwish):

    def init_shape(self):
        self.shape = []


class TestSwishAPI(unittest.TestCase):
    # test paddle.nn.Swish, paddle.nn.functional.swish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.swish(x)
            swish = paddle.nn.Swish()
            out2 = swish(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_swish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def func_test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.swish(x)
        swish = paddle.nn.Swish()
        out2 = swish(x)
        out_ref = ref_swish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_dygraph_api(self):
        with _test_eager_guard():
            self.func_test_dygraph_api()
        self.func_test_dygraph_api()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.swish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_swish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.swish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.swish, x_int32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.swish(x_fp16)


def ref_mish(x, threshold=20.):
    softplus = np.select([x <= threshold, x > threshold],
                         [np.log(1 + np.exp(x)), x])
    return x * np.tanh(softplus)
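# Illustrative sketch (not collected by unittest; assumes dygraph mode):
# mish(x) = x * tanh(softplus(x)), with the same overflow guard as ref_softplus.
def _mish_api_sketch(x):
    # Expected to match ref_mish(x) elementwise for the default threshold.
    return F.mish(paddle.to_tensor(x))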


class TestMish(TestActivation):

    def setUp(self):
        self.op_type = "mish"
        self.python_api = paddle.fluid.layers.nn.mish
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_mish(x)
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestMish_ZeroDim(TestMish):

    def init_shape(self):
        self.shape = []


class TestMishAPI(unittest.TestCase):
    # test paddle.nn.Mish, paddle.nn.functional.mish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place=paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.mish(x)
            mish = paddle.nn.Mish()
            out2 = mish(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_mish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.mish(x)
        mish = paddle.nn.Mish()
        out2 = mish(x)
        out_ref = ref_mish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.mish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_mish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.mish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(name='x_int32',
                                        shape=[12, 10],
                                        dtype='int32')
            self.assertRaises(TypeError, F.mish, x_int32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(name='x_fp16',
                                       shape=[12, 10],
                                       dtype='float16')
            F.mish(x_fp16)


#------------------ Test Error Activation----------------------
def create_test_error_class(op_type):

    class TestOpErrors(unittest.TestCase):

        def test_errors(self):
            with program_guard(Program(), Program()):
                op = getattr(fluid.layers, op_type)
                # The input dtype of op_type must be float32, float64.
                in1 = fluid.layers.data(name='input2',
                                        shape=[12, 10],
                                        dtype="int32")
                in2 = fluid.layers.data(name='input3',
                                        shape=[12, 10],
                                        dtype="int64")
                self.assertRaises(TypeError, op, in1)
                self.assertRaises(TypeError, op, in2)

    cls_name = "{0}_{1}".format(op_type, "test_errors")
    TestOpErrors.__name__ = cls_name
    globals()[cls_name] = TestOpErrors


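# Usage sketch for the factory above: each call below registers a class named
# "<op>_test_errors" in this module's globals. A hypothetical
#     create_test_error_class('my_float_only_op')
# would likewise generate "my_float_only_op_test_errors" asserting that int32 and
# int64 inputs raise TypeError ('my_float_only_op' is only an illustrative name).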
create_test_error_class('acos')
create_test_error_class('asin')
create_test_error_class('atan')
create_test_error_class('ceil')
create_test_error_class('cos')
create_test_error_class('floor')
create_test_error_class('reciprocal')
create_test_error_class('round')
create_test_error_class('rsqrt')
create_test_error_class('sin')
create_test_error_class('sqrt')
create_test_error_class('tanh')
create_test_error_class('tan')
create_test_error_class('acosh')
create_test_error_class('asinh')
create_test_error_class('atanh')


#------------------ Test Cudnn Activation----------------------
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):

    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActCudnn(parent):

        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


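# Each registration below clones the parent test with attrs={"use_cudnn": True}
# (via init_kernel_type) and is skipped when CUDA is unavailable; e.g. TestRelu
# gains a generated "TestRelu_cudnn" variant.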
create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


#------------------ Test Fp16 ----------------------
def create_test_act_fp16_class(parent,
                               atol=1e-3,
                               grad_check=True,
                               grad_atol=0.80):

    @unittest.skipIf(not paddle.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActFp16(parent):

        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(place, ['X'],
                                           'Out',
                                           max_relative_error=grad_atol)

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


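# Each call below generates a "<Parent>_fp16" subclass that switches init_dtype to
# np.float16, runs only when the current CUDA place reports float16 support, and
# loosens atol / grad_atol where the individual op needs it.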
create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestExpm1)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestSilu)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhshrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftshrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestTan, grad_atol=0.85)
create_test_act_fp16_class(TestCosh, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestSinh)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestAcosh, grad_atol=0.85)
create_test_act_fp16_class(TestAsinh, grad_atol=0.85)
create_test_act_fp16_class(TestAtanh, grad_atol=0.85)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu, grad_atol=0.85)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestCELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
if core.is_compiled_with_rocm():
    create_test_act_fp16_class(TestLog2, atol=5e-2, grad_atol=0.85)
else:
    create_test_act_fp16_class(TestLog2, atol=5e-2)
create_test_act_fp16_class(TestLog10, atol=5e-2)
create_test_act_fp16_class(TestLog1p, grad_atol=0.9)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestPow_factor_tensor, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish, grad_atol=0.85)
create_test_act_fp16_class(TestHardSwish)
create_test_act_fp16_class(TestMish, grad_atol=0.9)


def create_test_act_bf16_class(parent,
                               atol=1e-2,
                               grad_check=True,
                               grad_atol=0.80):

    @unittest.skipIf(not paddle.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActBF16(parent):

        def init_dtype(self):
            self.dtype = np.uint16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            self.check_grad_with_place(place, ['X'],
                                       'Out',
                                       max_relative_error=grad_atol)

    cls_name = "{0}_{1}".format(parent.__name__, "bf16")
    TestActBF16.__name__ = cls_name
    globals()[cls_name] = TestActBF16


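# The bf16 variants store tensors as np.uint16 bit patterns (see TestSoftplusBF16
# above for the convert_float_to_uint16 round trip) and check gradients with a
# looser max_relative_error.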
create_test_act_bf16_class(TestRelu)

if __name__ == "__main__":
    unittest.main()