test_activation_op.py 114.6 KB
Newer Older
1
#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
D
dzhwinter 已提交
2
#
D
dzhwinter 已提交
3 4 5
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
D
dzhwinter 已提交
6
#
D
dzhwinter 已提交
7
#     http://www.apache.org/licenses/LICENSE-2.0
D
dzhwinter 已提交
8
#
D
dzhwinter 已提交
9 10 11 12 13 14
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

Q
qijun 已提交
15
import unittest
J
joejiong 已提交
16

Q
qijun 已提交
17
import numpy as np
C
Clementine 已提交
18
from scipy.special import expit, erf
J
joejiong 已提交
19

20
from op_test import OpTest, convert_float_to_uint16
21
import paddle
22
import paddle.nn.functional as F
J
joejiong 已提交
23 24
import paddle.fluid as fluid
import paddle.fluid.core as core
25
from paddle.fluid import Program, program_guard
26
from paddle.fluid.framework import _test_eager_guard
Q
qijun 已提交
27

28 29
paddle.enable_static()

Q
qijun 已提交
30

31
class TestSqrtOpError(unittest.TestCase):
Z
Zhaolong Xing 已提交
32 33 34 35
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of sqrt op must be Variable or numpy.ndarray.
            in1 = 1
36
            self.assertRaises(TypeError, paddle.sqrt, in1)
Z
Zhaolong Xing 已提交
37
            # The input dtype of sqrt op must be float16, float32, float64.
38 39 40
            in2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32"
            )
41
            self.assertRaises(TypeError, paddle.sqrt, in2)
Z
Zhaolong Xing 已提交
42

43 44 45
            in3 = fluid.layers.data(
                name='input3', shape=[12, 10], dtype="float16"
            )
46
            paddle.sqrt(x=in3)
Z
Zhaolong Xing 已提交
47 48


C
chengduo 已提交
49
class TestActivation(OpTest):
Q
qijun 已提交
50 51
    def setUp(self):
        self.op_type = "exp"
52
        self.init_dtype()
53
        self.init_shape()
54
        self.init_kernel_type()
C
chentianyu03 已提交
55 56
        self.check_eager = True
        self.python_api = paddle.exp
57

58
        np.random.seed(2049)
59
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
60 61 62 63
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
Q
qijun 已提交
64 65

    def test_check_output(self):
66 67 68 69
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_output(check_eager=check_eager)
Q
qijun 已提交
70 71

    def test_check_grad(self):
72 73
        if self.dtype == np.float16:
            return
74 75 76 77
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)
Q
qijun 已提交
78

79
    def init_dtype(self):
80
        self.dtype = np.float64
81

82 83 84
    def init_shape(self):
        self.shape = [11, 17]

85 86 87
    def init_kernel_type(self):
        pass

Q
qijun 已提交
88

89 90 91 92 93
class TestActivation_ZeroDim(TestActivation):
    def init_shape(self):
        self.shape = []


R
ronnywang 已提交
94 95 96
class TestExpm1(TestActivation):
    def setUp(self):
        self.op_type = "expm1"
97
        self.python_api = paddle.expm1
R
ronnywang 已提交
98
        self.init_dtype()
99
        self.init_shape()
R
ronnywang 已提交
100 101

        np.random.seed(2049)
102
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
R
ronnywang 已提交
103 104 105 106 107 108
        out = np.expm1(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
109 110 111 112
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)
R
ronnywang 已提交
113 114


115 116 117 118 119
class TestExpm1_ZeroDim(TestExpm1):
    def init_shape(self):
        self.shape = []


R
ronnywang 已提交
120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143
class TestExpm1API(unittest.TestCase):
    def init_dtype(self):
        self.dtype = 'float64'
        self.shape = [11, 17]

    def setUp(self):
        self.init_dtype()
        self.x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        self.out_ref = np.expm1(self.x)

        self.place = [paddle.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.place.append(paddle.CUDAPlace(0))

    def test_static_api(self):
        paddle.enable_static()

        def run(place):
            with paddle.static.program_guard(paddle.static.Program()):
                X = paddle.fluid.data('X', self.shape, dtype=self.dtype)
                out = paddle.expm1(X)
                exe = paddle.static.Executor(place)
                res = exe.run(feed={'X': self.x})
            for r in res:
144
                np.testing.assert_allclose(self.out_ref, r, rtol=1e-05)
R
ronnywang 已提交
145 146 147 148 149 150 151 152 153

        for place in self.place:
            run(place)

    def test_dygraph_api(self):
        def run(place):
            paddle.disable_static(place)
            X = paddle.to_tensor(self.x)
            out = paddle.expm1(X)
154
            np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)
R
ronnywang 已提交
155 156 157 158 159 160 161 162 163 164 165 166 167
            paddle.enable_static()

        for place in self.place:
            run(place)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            X = paddle.fluid.data('X', self.shape, dtype='int32')
            self.assertRaises(TypeError, paddle.expm1, X)
        # The input dtype must be float16, float32, float64.


168
class TestParameter:
169 170
    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
W
WuHaobo 已提交
171
            np_x = np.array([0.1])
172
            data = fluid.layers.data(name="X", shape=[1])
W
WuHaobo 已提交
173
            out = eval("paddle.%s(data, name='Y')" % self.op_type)
174 175
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
176
            (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
W
WuHaobo 已提交
177
            expected = eval("np.%s(np_x)" % self.op_type)
178
            np.testing.assert_allclose(result, expected, rtol=1e-05)
179 180 181 182 183 184 185

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = eval("paddle.%s(x).numpy()" % self.op_type)
            z_expected = eval("np.%s(np_x)" % self.op_type)
186
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)
187 188


C
chengduo 已提交
189
class TestSigmoid(TestActivation):
Q
qijun 已提交
190 191
    def setUp(self):
        self.op_type = "sigmoid"
192
        self.init_dtype()
193
        self.init_shape()
194

195
        np.random.seed(1024)
196
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
197 198 199 200
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
Q
qijun 已提交
201

202 203 204
    def init_dtype(self):
        self.dtype = np.float32

205
    def test_check_grad(self):
206 207 208 209
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)

210

211 212 213 214 215
class TestSigmoid_ZeroDim(TestSigmoid):
    def init_shape(self):
        self.shape = []


216 217 218
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
219 220 221 222
class TestSigmoidBF16(OpTest):
    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()
223
        self.init_shape()
224 225

        np.random.seed(1024)
226
        x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
227 228 229 230 231 232 233 234 235 236
        out = 1 / (1 + np.exp(-x))

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

237 238 239
    def init_shape(self):
        self.shape = [11, 17]

240 241 242 243 244 245 246 247 248
    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out')


249 250 251 252 253 254 255 256
'''
class TestSigmoidBF16_ZeroDim(TestSigmoidBF16):

    def init_shape(self):
        self.shape = []
'''


M
minghaoBD 已提交
257 258 259 260
class TestSilu(TestActivation):
    def setUp(self):
        self.op_type = "silu"
        self.init_dtype()
261
        self.init_shape()
M
minghaoBD 已提交
262 263

        np.random.seed(1024)
264
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
M
minghaoBD 已提交
265 266 267 268 269 270 271 272 273 274 275 276 277 278
        out = x / (np.exp(-x) + 1)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


279 280 281 282 283
class TestSilu_ZeroDim(TestSilu):
    def init_shape(self):
        self.shape = []


M
minghaoBD 已提交
284 285 286 287
class TestSiluAPI(unittest.TestCase):
    # test paddle.nn.Silu, paddle.nn.functional.silu
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
288 289 290
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
M
minghaoBD 已提交
291
            else paddle.CPUPlace()
292
        )
M
minghaoBD 已提交
293 294 295 296 297 298 299 300 301 302 303 304

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.silu(x)
            m = paddle.nn.Silu()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in res:
305
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
M
minghaoBD 已提交
306 307 308 309 310 311 312 313 314

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.silu(x)
        m = paddle.nn.Silu()
        out2 = m(x)
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in [out1, out2]:
315
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
M
minghaoBD 已提交
316 317 318 319 320 321 322
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.silu, 1)
            # The input dtype must be float16, float32, float64.
323 324 325
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[11, 17], dtype='int32'
            )
M
minghaoBD 已提交
326 327
            self.assertRaises(TypeError, F.silu, x_int32)
            # support the input dtype is float16
328 329 330
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[11, 17], dtype='float16'
            )
M
minghaoBD 已提交
331 332 333
            F.silu(x_fp16)


C
chengduo 已提交
334
class TestLogSigmoid(TestActivation):
335 336
    def setUp(self):
        self.op_type = "logsigmoid"
337
        self.init_dtype()
338
        self.init_shape()
339

340
        np.random.seed(2048)
341
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
342 343
        out = np.log(1 / (1 + np.exp(-x)))

344
        self.inputs = {'X': x}
345
        self.outputs = {'Out': out}
346 347

    def test_check_grad(self):
348 349
        if self.dtype == np.float16:
            return
F
fengjiayi 已提交
350
        self.check_grad(['X'], 'Out', max_relative_error=0.008)
351 352


353 354 355 356 357
class TestLogSigmoid_ZeroDim(TestLogSigmoid):
    def init_shape(self):
        self.shape = []


358
class TestLogSigmoidAPI(unittest.TestCase):
359
    # test paddle.nn.LogSigmoid, paddle.nn.functional.log_sigmoid
360
    def setUp(self):
361
        np.random.seed(1024)
362
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
363 364 365
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
366
            else paddle.CPUPlace()
367
        )
368 369

    def test_static_api(self):
370
        paddle.enable_static()
371
        with paddle.static.program_guard(paddle.static.Program()):
372
            x = paddle.fluid.data('X', [11, 17])
373
            out1 = F.log_sigmoid(x)
374 375 376 377 378 379
            m = paddle.nn.LogSigmoid()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in res:
380
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
381 382 383 384

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
385
        out1 = F.log_sigmoid(x)
386 387 388 389
        m = paddle.nn.LogSigmoid()
        out2 = m(x)
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in [out1, out2]:
390
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
391 392 393
        paddle.enable_static()

    def test_errors(self):
394
        paddle.enable_static()
395 396
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
397
            self.assertRaises(TypeError, F.log_sigmoid, 1)
398
            # The input dtype must be float16, float32, float64.
399 400 401
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[11, 17], dtype='int32'
            )
402
            self.assertRaises(TypeError, F.log_sigmoid, x_int32)
403
            # support the input dtype is float16
404 405 406
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[11, 17], dtype='float16'
            )
407
            F.log_sigmoid(x_fp16)
408 409


410
class TestTanh(TestActivation, TestParameter):
411 412
    def setUp(self):
        self.op_type = "tanh"
413
        self.init_dtype()
414 415
        self.init_shape()

416
        np.random.seed(1024)
417
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
418 419 420 421
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
422 423

    def test_check_grad(self):
424 425
        if self.dtype == np.float16:
            return
426
        self.check_grad(['X'], 'Out')
427

428
    def init_dtype(self):
429
        # TODO If dtype is float64, the output (Out) has diff at CPUPlace
430 431 432 433
        # when using and not using inplace. Therefore, set dtype as float32
        # for now.
        self.dtype = np.float32

434

435 436 437 438 439
class TestTanh_ZeroDim(TestTanh):
    def init_shape(self):
        self.shape = []


W
WangXi 已提交
440 441 442 443
class TestTanhAPI(unittest.TestCase):
    # test paddle.tanh, paddle.nn.tanh, paddle.nn.functional.tanh
    def setUp(self):
        self.dtype = 'float32'
444
        np.random.seed(1024)
W
WangXi 已提交
445
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
446 447 448
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
W
WangXi 已提交
449
            else paddle.CPUPlace()
450
        )
451 452 453 454
        self.executed_api()

    def executed_api(self):
        self.tanh = F.tanh
W
WangXi 已提交
455 456

    def test_static_api(self):
457
        paddle.enable_static()
W
WangXi 已提交
458
        with paddle.static.program_guard(paddle.static.Program()):
459
            x = paddle.fluid.data('X', [10, 12], self.dtype)
460
            out1 = self.tanh(x)
W
WangXi 已提交
461 462 463 464 465 466
            th = paddle.nn.Tanh()
            out2 = th(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.tanh(self.x_np)
        for r in res:
467
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
W
WangXi 已提交
468 469 470

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
Z
Zhou Wei 已提交
471
        x = paddle.to_tensor(self.x_np)
W
WangXi 已提交
472 473 474 475 476 477
        out1 = F.tanh(x)
        out2 = paddle.tanh(x)
        th = paddle.nn.Tanh()
        out3 = th(x)
        out_ref = np.tanh(self.x_np)
        for r in [out1, out2, out3]:
478
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
W
WangXi 已提交
479 480 481
        paddle.enable_static()

    def test_errors(self):
482
        paddle.enable_static()
W
WangXi 已提交
483 484
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
485
            self.assertRaises(TypeError, self.tanh, 1)
W
WangXi 已提交
486
            # The input dtype must be float16, float32.
487 488 489
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
490
            self.assertRaises(TypeError, self.tanh, x_int32)
W
WangXi 已提交
491
            # support the input dtype is float16
492 493 494
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
495 496 497 498 499 500 501
            self.tanh(x_fp16)


class TestTanhInplaceAPI(TestTanhAPI):
    # test paddle.tanh_
    def executed_api(self):
        self.tanh = paddle.tanh_
W
WangXi 已提交
502 503


504
class TestAtan(TestActivation, TestParameter):
505 506 507
    def setUp(self):
        self.op_type = "atan"
        self.init_dtype()
508
        self.init_shape()
509

510
        np.random.seed(1024)
511
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
512 513 514 515 516 517 518 519
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
520
        self.check_grad(['X'], 'Out')
521

W
WuHaobo 已提交
522 523 524 525 526 527 528
    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            np_x = np.array([0.1])
            data = fluid.layers.data(name="X", shape=[1])
            out = paddle.atan(data, name='Y')
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
529
            (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
W
WuHaobo 已提交
530 531 532
            expected = np.arctan(np_x)
            self.assertEqual(result, expected)

533 534 535 536 537 538 539 540
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.atan(x).numpy()
            z_expected = np.arctan(np_x)
            self.assertEqual(z, z_expected)

541

542 543 544 545 546
class TestAtan_ZeroDim(TestTanh):
    def init_shape(self):
        self.shape = []


547 548 549 550
class TestSinh(TestActivation):
    def setUp(self):
        self.op_type = "sinh"
        self.init_dtype()
551
        self.init_shape()
552

553
        np.random.seed(1024)
554
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
555 556 557 558 559 560 561 562 563 564
        out = np.sinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

565 566 567 568 569 570 571

class TestSinh_ZeroDim(TestSinh):
    def init_shape(self):
        self.shape = []


class TestSinhAPI(unittest.TestCase):
572 573 574 575
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
576
            z = paddle.sinh(x).numpy()
577
            z_expected = np.sinh(np_x)
578
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)
579 580 581 582

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
583 584 585 586 587 588 589 590 591
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            data_x = fluid.layers.data(
                name="data_x",
                shape=test_data_shape,
                append_batch_size=False,
                dtype="float32",
            )
592

593
            pd_sinh_out = paddle.sinh(data_x)
594 595
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
596 597 598 599 600
            (np_sinh_res,) = exe.run(
                fluid.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[pd_sinh_out],
            )
601 602

        expected_res = np.sinh(input_x)
603
        np.testing.assert_allclose(np_sinh_res, expected_res, rtol=1e-05)
604 605 606 607

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
608 609 610
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
611 612
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
613
            loss = paddle.sinh(var)
614 615 616 617 618 619 620 621 622
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestSinhOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
623
            self.assertRaises(TypeError, paddle.sinh, 1)
624 625
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
626
            self.assertRaises(TypeError, paddle.sinh, x_int32)
627 628
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
629
            paddle.sinh(x_fp16)
630 631 632 633 634 635


class TestCosh(TestActivation):
    def setUp(self):
        self.op_type = "cosh"
        self.init_dtype()
636
        self.init_shape()
637

638
        np.random.seed(1024)
639
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
640 641 642 643 644 645 646 647 648 649
        out = np.cosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

650 651 652 653 654 655 656

class TestCosh_ZeroDim(TestCosh):
    def init_shape(self):
        self.shape = []


class TestCoshAPI(unittest.TestCase):
657 658 659 660
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
661
            z = paddle.cosh(x).numpy()
662
            z_expected = np.cosh(np_x)
663
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)
664 665 666 667

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
668 669 670 671 672 673 674 675 676
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            data_x = fluid.layers.data(
                name="data_x",
                shape=test_data_shape,
                append_batch_size=False,
                dtype="float32",
            )
677 678 679 680

            pd_cosh_out = paddle.cosh(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
681 682 683 684 685
            (np_cosh_res,) = exe.run(
                fluid.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[pd_cosh_out],
            )
686 687

        expected_res = np.cosh(input_x)
688
        np.testing.assert_allclose(np_cosh_res, expected_res, rtol=1e-05)
689 690 691 692

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
693 694 695
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
696 697
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
698
            loss = paddle.cosh(var)
699 700 701 702 703 704 705 706 707
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestCoshOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
708
            self.assertRaises(TypeError, paddle.cosh, 1)
709 710
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
711
            self.assertRaises(TypeError, paddle.cosh, x_int32)
712 713
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
714
            paddle.cosh(x_fp16)
715 716


717 718 719 720 721 722
def ref_tanhshrink(x):
    out = x - np.tanh(x)
    return out


class TestTanhshrink(TestActivation):
K
Kavya Srinet 已提交
723 724
    def setUp(self):
        self.op_type = "tanh_shrink"
725
        self.init_dtype()
726
        self.init_shape()
727

728
        np.random.seed(1024)
729
        x = np.random.uniform(10, 20, self.shape).astype(self.dtype)
730
        out = ref_tanhshrink(x)
731

732
        self.inputs = {'X': x}
733
        self.outputs = {'Out': out}
K
Kavya Srinet 已提交
734 735

    def test_check_grad(self):
736 737
        if self.dtype == np.float16:
            return
738
        self.check_grad(['X'], 'Out')
K
Kavya Srinet 已提交
739

740

741 742 743 744 745
class TestTanhshrink_ZeroDim(TestTanhshrink):
    def init_shape(self):
        self.shape = []


746 747 748
class TestTanhshrinkAPI(unittest.TestCase):
    # test paddle.nn.Tanhshrink, paddle.nn.functional.tanhshrink
    def setUp(self):
749
        np.random.seed(1024)
750
        self.x_np = np.random.uniform(10, 20, [10, 17]).astype(np.float64)
751 752 753
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
754
            else paddle.CPUPlace()
755
        )
756 757

    def test_static_api(self):
758
        paddle.enable_static()
759
        with paddle.static.program_guard(paddle.static.Program()):
760
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
761 762 763 764 765 766 767
            out1 = F.tanhshrink(x)
            tanhshrink = paddle.nn.Tanhshrink()
            out2 = tanhshrink(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_tanhshrink(self.x_np)
        for r in res:
768
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
769 770 771 772 773 774 775 776 777

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanhshrink(x)
        tanhshrink = paddle.nn.Tanhshrink()
        out2 = tanhshrink(x)
        out_ref = ref_tanhshrink(self.x_np)
        for r in [out1, out2]:
778
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
779 780 781
        paddle.enable_static()

    def test_errors(self):
782
        paddle.enable_static()
783 784 785 786
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.tanhshrink, 1)
            # The input dtype must be float16, float32, float64.
787 788 789
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
790 791
            self.assertRaises(TypeError, F.tanhshrink, x_int32)
            # support the input dtype is float16
792 793 794
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
795 796 797
            F.tanhshrink(x_fp16)


798 799 800 801 802 803
def ref_hardshrink(x, threshold):
    out = np.copy(x)
    out[(out >= -threshold) & (out <= threshold)] = 0
    return out


C
chengduo 已提交
804
class TestHardShrink(TestActivation):
805 806
    def setUp(self):
        self.op_type = "hard_shrink"
807
        self.init_dtype()
808
        self.init_shape()
809

810 811
        self.threshold = 0.5
        self.set_attrs()
812
        np.random.seed(1024)
813
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) * 10
814
        out = ref_hardshrink(x, self.threshold)
815

816
        self.attrs = {'threshold': self.threshold}
817
        self.inputs = {'X': x}
818
        self.outputs = {'Out': out}
819

820 821 822
    def init_shape(self):
        self.shape = [10, 12]

823 824 825
    def set_attrs(self):
        pass

826
    def test_check_grad(self):
827 828
        if self.dtype == np.float16:
            return
829
        self.check_grad(['X'], 'Out')
830 831


832 833 834 835 836
class TestHardShrink_threshold_negative(TestHardShrink):
    def set_attrs(self):
        self.threshold = -0.1


837 838 839 840 841 842 843 844
'''
class TestHardShrink_ZeroDim(TestHardShrink):

    def init_shape(self):
        self.shape = []
'''


845 846 847
class TestHardShrinkAPI(unittest.TestCase):
    # test paddle.nn.Hardshrink, paddle.nn.functional.hardshrink
    def setUp(self):
848
        np.random.seed(1024)
849
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
850 851 852
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
853
            else paddle.CPUPlace()
854
        )
855 856

    def test_static_api(self):
857
        paddle.enable_static()
858
        with paddle.static.program_guard(paddle.static.Program()):
859
            x = paddle.fluid.data('X', [10, 12])
860 861 862 863 864 865 866
            out1 = F.hardshrink(x)
            hd = paddle.nn.Hardshrink()
            out2 = hd(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in res:
867
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
868 869 870

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
Z
Zhou Wei 已提交
871
        x = paddle.to_tensor(self.x_np)
872 873 874 875 876
        out1 = F.hardshrink(x)
        hd = paddle.nn.Hardshrink()
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in [out1, out2]:
877
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
878 879 880 881 882 883

        out1 = F.hardshrink(x, 0.6)
        hd = paddle.nn.Hardshrink(0.6)
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.6)
        for r in [out1, out2]:
884
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
885 886
        paddle.enable_static()

887
    def test_errors(self):
888
        paddle.enable_static()
889
        with paddle.static.program_guard(paddle.static.Program()):
890
            # The input type must be Variable.
891
            self.assertRaises(TypeError, F.hardshrink, 1)
892
            # The input dtype must be float16, float32, float64.
893 894 895
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
896
            self.assertRaises(TypeError, F.hardshrink, x_int32)
897
            # support the input dtype is float16
898 899 900
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
901
            F.hardshrink(x_fp16)
902 903


904 905 906 907 908 909 910 911 912 913 914
def ref_hardtanh(x, min=-1.0, max=1.0):
    out = np.copy(x)
    out[np.abs(x - min) < 0.005] = min + 0.02
    out[np.abs(x - max) < 0.005] = max + 0.02
    out = np.minimum(np.maximum(x, min), max)
    return out


class TestHardtanhAPI(unittest.TestCase):
    # test paddle.nn.Hardtanh, paddle.nn.functional.hardtanh
    def setUp(self):
915
        np.random.seed(1024)
916
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
917 918 919
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
920
            else paddle.CPUPlace()
921
        )
922 923

    def test_static_api(self):
924
        paddle.enable_static()
925
        with paddle.static.program_guard(paddle.static.Program()):
926
            x = paddle.fluid.data('X', [10, 12])
927 928 929 930 931 932 933
            out1 = F.hardtanh(x)
            m = paddle.nn.Hardtanh()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardtanh(self.x_np)
        for r in res:
934
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
935 936 937

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
Z
Zhou Wei 已提交
938
        x = paddle.to_tensor(self.x_np)
939 940 941 942 943
        out1 = F.hardtanh(x)
        m = paddle.nn.Hardtanh()
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np)
        for r in [out1, out2]:
944
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
945 946 947 948 949 950

        out1 = F.hardtanh(x, -2.0, 2.0)
        m = paddle.nn.Hardtanh(-2.0, 2.0)
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np, -2.0, 2.0)
        for r in [out1, out2]:
951
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
952 953 954
        paddle.enable_static()

    def test_errors(self):
955
        paddle.enable_static()
956 957 958 959
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardtanh, 1)
            # The input dtype must be float16, float32, float64.
960 961 962
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
963 964
            self.assertRaises(TypeError, F.hardtanh, x_int32)
            # support the input dtype is float16
965 966 967
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
968 969 970
            F.hardtanh(x_fp16)


971 972 973
def ref_softshrink(x, threshold=0.5):
    out = np.copy(x)
    out = (out < -threshold) * (out + threshold) + (out > threshold) * (
974 975
        out - threshold
    )
976 977 978 979
    return out


class TestSoftshrink(TestActivation):
980 981
    def setUp(self):
        self.op_type = "softshrink"
982 983
        self.check_eager = True
        self.python_api = paddle.nn.functional.softshrink
984
        self.init_dtype()
985
        self.init_shape()
986

987
        threshold = 0.8
988

989
        np.random.seed(1023)
990
        x = np.random.uniform(0.25, 10, self.shape).astype(self.dtype)
991 992 993
        out = ref_softshrink(x, threshold)
        self.inputs = {'X': x}
        self.attrs = {"lambda": threshold}
994
        self.outputs = {'Out': out}
995 996

    def test_check_grad(self):
997 998
        if self.dtype == np.float16:
            return
999
        self.check_grad(['X'], 'Out', check_eager=True)
1000

1001

1002 1003 1004 1005 1006
class TestSoftshrink_ZeroDim(TestSoftshrink):
    def init_shape(self):
        self.shape = []


1007 1008 1009 1010
class TestSoftshrinkAPI(unittest.TestCase):
    # test paddle.nn.Softshrink, paddle.nn.functional.softshrink
    def setUp(self):
        self.threshold = 0.8
1011
        np.random.seed(1024)
1012
        self.x_np = np.random.uniform(0.25, 10, [10, 12]).astype(np.float64)
1013 1014 1015
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
1016
            else paddle.CPUPlace()
1017
        )
1018 1019

    def test_static_api(self):
1020
        paddle.enable_static()
1021
        with paddle.static.program_guard(paddle.static.Program()):
1022
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
1023 1024 1025 1026 1027 1028 1029
            out1 = F.softshrink(x, self.threshold)
            softshrink = paddle.nn.Softshrink(self.threshold)
            out2 = softshrink(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in res:
1030
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
1031 1032 1033 1034 1035 1036 1037 1038 1039

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softshrink(x, self.threshold)
        softshrink = paddle.nn.Softshrink(self.threshold)
        out2 = softshrink(x)
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in [out1, out2]:
1040
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
1041 1042
        paddle.enable_static()

1043
    def test_errors(self):
1044
        paddle.enable_static()
1045
        with paddle.static.program_guard(paddle.static.Program()):
1046
            # The input type must be Variable.
1047
            self.assertRaises(TypeError, F.softshrink, 1)
1048
            # The input dtype must be float16, float32, float64.
1049 1050 1051
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
1052
            self.assertRaises(TypeError, F.softshrink, x_int32)
1053
            # The threshold must be no less than zero
1054 1055 1056
            x_fp32 = paddle.fluid.data(
                name='x_fp32', shape=[12, 10], dtype='float32'
            )
1057
            self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
1058
            # support the input dtype is float16
1059 1060 1061
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
1062
            F.softshrink(x_fp16)
1063 1064


1065
class TestSqrt(TestActivation, TestParameter):
1066 1067
    def setUp(self):
        self.op_type = "sqrt"
1068
        self.python_api = paddle.sqrt
1069
        self.init_dtype()
1070
        self.init_shape()
1071

1072
        np.random.seed(1023)
1073
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
1074 1075 1076 1077
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1078 1079

    def test_check_grad(self):
1080 1081
        if self.dtype == np.float16:
            return
1082 1083 1084 1085
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)
1086

1087

1088 1089 1090 1091 1092
class TestSqrt_ZeroDim(TestSqrt):
    def init_shape(self):
        self.shape = []


1093 1094 1095
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
1096 1097 1098
class TestSqrtBF16(OpTest):
    def setUp(self):
        self.op_type = "sqrt"
1099
        self.python_api = paddle.sqrt
1100
        self.init_dtype()
1101
        self.init_shape()
1102 1103

        np.random.seed(1023)
1104
        x = np.random.uniform(0.1, 1, self.shape).astype(np.float32)
1105 1106 1107 1108 1109 1110 1111 1112 1113 1114
        out = np.sqrt(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

1115 1116 1117
    def init_shape(self):
        self.shape = [11, 17]

1118 1119
    def test_check_output(self):
        place = core.CUDAPlace(0)
1120
        self.check_output_with_place(place, check_eager=True)
1121 1122 1123

    def test_check_grad(self):
        place = core.CUDAPlace(0)
1124
        self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
1125 1126


Z
zhoukunsheng 已提交
1127 1128 1129
class TestRsqrt(TestActivation):
    def setUp(self):
        self.op_type = "rsqrt"
Z
zyfncg 已提交
1130
        self.python_api = paddle.rsqrt
Z
zhoukunsheng 已提交
1131
        self.init_dtype()
1132
        self.init_shape()
Z
zhoukunsheng 已提交
1133

1134
        np.random.seed(1024)
1135
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) * 10
Z
zhoukunsheng 已提交
1136 1137 1138 1139 1140
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

1141 1142 1143
    def init_shape(self):
        self.shape = [10, 12]

Z
zhoukunsheng 已提交
1144 1145 1146
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
1147 1148 1149
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.0005, check_eager=True
        )
Z
zhoukunsheng 已提交
1150 1151


1152 1153 1154 1155 1156 1157 1158 1159
'''
class TestRsqrt_ZeroDim(TestRsqrt):

    def init_shape(self):
        self.shape = []
'''


C
chengduo 已提交
1160
class TestAbs(TestActivation):
1161 1162
    def setUp(self):
        self.op_type = "abs"
1163
        self.init_dtype()
1164
        self.init_shape()
1165

1166
        np.random.seed(1024)
1167
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
C
chengduo 已提交
1168
        # Because we set delta = 0.005 in calculating numeric gradient,
Q
qijun 已提交
1169
        # if x is too small, such as 0.002, x_neg will be -0.003
C
chengduo 已提交
1170
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
Q
qijun 已提交
1171 1172
        # we should avoid this
        x[np.abs(x) < 0.005] = 0.02
1173 1174 1175 1176
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1177

1178 1179 1180
    def init_shape(self):
        self.shape = [4, 25]

1181
    def test_check_grad(self):
1182 1183
        if self.dtype == np.float16:
            return
1184
        self.check_grad(['X'], 'Out', check_eager=False)
1185

1186

1187 1188 1189 1190 1191
class TestAbs_ZeroDim(TestAbs):
    def init_shape(self):
        self.shape = []


C
chengduo 已提交
1192
class TestCeil(TestActivation):
D
dzhwinter 已提交
1193 1194
    def setUp(self):
        self.op_type = "ceil"
1195 1196
        self.check_eager = True
        self.python_api = paddle.ceil
1197
        self.init_dtype()
1198
        self.init_shape()
1199

1200
        np.random.seed(1024)
1201
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1202 1203 1204 1205
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
D
dzhwinter 已提交
1206

1207 1208 1209
    def init_shape(self):
        self.shape = [10, 12]

D
dzhwinter 已提交
1210
    # The same reason with TestFloor
C
chengduo 已提交
1211
    def test_check_grad(self):
1212 1213 1214
        pass


1215 1216 1217 1218 1219
class TestCeil_ZeroDim(TestCeil):
    def init_shape(self):
        self.shape = []


C
chengduo 已提交
1220
class TestFloor(TestActivation):
D
dzhwinter 已提交
1221 1222
    def setUp(self):
        self.op_type = "floor"
1223 1224
        self.check_eager = True
        self.python_api = paddle.floor
1225
        self.init_dtype()
1226
        self.init_shape()
1227

1228
        np.random.seed(1024)
1229
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1230 1231 1232 1233
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
D
dzhwinter 已提交
1234

1235 1236 1237
    def init_shape(self):
        self.shape = [10, 12]

D
dzhwinter 已提交
1238
    # the gradient on floor, ceil, round is undefined.
1239
    # we return zero as gradient, but the numpy return nan
C
chengduo 已提交
1240 1241
    # The same reason with TestFloor
    def test_check_grad(self):
1242 1243 1244
        pass


1245 1246 1247 1248 1249
class TestFloor_ZeroDim(TestFloor):
    def init_shape(self):
        self.shape = []


C
chengduo 已提交
1250
class TestCos(TestActivation):
C
add cos  
chengduoZH 已提交
1251 1252
    def setUp(self):
        self.op_type = "cos"
1253
        self.init_dtype()
1254
        self.init_shape()
1255

1256
        np.random.seed(1024)
1257
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1258 1259 1260 1261
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
C
add sin  
chengduoZH 已提交
1262

1263 1264 1265
    def init_shape(self):
        self.shape = [10, 12]

C
add sin  
chengduoZH 已提交
1266
    def test_check_grad(self):
1267 1268
        if self.dtype == np.float16:
            return
1269
        self.check_grad(['X'], 'Out')
C
add sin  
chengduoZH 已提交
1270

1271

1272 1273 1274 1275 1276
class TestCos_ZeroDim(TestCos):
    def init_shape(self):
        self.shape = []


J
joejiong 已提交
1277 1278 1279 1280 1281
class TestTan(TestActivation):
    def setUp(self):
        np.random.seed(1024)
        self.op_type = "tan"
        self.init_dtype()
1282 1283
        self.init_shape()

J
joejiong 已提交
1284
        self.dtype = 'float32'
1285
        self.x_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1286 1287 1288
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
J
joejiong 已提交
1289
            else paddle.CPUPlace()
1290
        )
J
joejiong 已提交
1291 1292 1293 1294 1295 1296

        out = np.tan(self.x_np)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x_np)}
        self.outputs = {'Out': out}

1297 1298 1299
    def init_shape(self):
        self.shape = [10, 12]

J
joejiong 已提交
1300 1301 1302 1303 1304
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315

class TestTan_ZeroDim(TestTan):
    def init_shape(self):
        self.shape = []


class TestTanAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(1024)
        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
1316 1317 1318
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
1319
            else paddle.CPUPlace()
1320
        )
1321

J
joejiong 已提交
1322 1323 1324 1325 1326
    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out_test = paddle.tan(x)
        out_ref = np.tan(self.x_np)
1327
        np.testing.assert_allclose(out_ref, out_test.numpy(), rtol=1e-05)
J
joejiong 已提交
1328 1329 1330 1331 1332
        paddle.enable_static()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
1333
            x = paddle.static.data('X', [11, 17], self.dtype)
J
joejiong 已提交
1334 1335 1336 1337
            out = paddle.tan(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = np.tan(self.x_np)
1338
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
J
joejiong 已提交
1339 1340 1341 1342

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
1343 1344 1345
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
J
joejiong 已提交
1346 1347 1348 1349 1350 1351 1352 1353
            var = paddle.to_tensor(input_x)
            var.stop_gradient = False
            loss = paddle.tan(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


1354 1355 1356 1357
class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
        self.init_dtype()
1358
        self.init_shape()
1359

1360
        np.random.seed(1024)
1361
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
1362 1363 1364 1365 1366
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

1367 1368 1369
    def init_shape(self):
        self.shape = [10, 12]

1370 1371 1372
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
1373
        self.check_grad(['X'], 'Out')
1374 1375


1376 1377 1378 1379 1380
class TestAcos_ZeroDim(TestAcos):
    def init_shape(self):
        self.shape = []


1381
class TestSin(TestActivation, TestParameter):
C
add sin  
chengduoZH 已提交
1382 1383
    def setUp(self):
        self.op_type = "sin"
1384
        self.init_dtype()
1385
        self.init_shape()
1386

1387
        np.random.seed(1024)
1388
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1389 1390 1391 1392
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
C
add cos  
chengduoZH 已提交
1393

1394 1395 1396
    def init_shape(self):
        self.shape = [10, 12]

C
add cos  
chengduoZH 已提交
1397
    def test_check_grad(self):
1398 1399
        if self.dtype == np.float16:
            return
1400
        self.check_grad(['X'], 'Out')
C
add cos  
chengduoZH 已提交
1401 1402


1403 1404 1405 1406 1407
class TestSin_ZeroDim(TestSin):
    def init_shape(self):
        self.shape = []


1408 1409 1410 1411
class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
        self.init_dtype()
1412
        self.init_shape()
1413

1414
        np.random.seed(2048)
1415
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
1416 1417 1418 1419 1420
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

1421 1422 1423
    def init_shape(self):
        self.shape = [10, 12]

1424 1425 1426
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
1427
        self.check_grad(['X'], 'Out')
1428 1429


1430 1431 1432 1433 1434
class TestAsin_ZeroDim(TestAsin):
    def init_shape(self):
        self.shape = []


X
xiaoting 已提交
1435 1436 1437 1438
class TestAcosh(TestActivation):
    def setUp(self):
        self.op_type = "acosh"
        self.init_dtype()
1439
        self.init_shape()
X
xiaoting 已提交
1440 1441

        np.random.seed(1024)
1442
        x = np.random.uniform(2, 3, self.shape).astype(self.dtype)
X
xiaoting 已提交
1443 1444 1445 1446 1447
        out = np.arccosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

1448 1449 1450
    def init_shape(self):
        self.shape = [10, 12]

X
xiaoting 已提交
1451 1452 1453 1454 1455 1456
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


1457 1458 1459 1460 1461
class TestAcosh_ZeroDim(TestAcosh):
    def init_shape(self):
        self.shape = []


X
xiaoting 已提交
1462 1463 1464 1465
class TestAsinh(TestActivation):
    def setUp(self):
        self.op_type = "asinh"
        self.init_dtype()
1466
        self.init_shape()
X
xiaoting 已提交
1467 1468

        np.random.seed(1024)
1469
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
X
xiaoting 已提交
1470 1471 1472 1473 1474
        out = np.arcsinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

1475 1476 1477
    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsinh_ZeroDim(TestAsinh):
    def init_shape(self):
        self.shape = []


class TestAtanh(TestActivation):
    def setUp(self):
        self.op_type = "atanh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(400)
        x = np.random.uniform(-0.9, 0.9, self.shape).astype(self.dtype)
        out = np.arctanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAtanh_ZeroDim(TestAtanh):
    def init_shape(self):
        self.shape = []


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.check_eager = True
        self.python_api = paddle.round
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        pass


class TestRound_ZeroDim(TestRound):
    def init_shape(self):
        self.shape = []


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        if self.dtype == np.uint16:
            x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
            # The same reason with TestAbs
            x[np.abs(x) < 0.005] = 0.02
            out = convert_float_to_uint16(np.maximum(x, 0))
            self.inputs = {'X': convert_float_to_uint16(x)}
        else:
            x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
            # The same reason with TestAbs
            x[np.abs(x) < 0.005] = 0.02
            out = np.maximum(x, 0)
            self.inputs = {'X': x}

        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRelu_ZeroDim(TestRelu):
    def init_shape(self):
        self.shape = []


class TestReluAPI(unittest.TestCase):
    # test paddle.nn.ReLU, paddle.nn.functional.relu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.relu = F.relu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.relu(x)
            m = paddle.nn.ReLU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.maximum(self.x_np, 0)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ReLU()
        out1 = m(x)
        out2 = self.relu(x)
        out_ref = np.maximum(self.x_np, 0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[10, 12], dtype='int32'
            )
            self.assertRaises(TypeError, self.relu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[10, 12], dtype='float16'
            )
            self.relu(x_fp16)


class TestReluInplaceAPI(TestReluAPI):
    # test paddle.nn.functional.relu_
    def executed_api(self):
        self.relu = F.relu_


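# The helper below is the NumPy reference the LeakyReLU tests compare against:
#     leaky_relu(x) = x          if x >= 0
#                     alpha * x  otherwise
# (the TestLeakyReluAlpha* cases below also exercise negative alpha values).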
def ref_leaky_relu(x, alpha=0.01):
    out = np.copy(x)
    out[out < 0] *= alpha
    return out


class TestLeakyRelu(TestActivation):
    def get_alpha(self):
        return 0.02

    def setUp(self):
        self.op_type = "leaky_relu"
        self.init_dtype()
        self.init_shape()
        alpha = self.get_alpha()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # The same reason with TestAbs
        x[np.abs(x) < 0.005] = 0.05
        out = ref_leaky_relu(x, alpha)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {'alpha': alpha}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestLeakyReluAlpha1(TestLeakyRelu):
    def get_alpha(self):
        return 2


class TestLeakyReluAlpha2(TestLeakyRelu):
    def get_alpha(self):
        return -0.01


class TestLeakyReluAlpha3(TestLeakyRelu):
    def get_alpha(self):
        return -2.0


class TestLeakyRelu_ZeroDim(TestLeakyRelu):
    def init_shape(self):
        self.shape = []


class TestLeakyReluAPI(unittest.TestCase):
    # test paddle.nn.LeakyReLU, paddle.nn.functional.leaky_relu,
    # fluid.layers.leaky_relu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = F.leaky_relu(x)
            m = paddle.nn.LeakyReLU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_leaky_relu(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.leaky_relu(x)
        m = paddle.nn.LeakyReLU()
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.leaky_relu(x, 0.6)
        m = paddle.nn.LeakyReLU(0.6)
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np, 0.6)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.leaky_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.leaky_relu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.leaky_relu(x_fp16)


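# NumPy/SciPy reference for the GELU tests below. With approximate=False it is
# the exact form gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))); with
# approximate=True it is the tanh approximation
# 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3))).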
def gelu(x, approximate):
    if approximate:
        y_ref = (
            0.5
            * x
            * (
                1.0
                + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3)))
            )
        )
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)


class TestGeluApproximate(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        self.init_shape()
        approximate = True
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        self.init_shape()
        approximate = False
        np.random.seed(2048)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu_ZeroDim(TestGelu):
    def init_shape(self):
        self.shape = []


class TestGELUAPI(unittest.TestCase):
    # test paddle.nn.GELU, paddle.nn.functional.gelu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.gelu(x)
            m = paddle.nn.GELU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = gelu(self.x_np, False)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.gelu(x)
        m = paddle.nn.GELU()
        out2 = m(x)
        out_ref = gelu(self.x_np, False)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.gelu(x, True)
        m = paddle.nn.GELU(True)
        out2 = m(x)
        out_ref = gelu(self.x_np, True)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.gelu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[11, 17], dtype='int32'
            )
            self.assertRaises(TypeError, F.gelu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[11, 17], dtype='float16'
            )
            F.gelu(x_fp16)


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same with TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestBreluAPI(unittest.TestCase):
    # test paddle.fluid.layers.brelu
    def setUp(self):
        np.random.seed(1024)
        self.t_min = 0.0
        self.t_max = 24.0
        self.x_np = np.random.uniform(-1, 30, [10, 12]).astype('float32')
        self.out_ref = np.copy(self.x_np)
        self.out_ref[self.out_ref < self.t_min] = self.t_min
        self.out_ref[self.out_ref > self.t_max] = self.t_max
        self.out_ref = self.out_ref.astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_fluid_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', [10, 12])
            out = paddle.fluid.layers.brelu(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            np.testing.assert_allclose(self.out_ref, res[0], rtol=1e-05)

            paddle.disable_static(self.place)
            x = paddle.to_tensor(self.x_np)
            out = paddle.fluid.layers.brelu(x)
            np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)
            paddle.enable_static()

    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, fluid.layers.brelu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, fluid.layers.brelu, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.layers.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            fluid.layers.brelu(x_fp16)


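# Reference for the ReLU6 tests below: relu6(x) = min(max(x, 0), threshold),
# e.g. ref_relu6(np.array([-1.0, 3.0, 7.0])) gives [0.0, 3.0, 6.0].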
def ref_relu6(x, threshold=6.0):
    out = np.copy(x)
    out[np.abs(x - threshold) < 0.005] = threshold + 0.02
    out = np.minimum(np.maximum(x, 0), threshold)
    return out


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.relu6

        np.random.seed(1024)
        x = np.random.uniform(-1, 10, self.shape).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_relu6(x)

        self.inputs = {'X': x}
        self.attrs = {'threshold': 6.0}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestRelu6_ZeroDim(TestRelu6):
    def init_shape(self):
        self.shape = []


class TestRelu6API(unittest.TestCase):
    # test paddle.nn.ReLU6, paddle.nn.functional.relu6
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 10, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.relu6(x)
            relu6 = paddle.nn.ReLU6()
            out2 = relu6(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_relu6(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.relu6(x)
        relu6 = paddle.nn.ReLU6()
        out2 = relu6(x)
        out_ref = ref_relu6(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = paddle.nn.functional.relu6(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_relu6(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.relu6, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.relu6, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.relu6(x_fp16)


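# Reference for the hard_swish tests below:
#     hard_swish(x) = x * min(max(x + offset, 0), threshold) / scale,
# evaluated in float32 and cast back when the input is float16.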
def ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0):
    x_dtype = x.dtype
    if x_dtype == 'float16':
        x_dtype = 'float16'
        x = x.astype('float32')
    return (
        x * np.minimum(np.maximum(x + offset, 0.0), threshold) / scale
    ).astype(x_dtype)


class TestHardSwish(TestActivation):
    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.hardswish

        np.random.seed(1024)
        x = np.random.uniform(-6, 6, self.shape).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        # the same with TestAbs
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
        out = ref_hardswish(x, threshold, scale, offset)

        self.inputs = {'X': x}
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestHardSwish_ZeroDim(TestHardSwish):
    def init_shape(self):
        self.shape = []


class TestHardswishAPI(unittest.TestCase):
    # test paddle.nn.Hardswish, paddle.nn.functional.hardswish
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.hardswish(x)
            m = paddle.nn.Hardswish()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardswish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor([11648.0, 11448.0])
        out1 = F.hardswish(x)
        m = paddle.nn.Hardswish()
        out2 = m(x)
        out_ref = [11648.0, 11448.0]
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.hard_swish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardswish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.fluid.layers.hard_swish(x)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardswish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.hardswish, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.hardswish(x_fp16)

    def test_api_eager_dygraph(self):
        with _test_eager_guard():
            self.test_dygraph_api()
            self.test_errors()


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        np.random.seed(4096)
        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason with TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


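# Reference for the ELU tests below:
#     elu(x) = x                     if x > 0
#              alpha * (exp(x) - 1)  otherwise.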
def elu(x, alpha):
    out_ref = np.where(x > 0, x, alpha * (np.exp(x) - 1))
    return out_ref.astype(x.dtype)


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
        alpha = self.get_alpha()
        out = elu(x, alpha)
        # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1)
        # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def get_alpha(self):
        return 1.0


class TestELUAlpha(TestELU):
    def get_alpha(self):
        return -0.2


class TestELU_ZeroDim(TestELU):
    def init_shape(self):
        self.shape = []


class TestELUAPI(unittest.TestCase):
    # test paddle.nn.ELU, paddle.nn.functional.elu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.elu = F.elu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.elu(x)
            m = paddle.nn.ELU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = elu(self.x_np, 1.0)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = self.elu(x)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ELU()
        out2 = m(x)
        out_ref = elu(self.x_np, 1.0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.elu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ELU(0.2)
        out2 = m(x)
        out_ref = elu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.elu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[10, 12], dtype='int32'
            )
            self.assertRaises(TypeError, self.elu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[10, 12], dtype='float16'
            )
            self.elu(x_fp16)


class TestELUInplaceAPI(TestELUAPI):
    # test paddle.nn.functional.elu_
    def executed_api(self):
        self.elu = F.elu_

    def test_alpha_error(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        self.assertRaises(Exception, F.elu_, x, -0.2)
        paddle.enable_static()


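# Reference for the CELU tests below:
#     celu(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1));
# the division by alpha is why TestCELUAPI.test_errors expects alpha == 0
# to raise ZeroDivisionError.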
def celu(x, alpha):
    out_ref = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x / alpha) - 1))
    return out_ref.astype(x.dtype)


class TestCELU(TestActivation):
    def setUp(self):
        self.op_type = "celu"
        self.init_dtype()
        self.init_shape()

        self.python_api = paddle.nn.functional.celu
        np.random.seed(1024)
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
        alpha = 1.5
        out = celu(x, alpha)
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestCELU_ZeroDim(TestCELU):
    def init_shape(self):
        self.shape = []


class TestCELUAPI(unittest.TestCase):
    # test paddle.nn.CELU, paddle.nn.functional.celu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.celu = F.celu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.celu(x, 1.5)
            m = paddle.nn.CELU(1.5)
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = celu(self.x_np, 1.5)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = self.celu(x, 1.5)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(1.5)
        out2 = m(x)
        out_ref = celu(self.x_np, 1.5)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.celu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(0.2)
        out2 = m(x)
        out_ref = celu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.celu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[10, 12], dtype='int32'
            )
            self.assertRaises(TypeError, self.celu, x_int32)
            # The alpha must be not equal 0
            x_fp32 = paddle.fluid.data(
                name='x_fp32', shape=[10, 12], dtype='float32'
            )
            self.assertRaises(ZeroDivisionError, F.celu, x_fp32, 0)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[10, 12], dtype='float16'
            )
            self.celu(x_fp16)

    def test_api_eager_dygraph(self):
        with _test_eager_guard():
            self.test_dygraph_api()
            self.test_errors()


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.python_api = paddle.reciprocal
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestReciprocal_ZeroDim(TestReciprocal):
    def init_shape(self):
        self.shape = []


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.check_eager = True
        self.python_api = paddle.log
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_error(self):
        in1 = fluid.layers.data(
            name="in1", shape=[11, 17], append_batch_size=False, dtype="int32"
        )
        in2 = fluid.layers.data(
            name="in2", shape=[11, 17], append_batch_size=False, dtype="int64"
        )

        self.assertRaises(TypeError, fluid.layers.log, in1)
        self.assertRaises(TypeError, fluid.layers.log, in2)


class TestLog_ZeroDim(TestLog):
    def init_shape(self):
        self.shape = []


class TestLog2(TestActivation):
    def setUp(self):
        self.op_type = "log2"
        self.check_eager = True
        self.python_api = paddle.log2
        self.init_dtype()
        self.init_shape()

        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log2(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_error(self):
        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

        self.assertRaises(TypeError, paddle.log2, in1)
        self.assertRaises(TypeError, paddle.log2, in2)

    def test_api(self):
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.static.data(
                name="data_x", shape=[11, 17], dtype="float64"
            )

            out1 = paddle.log2(data_x)
            exe = paddle.static.Executor(place=fluid.CPUPlace())
            exe.run(paddle.static.default_startup_program())
            (res1,) = exe.run(
                paddle.static.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[out1],
            )
        expected_res = np.log2(input_x)
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log2(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log2(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestLog2_ZeroDim(TestLog2):
    def init_shape(self):
        self.shape = []


class TestLog10(TestActivation):
    def setUp(self):
        self.op_type = "log10"
        self.check_eager = True
        self.python_api = paddle.log10
        self.init_dtype()
        self.init_shape()

        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log10(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestLog10_ZeroDim(TestLog10):
    def init_shape(self):
        self.shape = []


class TestLog10API(unittest.TestCase):
    def test_error(self):
        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

        self.assertRaises(TypeError, paddle.log10, in1)
        self.assertRaises(TypeError, paddle.log10, in2)

    def test_api(self):
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.static.data(
                name="data_x", shape=[11, 17], dtype="float64"
            )

            out1 = paddle.log10(data_x)
            exe = paddle.static.Executor(place=paddle.CPUPlace())
            exe.run(paddle.static.default_startup_program())
            (res1,) = exe.run(
                paddle.static.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[out1],
            )
        expected_res = np.log10(input_x)
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log10(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log10(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestLog1p(TestActivation):
    def setUp(self):
        self.op_type = "log1p"
        self.check_eager = True
        self.python_api = paddle.log1p
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log1p(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestLog1p_ZeroDim(TestLog1p):
    def init_shape(self):
        self.shape = []


class TestLog1pAPI(unittest.TestCase):
    def test_api(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.layers.data(
                name="data_x",
                shape=[11, 17],
                append_batch_size=False,
                dtype="float64",
            )

            out1 = paddle.log1p(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            (res1,) = exe.run(
                fluid.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[out1],
            )
        expected_res = np.log1p(input_x)
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.dygraph.to_variable(np_x)
            z = paddle.log1p(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log1p(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.007, check_eager=True
        )

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestSquare_ZeroDim(TestSquare):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSquareBF16(OpTest):
    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
        out = np.square(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['X'], 'Out', numeric_grad_delta=0.5, check_eager=True
        )


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.python_api = paddle.pow
        self.check_eager = True
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output(check_eager=self.check_eager)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=self.check_eager)


class TestPow_ZeroDim(TestPow):
    def init_shape(self):
        self.shape = []


class TestPow_factor_tensor(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.check_eager = False
        self.python_api = paddle.pow
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype("float32"),
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output(check_eager=self.check_eager)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=self.check_eager)

    def test_api(self):
        input = np.random.uniform(1, 2, [11, 17]).astype("float32")
        x = fluid.layers.data(
            name="x", shape=[11, 17], append_batch_size=False, dtype="float32"
        )
        res = fluid.layers.data(
            name="res", shape=[11, 17], append_batch_size=False, dtype="float32"
        )

        factor_1 = 2.0
        factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
        out_1 = fluid.layers.pow(x, factor=factor_1)
        out_2 = fluid.layers.pow(x, factor=factor_2)
        out_4 = paddle.pow(x, factor_1, name='pow_res')
        out_6 = paddle.pow(x, factor_2)
        self.assertEqual(('pow_res' in out_4.name), True)

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2, res, res_6 = exe.run(
            fluid.default_main_program(),
            feed={"x": input},
            fetch_list=[out_1, out_2, res, out_6],
        )

        assert np.allclose(res_1, np.power(input, 2))
        assert np.allclose(res_2, np.power(input, 3))
        assert np.allclose(res_6, np.power(input, 3))

    def test_error(self):
        in1 = fluid.layers.data(
            name="in1", shape=[11, 17], append_batch_size=False, dtype="int32"
        )
        in2 = fluid.layers.data(
            name="in2", shape=[11, 17], append_batch_size=False, dtype="int64"
        )
        in3 = fluid.layers.data(
            name="in3", shape=[11, 17], append_batch_size=False, dtype="float32"
        )
        in4 = fluid.layers.data(
            name="in4", shape=[11, 17], append_batch_size=False, dtype="float64"
        )

        factor_1 = fluid.layers.fill_constant([1], "float64", 3.0)

        self.assertRaises(TypeError, fluid.layers.pow, x=in1, factor=factor_1)
        self.assertRaises(TypeError, fluid.layers.pow, x=in2, factor=factor_1)
        self.assertRaises(TypeError, fluid.layers.pow, x=in3, factor=factor_1)
        self.assertRaises(TypeError, fluid.layers.pow, x=in4, factor=factor_1)


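# Reference for the STanh tests below: stanh(x) = scale_b * tanh(scale_a * x),
# with default scales 0.67 and 1.7159.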
def ref_stanh(x, scale_a=0.67, scale_b=1.7159):
    out = scale_b * np.tanh(x * scale_a)
    return out


class TestSTanh(TestActivation):
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        self.op_type = "stanh"
        self.init_dtype()
        self.init_shape()

        scale_a = self.get_scale_a()
        scale_b = self.get_scale_b()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        # The same reason with TestAbs
        out = ref_stanh(x, scale_a, scale_b)

        self.inputs = {'X': x}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSTanhScaleA(TestSTanh):
    def get_scale_a(self):
        return 2.0


class TestSTanhScaleB(TestSTanh):
    def get_scale_b(self):
        return 0.5


class TestSTanh_ZeroDim(TestSTanh):
    def init_shape(self):
        self.shape = []


class TestSTanhAPI(unittest.TestCase):
    # test paddle.nn.stanh
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.scale_a = self.get_scale_a()
        self.scale_b = self.get_scale_b()
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out = paddle.stanh(x, self.scale_a, self.scale_b)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.stanh(x, self.scale_a, self.scale_b)
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in [out]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12])
            out = paddle.stanh(x, self.scale_a, self.scale_b)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, paddle.stanh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, paddle.stanh, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            paddle.stanh(x_fp16)


class TestSTanhAPIScaleA(TestSTanhAPI):
    def get_scale_a(self):
        return 2.0


class TestSTanhAPIScaleB(TestSTanhAPI):
    def get_scale_b(self):
        return 0.5


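# Reference for the Softplus tests below:
#     softplus(x) = log(1 + exp(beta * x)) / beta  if beta * x <= threshold
#                   x                              otherwise,
# where the threshold branch avoids overflowing exp() for large inputs.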
def ref_softplus(x, beta=1, threshold=20):
    x_beta = beta * x
    out = np.select(
        [x_beta <= threshold, x_beta > threshold],
        [np.log(1 + np.exp(x_beta)) / beta, x],
    )
    return out


class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.python_api = paddle.nn.functional.softplus
        self.init_dtype()
        self.init_shape()

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': x}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': out}

        self.check_eager = True

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        # Default to the non-eager path when a subclass does not set check_eager.
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)


class TestSoftplus_ZeroDim(TestSoftplus):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSoftplusBF16(OpTest):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(np.float32)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', numeric_grad_delta=0.05)


class TestSoftplusAPI(unittest.TestCase):
    # test paddle.nn.Softplus, paddle.nn.functional.softplus
    def setUp(self):
        self.beta = 2
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softplus(x, self.beta, self.threshold)
            softplus = paddle.nn.Softplus(self.beta, self.threshold)
            out2 = softplus(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softplus(x, self.beta, self.threshold)
        softplus = paddle.nn.Softplus(self.beta, self.threshold)
        out2 = softplus(x)
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softplus, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.softplus, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.softplus(x_fp16)


def ref_softsign(x):
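    # softsign(x) = x / (1 + |x|)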
    out = np.divide(x, 1 + np.abs(x))
    return out


class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()
        self.init_shape()

        self.python_api = paddle.nn.functional.softsign

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_softsign(x)
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSoftsign_ZeroDim(TestSoftsign):
    def init_shape(self):
        self.shape = []


class TestSoftsignAPI(unittest.TestCase):
    # test paddle.nn.Softsign, paddle.nn.functional.softsign
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softsign(x)
            softsign = paddle.nn.Softsign()
            out2 = softsign(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softsign(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softsign(x)
        softsign = paddle.nn.Softsign()
        out2 = softsign(x)
        out_ref = ref_softsign(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softsign, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.softsign, x_int32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.softsign(x_fp16)


def ref_thresholded_relu(x, threshold=1.0):
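    # thresholded_relu(x) = x if x > threshold, otherwise 0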
    out = (x > threshold) * x
    return out


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()
        self.init_shape()

        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-20, 20, self.shape).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_thresholded_relu(x, threshold)
        self.inputs = {'X': x}
        self.attrs = {"threshold": threshold}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestThresholdedRelu_ZeroDim(TestThresholdedRelu):
    def init_shape(self):
        self.shape = []


class TestThresholdedReluAPI(unittest.TestCase):
    # test paddle.nn.ThresholdedReLU, paddle.nn.functional.thresholded_relu
    def setUp(self):
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-20, 20, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.thresholded_relu(x, self.threshold)
            thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
            out2 = thresholded_relu(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.thresholded_relu(x, self.threshold)
        thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
        out2 = thresholded_relu(x)
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.thresholded_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.thresholded_relu, x_int32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.thresholded_relu(x_fp16)


def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5):
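    # hardsigmoid(x) = clip(slope * x + offset, 0, 1)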
    return np.maximum(np.minimum(x * slope + offset, 1.0), 0.0).astype(x.dtype)


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.dtype = 'float64'
        self.slope = 0.166666666666667
        self.offset = 0.5
        self.set_attrs()
        self.init_shape()

        x = np.random.uniform(-5, 5, self.shape).astype(self.dtype)
        lower_threshold = -self.offset / self.slope
        upper_threshold = (1.0 - self.offset) / self.slope

        # Same reason as TestAbs
        delta = 0.005
        x[np.abs(x - lower_threshold) < delta] = lower_threshold - 0.02
        x[np.abs(x - upper_threshold) < delta] = upper_threshold - 0.02

        out = ref_hardsigmoid(x, self.slope, self.offset)

        self.attrs = {'slope': self.slope, 'offset': self.offset}
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def set_attrs(self):
        pass


class TestHardSigmoidFP32(TestHardSigmoid):
    def set_attrs(self):
        self.dtype = 'float32'


class TestHardSigmoidSlopeOffset(TestHardSigmoid):
    def set_attrs(self):
        self.slope = 0.2
        self.offset = 0.4


class TestHardSigmoid_ZeroDim(TestHardSigmoid):
    def init_shape(self):
        self.shape = []


class TestHardsigmoidAPI(unittest.TestCase):
    # test paddle.nn.Hardsigmoid, paddle.nn.functional.hardsigmoid
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.hardsigmoid(x)
            m = paddle.nn.Hardsigmoid()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardsigmoid(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardsigmoid(x)
        m = paddle.nn.Hardsigmoid()
        out2 = m(x)
        out_ref = ref_hardsigmoid(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardsigmoid, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.hardsigmoid, x_int32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.hardsigmoid(x_fp16)


def ref_swish(x):
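    # swish(x) = x * sigmoid(x); expit is SciPy's logistic sigmoid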
    out = x * expit(x)
    return out


class TestSwish(TestActivation):
    def setUp(self):
        self.op_type = "swish"
        self.python_api = paddle.nn.functional.swish
        self.init_dtype()
        self.init_shape()

        self.check_eager = True

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_swish(x)
        self.inputs = {'X': x}
        self.attrs = {'beta': 1.0}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)


class TestSwish_ZeroDim(TestSwish):
    def init_shape(self):
        self.shape = []


class TestSwishAPI(unittest.TestCase):
    # test paddle.nn.Swish, paddle.nn.functional.swish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.swish(x)
            swish = paddle.nn.Swish()
            out2 = swish(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_swish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def func_test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.swish(x)
        swish = paddle.nn.Swish()
        out2 = swish(x)
        out_ref = ref_swish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_dygraph_api(self):
        # Run once under the eager guard and once in legacy dygraph mode.
        with _test_eager_guard():
            self.func_test_dygraph_api()
        self.func_test_dygraph_api()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = paddle.nn.functional.swish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_swish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.swish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.swish, x_int32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.swish(x_fp16)


def ref_mish(x, threshold=20.0):
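    # mish(x) = x * tanh(softplus(x)); softplus(x) is taken to be x itself
    # when x > threshold, matching the op's threshold attribute.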
    softplus = np.select(
        [x <= threshold, x > threshold], [np.log(1 + np.exp(x)), x]
    )
    return x * np.tanh(softplus)


class TestMish(TestActivation):
    def setUp(self):
        self.op_type = "mish"
        self.python_api = paddle.fluid.layers.nn.mish
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_mish(x)
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestMish_ZeroDim(TestMish):
    def init_shape(self):
        self.shape = []


class TestMishAPI(unittest.TestCase):
    # test paddle.nn.Mish, paddle.nn.functional.mish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.mish(x)
            mish = paddle.nn.Mish()
            out2 = mish(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_mish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.mish(x)
        mish = paddle.nn.Mish()
        out2 = mish(x)
        out_ref = ref_mish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = fluid.layers.mish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_mish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.mish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.mish, x_int32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.mish(x_fp16)


# ------------------ Test Cudnn Activation ----------------------
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
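    # Derives a cuDNN-backed variant of `parent` by forcing use_cudnn=True;
    # the generated class is skipped when Paddle is not built with CUDA.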
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)
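# A hypothetical extra registration would follow the same pattern, e.g.
#     create_test_act_cudnn_class(TestAbs)
# which would place a "TestAbs_cudnn" class into this module's globals() for
# unittest discovery (assuming the corresponding op has a cuDNN kernel).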


# ------------------ Test Fp16 ----------------------
def create_test_act_fp16_class(
    parent, atol=1e-3, grad_check=True, grad_atol=0.80
):
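    # Generates an FP16 variant of `parent`; the output check runs only when
    # float16 is supported on the CUDA place, and the gradient check additionally
    # requires grad_check=True.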
    @unittest.skipIf(
        not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place, ['X'], 'Out', max_relative_error=grad_atol
                )

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestExpm1)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestSilu)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhshrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftshrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestTan, grad_atol=0.85)
create_test_act_fp16_class(TestCosh, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestSinh)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestAcosh, grad_atol=0.85)
create_test_act_fp16_class(TestAsinh, grad_atol=0.85)
create_test_act_fp16_class(TestAtanh, grad_atol=0.85)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu, grad_atol=0.85)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestCELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
if core.is_compiled_with_rocm():
    create_test_act_fp16_class(TestLog2, atol=5e-2, grad_atol=0.85)
else:
    create_test_act_fp16_class(TestLog2, atol=5e-2)
create_test_act_fp16_class(TestLog10, atol=5e-2)
create_test_act_fp16_class(TestLog1p, grad_atol=0.9)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestPow_factor_tensor, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish, grad_atol=0.85)
create_test_act_fp16_class(TestHardSwish)
create_test_act_fp16_class(TestMish, grad_atol=0.9)


def create_test_act_bf16_class(
    parent, atol=1e-2, grad_check=True, grad_atol=0.80
):
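    # Generates a bfloat16 (uint16 storage) variant of `parent`; both checks run
    # on a CUDA place with the given tolerances.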
    @unittest.skipIf(
        not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActBF16(parent):
        def init_dtype(self):
            self.dtype = np.uint16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place, ['X'], 'Out', max_relative_error=grad_atol
            )

    cls_name = "{0}_{1}".format(parent.__name__, "bf16")
    TestActBF16.__name__ = cls_name
    globals()[cls_name] = TestActBF16


create_test_act_bf16_class(TestRelu)
create_test_act_bf16_class(TestAbs)

if __name__ == "__main__":
    unittest.main()