#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from op_test import OpTest, convert_float_to_uint16
from scipy.special import erf, expit

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.nn.functional as F
from paddle.fluid import Program, program_guard

paddle.enable_static()


class TestSqrtOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of sqrt op must be Variable or numpy.ndarray.
            in1 = 1
            self.assertRaises(TypeError, paddle.sqrt, in1)
            # The input dtype of sqrt op must be float16, float32, float64.
            in2 = paddle.static.data(
                name='input2', shape=[-1, 12, 10], dtype="int32"
            )
            self.assertRaises(TypeError, paddle.sqrt, in2)

            in3 = paddle.static.data(
                name='input3', shape=[-1, 12, 10], dtype="float16"
            )
            paddle.sqrt(x=in3)


class TestActivation(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.init_dtype()
        self.init_shape()
        self.init_kernel_type()
        self.check_eager = True
        self.python_api = paddle.exp

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_output(check_eager=check_eager)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)

    def init_dtype(self):
        self.dtype = np.float64

    def init_shape(self):
        self.shape = [11, 17]

    def init_kernel_type(self):
        pass

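# How the TestActivation harness above is reused (an illustrative sketch only;
# the class and op names here are hypothetical, the real subclasses follow below):
#
#     class TestMyActivation(TestActivation):
#         def setUp(self):
#             self.op_type = "my_activation"   # operator under test
#             self.python_api = paddle.exp     # eager API used when check_eager is set
#             self.init_dtype()
#             self.init_shape()
#             ...  # build self.inputs / self.outputs from a numpy reference
#
#         def init_shape(self):
#             self.shape = [11, 17]            # or [] for the *_ZeroDim variants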

class TestActivation_ZeroDim(TestActivation):
    def init_shape(self):
        self.shape = []


class TestExpm1(TestActivation):
    def setUp(self):
        self.op_type = "expm1"
        self.python_api = paddle.expm1
        self.init_dtype()
        self.init_shape()

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.expm1(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestExpm1_ZeroDim(TestExpm1):
    def init_shape(self):
        self.shape = []


class TestExpm1API(unittest.TestCase):
    def init_dtype(self):
        self.dtype = 'float64'
        self.shape = [11, 17]

    def setUp(self):
        self.init_dtype()
        self.x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        self.out_ref = np.expm1(self.x)

        self.place = [paddle.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.place.append(paddle.CUDAPlace(0))

    def test_static_api(self):
        paddle.enable_static()

        def run(place):
            with paddle.static.program_guard(paddle.static.Program()):
                X = paddle.fluid.data('X', self.shape, dtype=self.dtype)
                out = paddle.expm1(X)
                exe = paddle.static.Executor(place)
                res = exe.run(feed={'X': self.x}, fetch_list=[out])
            for r in res:
                np.testing.assert_allclose(self.out_ref, r, rtol=1e-05)

        for place in self.place:
            run(place)

    def test_dygraph_api(self):
        def run(place):
            paddle.disable_static(place)
            X = paddle.to_tensor(self.x)
            out = paddle.expm1(X)
            np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)
            paddle.enable_static()

        for place in self.place:
            run(place)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            X = paddle.fluid.data('X', self.shape, dtype='int32')
            self.assertRaises(TypeError, paddle.expm1, X)
        # The input dtype must be float16, float32, float64.


class TestParameter:
    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
            data = paddle.static.data(name="X", shape=[-1, 1], dtype="float32")
            out = eval("paddle.%s(data, name='Y')" % self.op_type)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
            expected = eval("np.%s(np_x)" % self.op_type)
            np.testing.assert_allclose(result, expected, rtol=1e-05)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = eval("paddle.%s(x).numpy()" % self.op_type)
            z_expected = eval("np.%s(np_x)" % self.op_type)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

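# TestParameter above is a mixin rather than a standalone test: classes such as
# TestTanh and TestAtan below combine it with TestActivation. It resolves the
# operator under test by name via eval("paddle.%s" % self.op_type) and compares
# the result against the numpy function of the same name.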

class TestSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestSigmoid_ZeroDim(TestSigmoid):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSigmoidBF16(OpTest):
    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def init_shape(self):
        self.shape = [11, 17]

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out')

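# Note on the bfloat16 test above: convert_float_to_uint16 (imported from
# op_test) packs bfloat16 values into a uint16 carrier array, which is why
# init_dtype reports np.uint16 while the reference output is computed in
# float32 before conversion.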

'''
class TestSigmoidBF16_ZeroDim(TestSigmoidBF16):

    def init_shape(self):
        self.shape = []
'''


class TestSilu(TestActivation):
    def setUp(self):
        self.op_type = "silu"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = x / (np.exp(-x) + 1)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSilu_ZeroDim(TestSilu):
    def init_shape(self):
        self.shape = []


class TestSiluAPI(unittest.TestCase):
    # test paddle.nn.Silu, paddle.nn.functional.silu
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.silu(x)
            m = paddle.nn.Silu()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.silu(x)
        m = paddle.nn.Silu()
        out2 = m(x)
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.silu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[11, 17], dtype='int32'
            )
            self.assertRaises(TypeError, F.silu, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[11, 17], dtype='float16'
            )
            F.silu(x_fp16)


class TestLogSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.init_dtype()
        self.init_shape()

        np.random.seed(2048)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestLogSigmoid_ZeroDim(TestLogSigmoid):
    def init_shape(self):
        self.shape = []


class TestLogSigmoidAPI(unittest.TestCase):
    # test paddle.nn.LogSigmoid, paddle.nn.functional.log_sigmoid
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.log_sigmoid(x)
            m = paddle.nn.LogSigmoid()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.log_sigmoid(x)
        m = paddle.nn.LogSigmoid()
        out2 = m(x)
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.log_sigmoid, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[11, 17], dtype='int32'
            )
            self.assertRaises(TypeError, F.log_sigmoid, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[11, 17], dtype='float16'
            )
            F.log_sigmoid(x_fp16)


class TestTanh(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        # TODO If dtype is float64, the output (Out) has diff at CPUPlace
        # when using and not using inplace. Therefore, set dtype as float32
        # for now.
        self.dtype = np.float32


class TestTanh_ZeroDim(TestTanh):
    def init_shape(self):
        self.shape = []


class TestTanhAPI(unittest.TestCase):
    # test paddle.tanh, paddle.nn.tanh, paddle.nn.functional.tanh
    def setUp(self):
        self.dtype = 'float32'
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.tanh = F.tanh

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12], self.dtype)
            out1 = self.tanh(x)
            th = paddle.nn.Tanh()
            out2 = th(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.tanh(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanh(x)
        out2 = paddle.tanh(x)
        th = paddle.nn.Tanh()
        out3 = th(x)
        out_ref = np.tanh(self.x_np)
        for r in [out1, out2, out3]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.tanh, 1)
            # The input dtype must be float16, float32.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, self.tanh, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            self.tanh(x_fp16)


class TestTanhInplaceAPI(TestTanhAPI):
    # test paddle.tanh_
    def executed_api(self):
        self.tanh = paddle.tanh_


class TestAtan(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "atan"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
            data = paddle.static.data(name="X", shape=[-1, 1], dtype="float32")
            out = paddle.atan(data, name='Y')
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
            expected = np.arctan(np_x)
            self.assertEqual(result, expected)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.atan(x).numpy()
            z_expected = np.arctan(np_x)
            self.assertEqual(z, z_expected)


class TestAtan_ZeroDim(TestAtan):
    def init_shape(self):
        self.shape = []


class TestSinh(TestActivation):
    def setUp(self):
        self.op_type = "sinh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSinh_ZeroDim(TestSinh):
    def init_shape(self):
        self.shape = []


class TestSinhAPI(unittest.TestCase):
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.sinh(x).numpy()
            z_expected = np.sinh(np_x)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            data_x = paddle.static.data(
                name="data_x",
                shape=test_data_shape,
                dtype="float32",
            )

            pd_sinh_out = paddle.sinh(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            (np_sinh_res,) = exe.run(
                fluid.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[pd_sinh_out],
            )

        expected_res = np.sinh(input_x)
        np.testing.assert_allclose(np_sinh_res, expected_res, rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = paddle.sinh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestSinhOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, paddle.sinh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, paddle.sinh, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            paddle.sinh(x_fp16)


class TestCosh(TestActivation):
    def setUp(self):
        self.op_type = "cosh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.cosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCosh_ZeroDim(TestCosh):
    def init_shape(self):
        self.shape = []


class TestCoshAPI(unittest.TestCase):
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.cosh(x).numpy()
            z_expected = np.cosh(np_x)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            data_x = paddle.static.data(
                name="data_x",
                shape=test_data_shape,
                dtype="float32",
            )

            pd_cosh_out = paddle.cosh(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            (np_cosh_res,) = exe.run(
                fluid.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[pd_cosh_out],
            )

        expected_res = np.cosh(input_x)
        np.testing.assert_allclose(np_cosh_res, expected_res, rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = paddle.cosh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestCoshOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, paddle.cosh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
            self.assertRaises(TypeError, paddle.cosh, x_int32)
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
            paddle.cosh(x_fp16)


def ref_tanhshrink(x):
    out = x - np.tanh(x)
    return out

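# For reference: tanhshrink(x) = x - tanh(x). The tests below draw x from
# uniform(10, 20), where tanh(x) is within floating-point rounding of 1, so the
# expected output is essentially x - 1.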

class TestTanhshrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(10, 20, self.shape).astype(self.dtype)
        out = ref_tanhshrink(x)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTanhshrink_ZeroDim(TestTanhshrink):
    def init_shape(self):
        self.shape = []


class TestTanhshrinkAPI(unittest.TestCase):
    # test paddle.nn.Tanhshrink, paddle.nn.functional.tanhshrink
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(10, 20, [10, 17]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.tanhshrink(x)
            tanhshrink = paddle.nn.Tanhshrink()
            out2 = tanhshrink(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_tanhshrink(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanhshrink(x)
        tanhshrink = paddle.nn.Tanhshrink()
        out2 = tanhshrink(x)
        out_ref = ref_tanhshrink(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.tanhshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.tanhshrink, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.tanhshrink(x_fp16)


def ref_hardshrink(x, threshold):
    out = np.copy(x)
    out[(out >= -threshold) & (out <= threshold)] = 0
    return out

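# Worked example with the default threshold of 0.5:
#     ref_hardshrink(np.array([-0.7, -0.2, 0.3, 0.9]), 0.5)
# zeroes the two entries inside [-0.5, 0.5] and returns [-0.7, 0.0, 0.0, 0.9].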

class TestHardShrink(TestActivation):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.init_dtype()
        self.init_shape()

        self.threshold = 0.5
        self.set_attrs()
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) * 10
        out = ref_hardshrink(x, self.threshold)

        self.attrs = {'threshold': self.threshold}
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def set_attrs(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardShrink_threshold_negative(TestHardShrink):
    def set_attrs(self):
        self.threshold = -0.1


'''
class TestHardShrink_ZeroDim(TestHardShrink):

    def init_shape(self):
        self.shape = []
'''


class TestHardShrinkAPI(unittest.TestCase):
    # test paddle.nn.Hardshrink, paddle.nn.functional.hardshrink
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = F.hardshrink(x)
            hd = paddle.nn.Hardshrink()
            out2 = hd(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardshrink(x)
        hd = paddle.nn.Hardshrink()
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.hardshrink(x, 0.6)
        hd = paddle.nn.Hardshrink(0.6)
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.6)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.hardshrink, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.hardshrink(x_fp16)


def ref_hardtanh(x, min=-1.0, max=1.0):
    out = np.copy(x)
    out[np.abs(x - min) < 0.005] = min + 0.02
    out[np.abs(x - max) < 0.005] = max + 0.02
    out = np.minimum(np.maximum(x, min), max)
    return out

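# Note: ref_hardtanh ultimately clips x into [min, max]; the final
# np.minimum(np.maximum(x, min), max) overwrites the two boundary adjustments
# above, so e.g. ref_hardtanh(np.array([2.5]))[0] == 1.0.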

class TestHardtanhAPI(unittest.TestCase):
    # test paddle.nn.Hardtanh, paddle.nn.functional.hardtanh
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = F.hardtanh(x)
            m = paddle.nn.Hardtanh()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardtanh(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardtanh(x)
        m = paddle.nn.Hardtanh()
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.hardtanh(x, -2.0, 2.0)
        m = paddle.nn.Hardtanh(-2.0, 2.0)
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np, -2.0, 2.0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardtanh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.hardtanh, x_int32)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.hardtanh(x_fp16)


def ref_softshrink(x, threshold=0.5):
    out = np.copy(x)
    out = (out < -threshold) * (out + threshold) + (out > threshold) * (
        out - threshold
    )
    return out

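# Worked example with threshold=0.8 (the value used below):
#     ref_softshrink(np.array([1.0, -1.0, 0.5]), 0.8)
# gives [0.2, -0.2, 0.0]: values beyond the threshold are shrunk towards zero
# by 0.8, everything inside [-0.8, 0.8] maps to 0.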

class TestSoftshrink(TestActivation):
    def setUp(self):
        self.op_type = "softshrink"
        self.check_eager = True
        self.python_api = paddle.nn.functional.softshrink
        self.init_dtype()
        self.init_shape()

        threshold = 0.8

        np.random.seed(1023)
        x = np.random.uniform(0.25, 10, self.shape).astype(self.dtype)
        out = ref_softshrink(x, threshold)
        self.inputs = {'X': x}
        self.attrs = {"lambda": threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSoftshrink_ZeroDim(TestSoftshrink):
    def init_shape(self):
        self.shape = []


class TestSoftshrinkAPI(unittest.TestCase):
    # test paddle.nn.Softshrink, paddle.nn.functional.softshrink
    def setUp(self):
        self.threshold = 0.8
        np.random.seed(1024)
        self.x_np = np.random.uniform(0.25, 10, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softshrink(x, self.threshold)
            softshrink = paddle.nn.Softshrink(self.threshold)
            out2 = softshrink(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softshrink(x, self.threshold)
        softshrink = paddle.nn.Softshrink(self.threshold)
        out2 = softshrink(x)
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softshrink, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.softshrink, x_int32)
            # The threshold must be no less than zero
            x_fp32 = paddle.fluid.data(
                name='x_fp32', shape=[12, 10], dtype='float32'
            )
            self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
            # support the input dtype is float16
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.softshrink(x_fp16)


class TestSqrt(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sqrt"
        self.python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestSqrt_ZeroDim(TestSqrt):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSqrtBF16(OpTest):
    def setUp(self):
        self.op_type = "sqrt"
        self.python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(np.float32)
        out = np.sqrt(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def init_shape(self):
        self.shape = [11, 17]

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)


class TestRsqrt(TestActivation):
    def setUp(self):
        self.op_type = "rsqrt"
        self.python_api = paddle.rsqrt
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) * 10
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.0005, check_eager=True
        )


'''
class TestRsqrt_ZeroDim(TestRsqrt):

    def init_shape(self):
        self.shape = []
'''


class TestAbs(TestActivation):
    def setUp(self):
        self.op_type = "abs"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # Because delta = 0.005 is used when computing the numeric gradient,
        # an x that is too small (e.g. 0.002) gives x_neg = -0.003 and
        # x_pos = 0.007, so the numeric gradient is inaccurate;
        # such values are avoided here.
        x[np.abs(x) < 0.005] = 0.02
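        # (Sketch of the issue, assuming a symmetric difference is used:
        #  numeric_grad ~ (f(x + delta) - f(x - delta)) / (2 * delta); when
        #  |x| < delta the two sample points straddle the kink of abs() at 0,
        #  so the estimate no longer matches the analytic gradient sign(x).)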
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [4, 25]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=False)


class TestAbs_ZeroDim(TestAbs):
    def init_shape(self):
        self.shape = []


class TestCeil(TestActivation):
    def setUp(self):
        self.op_type = "ceil"
        self.check_eager = True
        self.python_api = paddle.ceil
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    # The same reason as in TestFloor: the gradient is undefined, so
    # gradient checking is skipped.
    def test_check_grad(self):
        pass


class TestCeil_ZeroDim(TestCeil):
    def init_shape(self):
        self.shape = []


class TestFloor(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.check_eager = True
        self.python_api = paddle.floor
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    # The gradient of floor, ceil and round is undefined, and the op returns
    # zero as the gradient while numpy would give nan, so gradient checking
    # is skipped.
    def test_check_grad(self):
        pass


class TestFloor_ZeroDim(TestFloor):
    def init_shape(self):
        self.shape = []


class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCos_ZeroDim(TestCos):
    def init_shape(self):
        self.shape = []


class TestTan(TestActivation):
    def setUp(self):
        np.random.seed(1024)
        self.op_type = "tan"
        self.init_dtype()
        self.init_shape()

        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

        out = np.tan(self.x_np)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x_np)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTan_ZeroDim(TestTan):
    def init_shape(self):
        self.shape = []


class TestTanAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(1024)
        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out_test = paddle.tan(x)
        out_ref = np.tan(self.x_np)
        np.testing.assert_allclose(out_ref, out_test.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', [11, 17], self.dtype)
            out = paddle.tan(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = np.tan(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = paddle.to_tensor(input_x)
            var.stop_gradient = False
            loss = paddle.tan(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcos_ZeroDim(TestAcos):
    def init_shape(self):
        self.shape = []


class TestSin(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSin_ZeroDim(TestSin):
    def init_shape(self):
        self.shape = []


class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
        self.init_dtype()
        self.init_shape()

        np.random.seed(2048)
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsin_ZeroDim(TestAsin):
    def init_shape(self):
        self.shape = []


class TestAcosh(TestActivation):
    def setUp(self):
        self.op_type = "acosh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(2, 3, self.shape).astype(self.dtype)
        out = np.arccosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcosh_ZeroDim(TestAcosh):
    def init_shape(self):
        self.shape = []


class TestAsinh(TestActivation):
    def setUp(self):
        self.op_type = "asinh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.arcsinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsinh_ZeroDim(TestAsinh):
    def init_shape(self):
        self.shape = []


class TestAtanh(TestActivation):
    def setUp(self):
        self.op_type = "atanh"
        self.init_dtype()
        self.init_shape()

        np.random.seed(400)
        x = np.random.uniform(-0.9, 0.9, self.shape).astype(self.dtype)
        out = np.arctanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAtanh_ZeroDim(TestAtanh):
    def init_shape(self):
        self.shape = []


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.check_eager = True
        self.python_api = paddle.round
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        pass


class TestRound_ZeroDim(TestRound):
    def init_shape(self):
        self.shape = []


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        if self.dtype == np.uint16:
            x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
            # The same reason as in TestAbs
            x[np.abs(x) < 0.005] = 0.02
            out = convert_float_to_uint16(np.maximum(x, 0))
            self.inputs = {'X': convert_float_to_uint16(x)}
        else:
            x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
            # The same reason as in TestAbs
            x[np.abs(x) < 0.005] = 0.02
            out = np.maximum(x, 0)
            self.inputs = {'X': x}

        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRelu_ZeroDim(TestRelu):
    def init_shape(self):
        self.shape = []


class TestReluAPI(unittest.TestCase):
    # test paddle.nn.ReLU, paddle.nn.functional.relu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
1581
            else paddle.CPUPlace()
1582
        )
1583 1584 1585 1586
        self.executed_api()

    def executed_api(self):
        self.relu = F.relu
1587 1588

    def test_static_api(self):
1589
        paddle.enable_static()
1590
        with paddle.static.program_guard(paddle.static.Program()):
1591
            x = paddle.fluid.data('X', [10, 12])
1592
            out1 = self.relu(x)
1593 1594 1595 1596 1597 1598
            m = paddle.nn.ReLU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.maximum(self.x_np, 0)
        for r in res:
1599
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
1600 1601 1602 1603 1604

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ReLU()
1605 1606
        out1 = m(x)
        out2 = self.relu(x)
1607 1608
        out_ref = np.maximum(self.x_np, 0)
        for r in [out1, out2]:
1609
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
1610 1611
        paddle.enable_static()

1612
    def test_errors(self):
1613
        paddle.enable_static()
1614
        with paddle.static.program_guard(paddle.static.Program()):
1615
            # The input type must be Variable.
1616
            self.assertRaises(TypeError, self.relu, 1)
1617
            # The input dtype must be float16, float32, float64.
1618 1619 1620
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[10, 12], dtype='int32'
            )
1621
            self.assertRaises(TypeError, self.relu, x_int32)
1622
            # support the input dtype is float16
1623 1624 1625
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[10, 12], dtype='float16'
            )
1626 1627 1628 1629 1630 1631 1632
            self.relu(x_fp16)


class TestReluInplaceAPI(TestReluAPI):
    # test paddle.nn.functional.relu_
    def executed_api(self):
        self.relu = F.relu_
1633 1634


1635 1636 1637 1638 1639 1640
def ref_leaky_relu(x, alpha=0.01):
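    # Reference: leaky_relu(x) = x for x >= 0 and alpha * x otherwise; the
    # alpha subclasses below also exercise alpha > 1 and negative alpha.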
    out = np.copy(x)
    out[out < 0] *= alpha
    return out


class TestLeakyRelu(TestActivation):
    def get_alpha(self):
        return 0.02

    def setUp(self):
        self.op_type = "leaky_relu"
        self.init_dtype()
        self.init_shape()
        alpha = self.get_alpha()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.05
        out = ref_leaky_relu(x, alpha)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {'alpha': alpha}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestLeakyReluAlpha1(TestLeakyRelu):
    def get_alpha(self):
        return 2


class TestLeakyReluAlpha2(TestLeakyRelu):
    def get_alpha(self):
        return -0.01


class TestLeakyReluAlpha3(TestLeakyRelu):
    def get_alpha(self):
        return -2.0


class TestLeakyRelu_ZeroDim(TestLeakyRelu):
    def init_shape(self):
        self.shape = []


class TestLeakyReluAPI(unittest.TestCase):
    # test paddle.nn.LeakyReLU, paddle.nn.functional.leaky_relu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = F.leaky_relu(x)
            m = paddle.nn.LeakyReLU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_leaky_relu(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.leaky_relu(x)
        m = paddle.nn.LeakyReLU()
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.leaky_relu(x, 0.6)
        m = paddle.nn.LeakyReLU(0.6)
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np, 0.6)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.leaky_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.leaky_relu, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.leaky_relu(x_fp16)


def gelu(x, approximate):
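    # Exact form: 0.5 * x * (1 + erf(x / sqrt(2))); when `approximate` is True
    # the tanh-based approximation below is used instead.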
    if approximate:
        y_ref = (
            0.5
            * x
            * (
                1.0
                + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3)))
            )
        )
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)


class TestGeluApproximate(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        self.init_shape()
        approximate = True
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        self.init_shape()
        approximate = False
        np.random.seed(2048)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu_ZeroDim(TestGelu):
    def init_shape(self):
        self.shape = []


class TestGELUAPI(unittest.TestCase):
    # test paddle.nn.GELU, paddle.nn.functional.gelu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.gelu(x)
            m = paddle.nn.GELU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = gelu(self.x_np, False)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.gelu(x)
        m = paddle.nn.GELU()
        out2 = m(x)
        out_ref = gelu(self.x_np, False)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.gelu(x, True)
        m = paddle.nn.GELU(True)
        out2 = m(x)
        out_ref = gelu(self.x_np, True)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.gelu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[11, 17], dtype='int32'
            )
            self.assertRaises(TypeError, F.gelu, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[11, 17], dtype='float16'
            )
            F.gelu(x_fp16)


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same as in TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


def ref_relu6(x, threshold=6.0):
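    # relu6 clips the input into [0, threshold] (6 by default):
    # relu6(x) = min(max(x, 0), threshold).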
    out = np.copy(x)
    out[np.abs(x - threshold) < 0.005] = threshold + 0.02
    out = np.minimum(np.maximum(x, 0), threshold)
    return out


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.relu6

        np.random.seed(1024)
        x = np.random.uniform(-1, 10, self.shape).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_relu6(x)

        self.inputs = {'X': x}
        self.attrs = {'threshold': 6.0}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestRelu6_ZeroDim(TestRelu6):
    def init_shape(self):
        self.shape = []


class TestRelu6API(unittest.TestCase):
    # test paddle.nn.ReLU6, paddle.nn.functional.relu6
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 10, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.relu6(x)
            relu6 = paddle.nn.ReLU6()
            out2 = relu6(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_relu6(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.relu6(x)
        relu6 = paddle.nn.ReLU6()
        out2 = relu6(x)
        out_ref = ref_relu6(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = paddle.nn.functional.relu6(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_relu6(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.relu6, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.relu6, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.relu6(x_fp16)


def ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0):
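    # hardswish(x) = x * min(max(x + offset, 0), threshold) / scale; the value
    # is computed in float32 for float16 inputs to limit rounding error.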
    x_dtype = x.dtype
    if x_dtype == 'float16':
        x_dtype = 'float16'
        x = x.astype('float32')
    return (
        x * np.minimum(np.maximum(x + offset, 0.0), threshold) / scale
    ).astype(x_dtype)


class TestHardSwish(TestActivation):
    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.hardswish

        np.random.seed(1024)
        x = np.random.uniform(-6, 6, self.shape).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        # the same as in TestAbs
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
        out = ref_hardswish(x, threshold, scale, offset)

        self.inputs = {'X': x}
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestHardSwish_ZeroDim(TestHardSwish):
    def init_shape(self):
        self.shape = []


class TestHardswishAPI(unittest.TestCase):
    # test paddle.nn.Hardswish, paddle.nn.functional.hardswish
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.hardswish(x)
            m = paddle.nn.Hardswish()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardswish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor([11648.0, 11448.0])
        out1 = F.hardswish(x)
        m = paddle.nn.Hardswish()
        out2 = m(x)
        out_ref = [11648.0, 11448.0]
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = paddle.nn.functional.hardswish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardswish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.nn.functional.hardswish(x)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardswish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.hardswish, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.hardswish(x_fp16)


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        np.random.seed(4096)
        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason as in TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


def elu(x, alpha):
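    # elu(x) = x for x > 0, alpha * (exp(x) - 1) otherwise.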
    out_ref = np.where(x > 0, x, alpha * (np.exp(x) - 1))
    return out_ref.astype(x.dtype)


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
        alpha = self.get_alpha()
        out = elu(x, alpha)
        # Note: unlike other Relu extensions, point 0 on the standard ELU function (i.e. alpha = 1)
        # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def get_alpha(self):
        return 1.0


class TestELUAlpha(TestELU):
    def get_alpha(self):
        return -0.2


class TestELU_ZeroDim(TestELU):
    def init_shape(self):
        self.shape = []


class TestELUAPI(unittest.TestCase):
    # test paddle.nn.ELU, paddle.nn.functional.elu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.elu = F.elu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.elu(x)
            m = paddle.nn.ELU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = elu(self.x_np, 1.0)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = self.elu(x)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ELU()
        out2 = m(x)
        out_ref = elu(self.x_np, 1.0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.elu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ELU(0.2)
        out2 = m(x)
        out_ref = elu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.elu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[10, 12], dtype='int32'
            )
            self.assertRaises(TypeError, self.elu, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[10, 12], dtype='float16'
            )
            self.elu(x_fp16)


class TestELUInplaceAPI(TestELUAPI):
    # test paddle.nn.functional.elu_
    def executed_api(self):
        self.elu = F.elu_

    def test_alpha_error(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        self.assertRaises(Exception, F.elu_, x, -0.2)
        paddle.enable_static()


def celu(x, alpha):
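    # celu(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1)); alpha must be
    # non-zero, which TestCELUAPI.test_errors checks via ZeroDivisionError.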
    out_ref = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x / alpha) - 1))
    return out_ref.astype(x.dtype)


class TestCELU(TestActivation):
    def setUp(self):
        self.op_type = "celu"
        self.init_dtype()
        self.init_shape()

        self.python_api = paddle.nn.functional.celu
        np.random.seed(1024)
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
        alpha = 1.5
        out = celu(x, alpha)
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestCELU_ZeroDim(TestCELU):
    def init_shape(self):
        self.shape = []


class TestCELUAPI(unittest.TestCase):
    # test paddle.nn.CELU, paddle.nn.functional.celu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.celu = F.celu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.celu(x, 1.5)
            m = paddle.nn.CELU(1.5)
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = celu(self.x_np, 1.5)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = self.celu(x, 1.5)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(1.5)
        out2 = m(x)
        out_ref = celu(self.x_np, 1.5)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.celu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(0.2)
        out2 = m(x)
        out_ref = celu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.celu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[10, 12], dtype='int32'
            )
            self.assertRaises(TypeError, self.celu, x_int32)
            # The alpha must not be equal to 0
            x_fp32 = paddle.fluid.data(
                name='x_fp32', shape=[10, 12], dtype='float32'
            )
            self.assertRaises(ZeroDivisionError, F.celu, x_fp32, 0)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[10, 12], dtype='float16'
            )
            self.celu(x_fp16)


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.python_api = paddle.reciprocal
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestReciprocal_ZeroDim(TestReciprocal):
    def init_shape(self):
        self.shape = []


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.check_eager = True
        self.python_api = paddle.log
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_error(self):
        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

        self.assertRaises(TypeError, paddle.log, in1)
        self.assertRaises(TypeError, paddle.log, in2)


class TestLog_ZeroDim(TestLog):
    def init_shape(self):
        self.shape = []


class TestLog2(TestActivation):
    def setUp(self):
        self.op_type = "log2"
        self.check_eager = True
        self.python_api = paddle.log2
        self.init_dtype()
        self.init_shape()

        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log2(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_error(self):
        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

        self.assertRaises(TypeError, paddle.log2, in1)
        self.assertRaises(TypeError, paddle.log2, in2)

    def test_api(self):
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.static.data(
                name="data_x", shape=[11, 17], dtype="float64"
            )

            out1 = paddle.log2(data_x)
            exe = paddle.static.Executor(place=fluid.CPUPlace())
            exe.run(paddle.static.default_startup_program())
            (res1,) = exe.run(
                paddle.static.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[out1],
            )
        expected_res = np.log2(input_x)
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log2(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log2(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestLog2_ZeroDim(TestLog2):
    def init_shape(self):
        self.shape = []


class TestLog10(TestActivation):
    def setUp(self):
        self.op_type = "log10"
        self.check_eager = True
        self.python_api = paddle.log10
        self.init_dtype()
        self.init_shape()

        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log10(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestLog10_ZeroDim(TestLog10):
    def init_shape(self):
        self.shape = []


class TestLog10API(unittest.TestCase):
    def test_error(self):
        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

        self.assertRaises(TypeError, paddle.log10, in1)
        self.assertRaises(TypeError, paddle.log10, in2)

    def test_api(self):
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.static.data(
                name="data_x", shape=[11, 17], dtype="float64"
            )

            out1 = paddle.log10(data_x)
            exe = paddle.static.Executor(place=paddle.CPUPlace())
            exe.run(paddle.static.default_startup_program())
            (res1,) = exe.run(
                paddle.static.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[out1],
            )
        expected_res = np.log10(input_x)
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log10(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log10(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestLog1p(TestActivation):
    def setUp(self):
        self.op_type = "log1p"
        self.check_eager = True
        self.python_api = paddle.log1p
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log1p(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestLog1p_ZeroDim(TestLog1p):
    def init_shape(self):
        self.shape = []


class TestLog1pAPI(unittest.TestCase):
    def test_api(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.static.data(
                name="data_x",
                shape=[11, 17],
                dtype="float64",
            )

            out1 = paddle.log1p(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
            (res1,) = exe.run(
                fluid.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[out1],
            )
        expected_res = np.log1p(input_x)
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.dygraph.to_variable(np_x)
            z = paddle.log1p(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log1p(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.007, check_eager=True
        )

    def test_check_output(self):
        self.check_output(check_eager=True)


class TestSquare_ZeroDim(TestSquare):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSquareBF16(OpTest):
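    # bfloat16 values are carried as np.uint16 tensors in OpTest; assuming
    # convert_float_to_uint16 keeps the upper 16 bits of the float32 pattern.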
    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
        out = np.square(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place, check_eager=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(
            place, ['X'], 'Out', numeric_grad_delta=0.5, check_eager=True
        )


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.python_api = paddle.pow
        self.check_eager = True
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output(check_eager=self.check_eager)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=self.check_eager)


class TestPow_ZeroDim(TestPow):
    def init_shape(self):
        self.shape = []


class TestPow_factor_tensor(TestActivation):
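    # Here the exponent is fed through the optional FactorTensor input rather
    # than the 'factor' attribute; eager checking is disabled for this variant.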
    def setUp(self):
        self.op_type = "pow"
        self.check_eager = False
        self.python_api = paddle.pow
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype("float32"),
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output(check_eager=self.check_eager)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=self.check_eager)

    def test_api(self):
        input = np.random.uniform(1, 2, [11, 17]).astype("float32")
        x = paddle.static.data(name="x", shape=[11, 17], dtype="float32")
        res = paddle.static.data(name="res", shape=[11, 17], dtype="float32")

        factor_1 = 2.0
        factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
        out_1 = paddle.pow(x, factor_1)
        out_2 = paddle.pow(x, factor_2)
        out_4 = paddle.pow(x, factor_1, name='pow_res')
        out_6 = paddle.pow(x, factor_2)
        self.assertEqual(('pow_res' in out_4.name), True)

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2, res, res_6 = exe.run(
            fluid.default_main_program(),
            feed={"x": input},
            fetch_list=[out_1, out_2, res, out_6],
        )

        assert np.allclose(res_1, np.power(input, 2))
        assert np.allclose(res_2, np.power(input, 3))
        assert np.allclose(res_6, np.power(input, 3))


def ref_stanh(x, scale_a=0.67, scale_b=1.7159):
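    # stanh(x) = scale_b * tanh(scale_a * x)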
    out = scale_b * np.tanh(x * scale_a)
    return out


class TestSTanh(TestActivation):
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        self.op_type = "stanh"
        self.init_dtype()
        self.init_shape()

        scale_a = self.get_scale_a()
        scale_b = self.get_scale_b()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        # The same reason as in TestAbs
        out = ref_stanh(x, scale_a, scale_b)

        self.inputs = {'X': x}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSTanhScaleA(TestSTanh):
    def get_scale_a(self):
        return 2.0


class TestSTanhScaleB(TestSTanh):
    def get_scale_b(self):
        return 0.5


class TestSTanh_ZeroDim(TestSTanh):
    def init_shape(self):
        self.shape = []


class TestSTanhAPI(unittest.TestCase):
    # test paddle.nn.stanh
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.scale_a = self.get_scale_a()
        self.scale_b = self.get_scale_b()
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out = paddle.stanh(x, self.scale_a, self.scale_b)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.stanh(x, self.scale_a, self.scale_b)
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in [out]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12])
            out = paddle.stanh(x, self.scale_a, self.scale_b)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, paddle.stanh, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, paddle.stanh, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            paddle.stanh(x_fp16)


class TestSTanhAPIScaleA(TestSTanhAPI):
    def get_scale_a(self):
        return 2.0


class TestSTanhAPIScaleB(TestSTanhAPI):
    def get_scale_b(self):
        return 0.5


def ref_softplus(x, beta=1, threshold=20):
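    # softplus(x) = log(1 + exp(beta * x)) / beta; for beta * x > threshold the
    # result falls back to x itself, presumably to avoid overflowing exp().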
    x_beta = beta * x
    out = np.select(
        [x_beta <= threshold, x_beta > threshold],
        [np.log(1 + np.exp(x_beta)) / beta, x],
    )
    return out


class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.python_api = paddle.nn.functional.softplus
        self.init_dtype()
        self.init_shape()

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': x}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': out}

        self.check_eager = True

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)


class TestSoftplus_ZeroDim(TestSoftplus):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSoftplusBF16(OpTest):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(np.float32)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', numeric_grad_delta=0.05)


class TestSoftplusAPI(unittest.TestCase):
    # test paddle.nn.Softplus, paddle.nn.functional.softplus
    def setUp(self):
        self.beta = 2
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softplus(x, self.beta, self.threshold)
            softplus = paddle.nn.Softplus(self.beta, self.threshold)
            out2 = softplus(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softplus(x, self.beta, self.threshold)
        softplus = paddle.nn.Softplus(self.beta, self.threshold)
        out2 = softplus(x)
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softplus, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.softplus, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.softplus(x_fp16)


def ref_softsign(x):
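    # softsign(x) = x / (1 + |x|)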
    out = np.divide(x, 1 + np.abs(x))
    return out


class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()
        self.init_shape()

        self.python_api = paddle.nn.functional.softsign

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_softsign(x)
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestSoftsign_ZeroDim(TestSoftsign):
    def init_shape(self):
        self.shape = []


class TestSoftsignAPI(unittest.TestCase):
    # test paddle.nn.Softsign, paddle.nn.functional.softsign
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.softsign(x)
            softsign = paddle.nn.Softsign()
            out2 = softsign(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softsign(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softsign(x)
        softsign = paddle.nn.Softsign()
        out2 = softsign(x)
        out_ref = ref_softsign(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softsign, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.softsign, x_int32)
            # the float16 input dtype is supported
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.softsign(x_fp16)


def ref_thresholded_relu(x, threshold=1.0):
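    # thresholded_relu(x) = x when x > threshold, 0 otherwise.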
    out = (x > threshold) * x
    return out


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()
        self.init_shape()

        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-20, 20, self.shape).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_thresholded_relu(x, threshold)
        self.inputs = {'X': x}
        self.attrs = {"threshold": threshold}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestThresholdedRelu_ZeroDim(TestThresholdedRelu):
    def init_shape(self):
        self.shape = []


class TestThresholdedReluAPI(unittest.TestCase):
    # test paddle.nn.ThresholdedReLU, paddle.nn.functional.thresholded_relu
    def setUp(self):
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-20, 20, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.thresholded_relu(x, self.threshold)
            thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
            out2 = thresholded_relu(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.thresholded_relu(x, self.threshold)
        thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
        out2 = thresholded_relu(x)
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.thresholded_relu, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.thresholded_relu, x_int32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.thresholded_relu(x_fp16)


def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5):
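    # hardsigmoid(x) = clip(slope * x + offset, 0, 1).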
    return np.maximum(np.minimum(x * slope + offset, 1.0), 0.0).astype(x.dtype)


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.dtype = 'float64'
        self.slope = 0.166666666666667
        self.offset = 0.5
        self.set_attrs()
        self.init_shape()

        x = np.random.uniform(-5, 5, self.shape).astype(self.dtype)
        lower_threshold = -self.offset / self.slope
        upper_threshold = (1.0 - self.offset) / self.slope

        # Same reason as in TestAbs: keep x away from the clipping thresholds,
        # where the numerically estimated gradient is unreliable.
        delta = 0.005
        x[np.abs(x - lower_threshold) < delta] = lower_threshold - 0.02
        x[np.abs(x - upper_threshold) < delta] = upper_threshold - 0.02

        out = ref_hardsigmoid(x, self.slope, self.offset)

        self.attrs = {'slope': self.slope, 'offset': self.offset}
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def set_attrs(self):
        pass


class TestHardSigmoidFP32(TestHardSigmoid):
    def set_attrs(self):
        self.dtype = 'float32'


class TestHardSigmoidSlopeOffset(TestHardSigmoid):
    def set_attrs(self):
        self.slope = 0.2
        self.offset = 0.4


class TestHardSigmoid_ZeroDim(TestHardSigmoid):
    def init_shape(self):
        self.shape = []


class TestHardsigmoidAPI(unittest.TestCase):
    # test paddle.nn.Hardsigmoid, paddle.nn.functional.hardsigmoid
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.hardsigmoid(x)
            m = paddle.nn.Hardsigmoid()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardsigmoid(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardsigmoid(x)
        m = paddle.nn.Hardsigmoid()
        out2 = m(x)
        out_ref = ref_hardsigmoid(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardsigmoid, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.hardsigmoid, x_int32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.hardsigmoid(x_fp16)


def ref_swish(x):
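    # swish(x) = x * sigmoid(x); expit is SciPy's logistic sigmoid.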
    out = x * expit(x)
    return out


class TestSwish(TestActivation):
    def setUp(self):
        self.op_type = "swish"
        self.python_api = paddle.nn.functional.swish
        self.init_dtype()
        self.init_shape()

        self.check_eager = True

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_swish(x)
        self.inputs = {'X': x}
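        # beta=1.0, so the op output matches ref_swish, which applies the plain logistic sigmoid.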
        self.attrs = {'beta': 1.0}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)


class TestSwish_ZeroDim(TestSwish):
    def init_shape(self):
        self.shape = []


class TestSwishAPI(unittest.TestCase):
    # test paddle.nn.Swish, paddle.nn.functional.swish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.swish(x)
            swish = paddle.nn.Swish()
            out2 = swish(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_swish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.swish(x)
        swish = paddle.nn.Swish()
        out2 = swish(x)
        out_ref = ref_swish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = paddle.nn.functional.swish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_swish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.swish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.swish, x_int32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.swish(x_fp16)


def ref_mish(x, threshold=20.0):
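    # mish(x) = x * tanh(softplus(x)); softplus falls back to the identity above threshold to avoid overflow in exp.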
    softplus = np.select(
        [x <= threshold, x > threshold], [np.log(1 + np.exp(x)), x]
    )
    return x * np.tanh(softplus)


class TestMish(TestActivation):
    def setUp(self):
        self.op_type = "mish"
        self.python_api = paddle.nn.functional.mish
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_mish(x)
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_eager=True)


class TestMish_ZeroDim(TestMish):
    def init_shape(self):
        self.shape = []


class TestMishAPI(unittest.TestCase):
    # test paddle.nn.Mish, paddle.nn.functional.mish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.mish(x)
            mish = paddle.nn.Mish()
            out2 = mish(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_mish(self.x_np)
        for r in res:
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.mish(x)
        mish = paddle.nn.Mish()
        out2 = mish(x)
        out_ref = ref_mish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = paddle.nn.functional.mish(x)
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_mish(self.x_np)
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.mish, 1)
            # The input dtype must be float16, float32, float64.
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
            self.assertRaises(TypeError, F.mish, x_int32)
            # The float16 input dtype is supported.
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
            F.mish(x_fp16)


# ------------------ Test Cudnn Activation ----------------------
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
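    # Dynamically derive a cuDNN-enabled variant of `parent`, name it
    # "<parent>_cudnn", and register it in the module globals so that
    # unittest discovers it.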
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


# ------------------ Test Fp16 ----------------------
def create_test_act_fp16_class(
    parent, atol=1e-3, grad_check=True, grad_atol=0.80
):
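    # Same pattern as the cuDNN factory above: derive a float16 variant of
    # `parent` that only runs its checks when the CUDA device supports fp16;
    # atol and grad_atol relax the output and gradient tolerances.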
    @unittest.skipIf(
        not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place, ['X'], 'Out', max_relative_error=grad_atol
                )

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestExpm1)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestSilu)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhshrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftshrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
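# Ceil/Floor (and Round below) pass grad_check=False: their outputs are
# piecewise constant, so the gradient is zero almost everywhere and only the
# forward fp16 check is run.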
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestTan, grad_atol=0.85)
create_test_act_fp16_class(TestCosh, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestSinh)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestAcosh, grad_atol=0.85)
create_test_act_fp16_class(TestAsinh, grad_atol=0.85)
create_test_act_fp16_class(TestAtanh, grad_atol=0.85)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu, grad_atol=0.85)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestCELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
if core.is_compiled_with_rocm():
    create_test_act_fp16_class(TestLog2, atol=5e-2, grad_atol=0.85)
else:
    create_test_act_fp16_class(TestLog2, atol=5e-2)
create_test_act_fp16_class(TestLog10, atol=5e-2)
create_test_act_fp16_class(TestLog1p, grad_atol=0.9)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestPow_factor_tensor, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish, grad_atol=0.85)
create_test_act_fp16_class(TestHardSwish)
create_test_act_fp16_class(TestMish, grad_atol=0.9)


def create_test_act_bf16_class(
    parent, atol=1e-2, grad_check=True, grad_atol=0.80
):
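    # bfloat16 counterpart of the fp16 factory above; the derived class is
    # registered as "<parent>_bf16".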
    @unittest.skipIf(
        not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActBF16(parent):
        def init_dtype(self):
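            # bfloat16 data is carried as uint16 bit patterns (cf. convert_float_to_uint16 in OpTest).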
            self.dtype = np.uint16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place, ['X'], 'Out', max_relative_error=grad_atol
            )

    cls_name = "{0}_{1}".format(parent.__name__, "bf16")
    TestActBF16.__name__ = cls_name
    globals()[cls_name] = TestActBF16


create_test_act_bf16_class(TestRelu)
create_test_act_bf16_class(TestAbs)

if __name__ == "__main__":
    unittest.main()