test_activation_op.py 111.4 KB
Newer Older
1
#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
D
dzhwinter 已提交
2
#
D
dzhwinter 已提交
3 4 5
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
D
dzhwinter 已提交
6
#
D
dzhwinter 已提交
7
#     http://www.apache.org/licenses/LICENSE-2.0
D
dzhwinter 已提交
8
#
D
dzhwinter 已提交
9 10 11 12 13 14
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

Q
qijun 已提交
15
import unittest
J
joejiong 已提交
16

Q
qijun 已提交
17
import numpy as np
18
from op_test import OpTest, convert_float_to_uint16
19 20
from scipy.special import erf, expit

21
import paddle
J
joejiong 已提交
22 23
import paddle.fluid as fluid
import paddle.fluid.core as core
24
import paddle.nn.functional as F
25
from paddle.fluid import Program, program_guard
Q
qijun 已提交
26

27 28
paddle.enable_static()

Q
qijun 已提交
29

30
class TestSqrtOpError(unittest.TestCase):
Z
Zhaolong Xing 已提交
31 32 33 34
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of sqrt op must be Variable or numpy.ndarray.
            in1 = 1
35
            self.assertRaises(TypeError, paddle.sqrt, in1)
Z
Zhaolong Xing 已提交
36
            # The input dtype of sqrt op must be float16, float32, float64.
37 38 39
            in2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32"
            )
40
            self.assertRaises(TypeError, paddle.sqrt, in2)
Z
Zhaolong Xing 已提交
41

42 43 44
            in3 = fluid.layers.data(
                name='input3', shape=[12, 10], dtype="float16"
            )
45
            paddle.sqrt(x=in3)
Z
Zhaolong Xing 已提交
46 47


C
chengduo 已提交
48
class TestActivation(OpTest):
Q
qijun 已提交
49 50
    def setUp(self):
        self.op_type = "exp"
51
        self.init_dtype()
52
        self.init_shape()
53
        self.init_kernel_type()
C
chentianyu03 已提交
54 55
        self.check_eager = True
        self.python_api = paddle.exp
56

57
        np.random.seed(2049)
58
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
59 60 61 62
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
Q
qijun 已提交
63 64

    def test_check_output(self):
65 66 67 68
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_output(check_eager=check_eager)
Q
qijun 已提交
69 70

    def test_check_grad(self):
71 72
        if self.dtype == np.float16:
            return
73 74 75 76
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)
Q
qijun 已提交
77

78
    def init_dtype(self):
79
        self.dtype = np.float64
80

81 82 83
    def init_shape(self):
        self.shape = [11, 17]

84 85 86
    def init_kernel_type(self):
        pass

Q
qijun 已提交
87

88 89 90 91 92
class TestActivation_ZeroDim(TestActivation):
    def init_shape(self):
        self.shape = []


R
ronnywang 已提交
93 94 95
class TestExpm1(TestActivation):
    def setUp(self):
        self.op_type = "expm1"
96
        self.python_api = paddle.expm1
R
ronnywang 已提交
97
        self.init_dtype()
98
        self.init_shape()
R
ronnywang 已提交
99 100

        np.random.seed(2049)
101
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
R
ronnywang 已提交
102 103 104 105 106 107
        out = np.expm1(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
108 109 110 111
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)
R
ronnywang 已提交
112 113


114 115 116 117 118
class TestExpm1_ZeroDim(TestExpm1):
    def init_shape(self):
        self.shape = []


R
ronnywang 已提交
119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142
class TestExpm1API(unittest.TestCase):
    def init_dtype(self):
        self.dtype = 'float64'
        self.shape = [11, 17]

    def setUp(self):
        self.init_dtype()
        self.x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        self.out_ref = np.expm1(self.x)

        self.place = [paddle.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.place.append(paddle.CUDAPlace(0))

    def test_static_api(self):
        paddle.enable_static()

        def run(place):
            with paddle.static.program_guard(paddle.static.Program()):
                X = paddle.fluid.data('X', self.shape, dtype=self.dtype)
                out = paddle.expm1(X)
                exe = paddle.static.Executor(place)
                res = exe.run(feed={'X': self.x})
            for r in res:
143
                np.testing.assert_allclose(self.out_ref, r, rtol=1e-05)
R
ronnywang 已提交
144 145 146 147 148 149 150 151 152

        for place in self.place:
            run(place)

    def test_dygraph_api(self):
        def run(place):
            paddle.disable_static(place)
            X = paddle.to_tensor(self.x)
            out = paddle.expm1(X)
153
            np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)
R
ronnywang 已提交
154 155 156 157 158 159 160 161 162 163 164 165 166
            paddle.enable_static()

        for place in self.place:
            run(place)

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            X = paddle.fluid.data('X', self.shape, dtype='int32')
            self.assertRaises(TypeError, paddle.expm1, X)
        # The input dtype must be float16, float32, float64.


167
class TestParameter:
168 169
    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
W
WuHaobo 已提交
170
            np_x = np.array([0.1])
171
            data = fluid.layers.data(name="X", shape=[1])
W
WuHaobo 已提交
172
            out = eval("paddle.%s(data, name='Y')" % self.op_type)
173 174
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
175
            (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
W
WuHaobo 已提交
176
            expected = eval("np.%s(np_x)" % self.op_type)
177
            np.testing.assert_allclose(result, expected, rtol=1e-05)
178 179 180 181 182 183 184

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = eval("paddle.%s(x).numpy()" % self.op_type)
            z_expected = eval("np.%s(np_x)" % self.op_type)
185
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)
186 187


C
chengduo 已提交
188
class TestSigmoid(TestActivation):
Q
qijun 已提交
189 190
    def setUp(self):
        self.op_type = "sigmoid"
191
        self.init_dtype()
192
        self.init_shape()
193

194
        np.random.seed(1024)
195
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
196 197 198 199
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
Q
qijun 已提交
200

201 202 203
    def init_dtype(self):
        self.dtype = np.float32

204
    def test_check_grad(self):
205 206 207 208
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)

209

210 211 212 213 214
class TestSigmoid_ZeroDim(TestSigmoid):
    def init_shape(self):
        self.shape = []


215 216 217
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
218 219 220 221
class TestSigmoidBF16(OpTest):
    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()
222
        self.init_shape()
223 224

        np.random.seed(1024)
225
        x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
226 227 228 229 230 231 232 233 234 235
        out = 1 / (1 + np.exp(-x))

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

236 237 238
    def init_shape(self):
        self.shape = [11, 17]

239 240 241 242 243 244 245 246 247
    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out')


248 249 250 251 252 253 254 255
'''
class TestSigmoidBF16_ZeroDim(TestSigmoidBF16):

    def init_shape(self):
        self.shape = []
'''


M
minghaoBD 已提交
256 257 258 259
class TestSilu(TestActivation):
    def setUp(self):
        self.op_type = "silu"
        self.init_dtype()
260
        self.init_shape()
M
minghaoBD 已提交
261 262

        np.random.seed(1024)
263
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
M
minghaoBD 已提交
264 265 266 267 268 269 270 271 272 273 274 275 276 277
        out = x / (np.exp(-x) + 1)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


278 279 280 281 282
class TestSilu_ZeroDim(TestSilu):
    def init_shape(self):
        self.shape = []


M
minghaoBD 已提交
283 284 285 286
class TestSiluAPI(unittest.TestCase):
    # test paddle.nn.Silu, paddle.nn.functional.silu
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
287 288 289
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
M
minghaoBD 已提交
290
            else paddle.CPUPlace()
291
        )
M
minghaoBD 已提交
292 293 294 295 296 297 298 299 300 301 302 303

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [11, 17])
            out1 = F.silu(x)
            m = paddle.nn.Silu()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in res:
304
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
M
minghaoBD 已提交
305 306 307 308 309 310 311 312 313

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.silu(x)
        m = paddle.nn.Silu()
        out2 = m(x)
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in [out1, out2]:
314
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
M
minghaoBD 已提交
315 316 317 318 319 320 321
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.silu, 1)
            # The input dtype must be float16, float32, float64.
322 323 324
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[11, 17], dtype='int32'
            )
M
minghaoBD 已提交
325 326
            self.assertRaises(TypeError, F.silu, x_int32)
            # support the input dtype is float16
327 328 329
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[11, 17], dtype='float16'
            )
M
minghaoBD 已提交
330 331 332
            F.silu(x_fp16)


C
chengduo 已提交
333
class TestLogSigmoid(TestActivation):
334 335
    def setUp(self):
        self.op_type = "logsigmoid"
336
        self.init_dtype()
337
        self.init_shape()
338

339
        np.random.seed(2048)
340
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
341 342
        out = np.log(1 / (1 + np.exp(-x)))

343
        self.inputs = {'X': x}
344
        self.outputs = {'Out': out}
345 346

    def test_check_grad(self):
347 348
        if self.dtype == np.float16:
            return
F
fengjiayi 已提交
349
        self.check_grad(['X'], 'Out', max_relative_error=0.008)
350 351


352 353 354 355 356
class TestLogSigmoid_ZeroDim(TestLogSigmoid):
    def init_shape(self):
        self.shape = []


357
class TestLogSigmoidAPI(unittest.TestCase):
358
    # test paddle.nn.LogSigmoid, paddle.nn.functional.log_sigmoid
359
    def setUp(self):
360
        np.random.seed(1024)
361
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
362 363 364
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
365
            else paddle.CPUPlace()
366
        )
367 368

    def test_static_api(self):
369
        paddle.enable_static()
370
        with paddle.static.program_guard(paddle.static.Program()):
371
            x = paddle.fluid.data('X', [11, 17])
372
            out1 = F.log_sigmoid(x)
373 374 375 376 377 378
            m = paddle.nn.LogSigmoid()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in res:
379
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
380 381 382 383

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
384
        out1 = F.log_sigmoid(x)
385 386 387 388
        m = paddle.nn.LogSigmoid()
        out2 = m(x)
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in [out1, out2]:
389
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
390 391 392
        paddle.enable_static()

    def test_errors(self):
393
        paddle.enable_static()
394 395
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
396
            self.assertRaises(TypeError, F.log_sigmoid, 1)
397
            # The input dtype must be float16, float32, float64.
398 399 400
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[11, 17], dtype='int32'
            )
401
            self.assertRaises(TypeError, F.log_sigmoid, x_int32)
402
            # support the input dtype is float16
403 404 405
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[11, 17], dtype='float16'
            )
406
            F.log_sigmoid(x_fp16)
407 408


409
class TestTanh(TestActivation, TestParameter):
410 411
    def setUp(self):
        self.op_type = "tanh"
412
        self.init_dtype()
413 414
        self.init_shape()

415
        np.random.seed(1024)
416
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
417 418 419 420
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
421 422

    def test_check_grad(self):
423 424
        if self.dtype == np.float16:
            return
425
        self.check_grad(['X'], 'Out')
426

427
    def init_dtype(self):
428
        # TODO If dtype is float64, the output (Out) has diff at CPUPlace
429 430 431 432
        # when using and not using inplace. Therefore, set dtype as float32
        # for now.
        self.dtype = np.float32

433

434 435 436 437 438
class TestTanh_ZeroDim(TestTanh):
    def init_shape(self):
        self.shape = []


W
WangXi 已提交
439 440 441 442
class TestTanhAPI(unittest.TestCase):
    # test paddle.tanh, paddle.nn.tanh, paddle.nn.functional.tanh
    def setUp(self):
        self.dtype = 'float32'
443
        np.random.seed(1024)
W
WangXi 已提交
444
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
445 446 447
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
W
WangXi 已提交
448
            else paddle.CPUPlace()
449
        )
450 451 452 453
        self.executed_api()

    def executed_api(self):
        self.tanh = F.tanh
W
WangXi 已提交
454 455

    def test_static_api(self):
456
        paddle.enable_static()
W
WangXi 已提交
457
        with paddle.static.program_guard(paddle.static.Program()):
458
            x = paddle.fluid.data('X', [10, 12], self.dtype)
459
            out1 = self.tanh(x)
W
WangXi 已提交
460 461 462 463 464 465
            th = paddle.nn.Tanh()
            out2 = th(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.tanh(self.x_np)
        for r in res:
466
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
W
WangXi 已提交
467 468 469

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
Z
Zhou Wei 已提交
470
        x = paddle.to_tensor(self.x_np)
W
WangXi 已提交
471 472 473 474 475 476
        out1 = F.tanh(x)
        out2 = paddle.tanh(x)
        th = paddle.nn.Tanh()
        out3 = th(x)
        out_ref = np.tanh(self.x_np)
        for r in [out1, out2, out3]:
477
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
W
WangXi 已提交
478 479 480
        paddle.enable_static()

    def test_errors(self):
481
        paddle.enable_static()
W
WangXi 已提交
482 483
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
484
            self.assertRaises(TypeError, self.tanh, 1)
W
WangXi 已提交
485
            # The input dtype must be float16, float32.
486 487 488
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
489
            self.assertRaises(TypeError, self.tanh, x_int32)
W
WangXi 已提交
490
            # support the input dtype is float16
491 492 493
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
494 495 496 497 498 499 500
            self.tanh(x_fp16)


class TestTanhInplaceAPI(TestTanhAPI):
    # test paddle.tanh_
    def executed_api(self):
        self.tanh = paddle.tanh_
W
WangXi 已提交
501 502


503
class TestAtan(TestActivation, TestParameter):
504 505 506
    def setUp(self):
        self.op_type = "atan"
        self.init_dtype()
507
        self.init_shape()
508

509
        np.random.seed(1024)
510
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
511 512 513 514 515 516 517 518
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
519
        self.check_grad(['X'], 'Out')
520

W
WuHaobo 已提交
521 522 523 524 525 526 527
    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            np_x = np.array([0.1])
            data = fluid.layers.data(name="X", shape=[1])
            out = paddle.atan(data, name='Y')
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
528
            (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
W
WuHaobo 已提交
529 530 531
            expected = np.arctan(np_x)
            self.assertEqual(result, expected)

532 533 534 535 536 537 538 539
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.atan(x).numpy()
            z_expected = np.arctan(np_x)
            self.assertEqual(z, z_expected)

540

541 542 543 544 545
class TestAtan_ZeroDim(TestTanh):
    def init_shape(self):
        self.shape = []


546 547 548 549
class TestSinh(TestActivation):
    def setUp(self):
        self.op_type = "sinh"
        self.init_dtype()
550
        self.init_shape()
551

552
        np.random.seed(1024)
553
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
554 555 556 557 558 559 560 561 562 563
        out = np.sinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

564 565 566 567 568 569 570

class TestSinh_ZeroDim(TestSinh):
    def init_shape(self):
        self.shape = []


class TestSinhAPI(unittest.TestCase):
571 572 573 574
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
575
            z = paddle.sinh(x).numpy()
576
            z_expected = np.sinh(np_x)
577
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)
578 579 580 581

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
582 583 584 585 586 587 588 589 590
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            data_x = fluid.layers.data(
                name="data_x",
                shape=test_data_shape,
                append_batch_size=False,
                dtype="float32",
            )
591

592
            pd_sinh_out = paddle.sinh(data_x)
593 594
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
595 596 597 598 599
            (np_sinh_res,) = exe.run(
                fluid.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[pd_sinh_out],
            )
600 601

        expected_res = np.sinh(input_x)
602
        np.testing.assert_allclose(np_sinh_res, expected_res, rtol=1e-05)
603 604 605 606

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
607 608 609
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
610 611
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
612
            loss = paddle.sinh(var)
613 614 615 616 617 618 619 620 621
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestSinhOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
622
            self.assertRaises(TypeError, paddle.sinh, 1)
623 624
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
625
            self.assertRaises(TypeError, paddle.sinh, x_int32)
626 627
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
628
            paddle.sinh(x_fp16)
629 630 631 632 633 634


class TestCosh(TestActivation):
    def setUp(self):
        self.op_type = "cosh"
        self.init_dtype()
635
        self.init_shape()
636

637
        np.random.seed(1024)
638
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
639 640 641 642 643 644 645 646 647 648
        out = np.cosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

649 650 651 652 653 654 655

class TestCosh_ZeroDim(TestCosh):
    def init_shape(self):
        self.shape = []


class TestCoshAPI(unittest.TestCase):
656 657 658 659
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
660
            z = paddle.cosh(x).numpy()
661
            z_expected = np.cosh(np_x)
662
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)
663 664 665 666

    def test_api(self):
        test_data_shape = [11, 17]
        with fluid.program_guard(fluid.Program(), fluid.Program()):
667 668 669 670 671 672 673 674 675
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            data_x = fluid.layers.data(
                name="data_x",
                shape=test_data_shape,
                append_batch_size=False,
                dtype="float32",
            )
676 677 678 679

            pd_cosh_out = paddle.cosh(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
680 681 682 683 684
            (np_cosh_res,) = exe.run(
                fluid.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[pd_cosh_out],
            )
685 686

        expected_res = np.cosh(input_x)
687
        np.testing.assert_allclose(np_cosh_res, expected_res, rtol=1e-05)
688 689 690 691

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
692 693 694
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
695 696
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
697
            loss = paddle.cosh(var)
698 699 700 701 702 703 704 705 706
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestCoshOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program()):
            # The input type must be Variable.
707
            self.assertRaises(TypeError, paddle.cosh, 1)
708 709
            # The input dtype must be float16, float32, float64.
            x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
710
            self.assertRaises(TypeError, paddle.cosh, x_int32)
711 712
            # support the input dtype is float16
            x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
713
            paddle.cosh(x_fp16)
714 715


716 717 718 719 720 721
def ref_tanhshrink(x):
    out = x - np.tanh(x)
    return out


class TestTanhshrink(TestActivation):
K
Kavya Srinet 已提交
722 723
    def setUp(self):
        self.op_type = "tanh_shrink"
724
        self.init_dtype()
725
        self.init_shape()
726

727
        np.random.seed(1024)
728
        x = np.random.uniform(10, 20, self.shape).astype(self.dtype)
729
        out = ref_tanhshrink(x)
730

731
        self.inputs = {'X': x}
732
        self.outputs = {'Out': out}
K
Kavya Srinet 已提交
733 734

    def test_check_grad(self):
735 736
        if self.dtype == np.float16:
            return
737
        self.check_grad(['X'], 'Out')
K
Kavya Srinet 已提交
738

739

740 741 742 743 744
class TestTanhshrink_ZeroDim(TestTanhshrink):
    def init_shape(self):
        self.shape = []


745 746 747
class TestTanhshrinkAPI(unittest.TestCase):
    # test paddle.nn.Tanhshrink, paddle.nn.functional.tanhshrink
    def setUp(self):
748
        np.random.seed(1024)
749
        self.x_np = np.random.uniform(10, 20, [10, 17]).astype(np.float64)
750 751 752
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
753
            else paddle.CPUPlace()
754
        )
755 756

    def test_static_api(self):
757
        paddle.enable_static()
758
        with paddle.static.program_guard(paddle.static.Program()):
759
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
760 761 762 763 764 765 766
            out1 = F.tanhshrink(x)
            tanhshrink = paddle.nn.Tanhshrink()
            out2 = tanhshrink(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_tanhshrink(self.x_np)
        for r in res:
767
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
768 769 770 771 772 773 774 775 776

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanhshrink(x)
        tanhshrink = paddle.nn.Tanhshrink()
        out2 = tanhshrink(x)
        out_ref = ref_tanhshrink(self.x_np)
        for r in [out1, out2]:
777
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
778 779 780
        paddle.enable_static()

    def test_errors(self):
781
        paddle.enable_static()
782 783 784 785
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.tanhshrink, 1)
            # The input dtype must be float16, float32, float64.
786 787 788
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
789 790
            self.assertRaises(TypeError, F.tanhshrink, x_int32)
            # support the input dtype is float16
791 792 793
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
794 795 796
            F.tanhshrink(x_fp16)


797 798 799 800 801 802
def ref_hardshrink(x, threshold):
    out = np.copy(x)
    out[(out >= -threshold) & (out <= threshold)] = 0
    return out


C
chengduo 已提交
803
class TestHardShrink(TestActivation):
804 805
    def setUp(self):
        self.op_type = "hard_shrink"
806
        self.init_dtype()
807
        self.init_shape()
808

809 810
        self.threshold = 0.5
        self.set_attrs()
811
        np.random.seed(1024)
812
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) * 10
813
        out = ref_hardshrink(x, self.threshold)
814

815
        self.attrs = {'threshold': self.threshold}
816
        self.inputs = {'X': x}
817
        self.outputs = {'Out': out}
818

819 820 821
    def init_shape(self):
        self.shape = [10, 12]

822 823 824
    def set_attrs(self):
        pass

825
    def test_check_grad(self):
826 827
        if self.dtype == np.float16:
            return
828
        self.check_grad(['X'], 'Out')
829 830


831 832 833 834 835
class TestHardShrink_threshold_negative(TestHardShrink):
    def set_attrs(self):
        self.threshold = -0.1


836 837 838 839 840 841 842 843
'''
class TestHardShrink_ZeroDim(TestHardShrink):

    def init_shape(self):
        self.shape = []
'''


844 845 846
class TestHardShrinkAPI(unittest.TestCase):
    # test paddle.nn.Hardshrink, paddle.nn.functional.hardshrink
    def setUp(self):
847
        np.random.seed(1024)
848
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
849 850 851
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
852
            else paddle.CPUPlace()
853
        )
854 855

    def test_static_api(self):
856
        paddle.enable_static()
857
        with paddle.static.program_guard(paddle.static.Program()):
858
            x = paddle.fluid.data('X', [10, 12])
859 860 861 862 863 864 865
            out1 = F.hardshrink(x)
            hd = paddle.nn.Hardshrink()
            out2 = hd(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in res:
866
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
867 868 869

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
Z
Zhou Wei 已提交
870
        x = paddle.to_tensor(self.x_np)
871 872 873 874 875
        out1 = F.hardshrink(x)
        hd = paddle.nn.Hardshrink()
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in [out1, out2]:
876
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
877 878 879 880 881 882

        out1 = F.hardshrink(x, 0.6)
        hd = paddle.nn.Hardshrink(0.6)
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.6)
        for r in [out1, out2]:
883
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
884 885
        paddle.enable_static()

886
    def test_errors(self):
887
        paddle.enable_static()
888
        with paddle.static.program_guard(paddle.static.Program()):
889
            # The input type must be Variable.
890
            self.assertRaises(TypeError, F.hardshrink, 1)
891
            # The input dtype must be float16, float32, float64.
892 893 894
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
895
            self.assertRaises(TypeError, F.hardshrink, x_int32)
896
            # support the input dtype is float16
897 898 899
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
900
            F.hardshrink(x_fp16)
901 902


903 904 905 906 907 908 909 910 911 912 913
def ref_hardtanh(x, min=-1.0, max=1.0):
    out = np.copy(x)
    out[np.abs(x - min) < 0.005] = min + 0.02
    out[np.abs(x - max) < 0.005] = max + 0.02
    out = np.minimum(np.maximum(x, min), max)
    return out


class TestHardtanhAPI(unittest.TestCase):
    # test paddle.nn.Hardtanh, paddle.nn.functional.hardtanh
    def setUp(self):
914
        np.random.seed(1024)
915
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
916 917 918
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
919
            else paddle.CPUPlace()
920
        )
921 922

    def test_static_api(self):
923
        paddle.enable_static()
924
        with paddle.static.program_guard(paddle.static.Program()):
925
            x = paddle.fluid.data('X', [10, 12])
926 927 928 929 930 931 932
            out1 = F.hardtanh(x)
            m = paddle.nn.Hardtanh()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardtanh(self.x_np)
        for r in res:
933
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
934 935 936

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
Z
Zhou Wei 已提交
937
        x = paddle.to_tensor(self.x_np)
938 939 940 941 942
        out1 = F.hardtanh(x)
        m = paddle.nn.Hardtanh()
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np)
        for r in [out1, out2]:
943
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
944 945 946 947 948 949

        out1 = F.hardtanh(x, -2.0, 2.0)
        m = paddle.nn.Hardtanh(-2.0, 2.0)
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np, -2.0, 2.0)
        for r in [out1, out2]:
950
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
951 952 953
        paddle.enable_static()

    def test_errors(self):
954
        paddle.enable_static()
955 956 957 958
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.hardtanh, 1)
            # The input dtype must be float16, float32, float64.
959 960 961
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
962 963
            self.assertRaises(TypeError, F.hardtanh, x_int32)
            # support the input dtype is float16
964 965 966
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
967 968 969
            F.hardtanh(x_fp16)


970 971 972
def ref_softshrink(x, threshold=0.5):
    out = np.copy(x)
    out = (out < -threshold) * (out + threshold) + (out > threshold) * (
973 974
        out - threshold
    )
975 976 977 978
    return out


class TestSoftshrink(TestActivation):
979 980
    def setUp(self):
        self.op_type = "softshrink"
981 982
        self.check_eager = True
        self.python_api = paddle.nn.functional.softshrink
983
        self.init_dtype()
984
        self.init_shape()
985

986
        threshold = 0.8
987

988
        np.random.seed(1023)
989
        x = np.random.uniform(0.25, 10, self.shape).astype(self.dtype)
990 991 992
        out = ref_softshrink(x, threshold)
        self.inputs = {'X': x}
        self.attrs = {"lambda": threshold}
993
        self.outputs = {'Out': out}
994 995

    def test_check_grad(self):
996 997
        if self.dtype == np.float16:
            return
998
        self.check_grad(['X'], 'Out', check_eager=True)
999

1000

1001 1002 1003 1004 1005
class TestSoftshrink_ZeroDim(TestSoftshrink):
    def init_shape(self):
        self.shape = []


1006 1007 1008 1009
class TestSoftshrinkAPI(unittest.TestCase):
    # test paddle.nn.Softshrink, paddle.nn.functional.softshrink
    def setUp(self):
        self.threshold = 0.8
1010
        np.random.seed(1024)
1011
        self.x_np = np.random.uniform(0.25, 10, [10, 12]).astype(np.float64)
1012 1013 1014
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
1015
            else paddle.CPUPlace()
1016
        )
1017 1018

    def test_static_api(self):
1019
        paddle.enable_static()
1020
        with paddle.static.program_guard(paddle.static.Program()):
1021
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
1022 1023 1024 1025 1026 1027 1028
            out1 = F.softshrink(x, self.threshold)
            softshrink = paddle.nn.Softshrink(self.threshold)
            out2 = softshrink(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in res:
1029
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
1030 1031 1032 1033 1034 1035 1036 1037 1038

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softshrink(x, self.threshold)
        softshrink = paddle.nn.Softshrink(self.threshold)
        out2 = softshrink(x)
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in [out1, out2]:
1039
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
1040 1041
        paddle.enable_static()

1042
    def test_errors(self):
1043
        paddle.enable_static()
1044
        with paddle.static.program_guard(paddle.static.Program()):
1045
            # The input type must be Variable.
1046
            self.assertRaises(TypeError, F.softshrink, 1)
1047
            # The input dtype must be float16, float32, float64.
1048 1049 1050
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
1051
            self.assertRaises(TypeError, F.softshrink, x_int32)
1052
            # The threshold must be no less than zero
1053 1054 1055
            x_fp32 = paddle.fluid.data(
                name='x_fp32', shape=[12, 10], dtype='float32'
            )
1056
            self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
1057
            # support the input dtype is float16
1058 1059 1060
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
1061
            F.softshrink(x_fp16)
1062 1063


1064
class TestSqrt(TestActivation, TestParameter):
1065 1066
    def setUp(self):
        self.op_type = "sqrt"
1067
        self.python_api = paddle.sqrt
1068
        self.init_dtype()
1069
        self.init_shape()
1070

1071
        np.random.seed(1023)
1072
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
1073 1074 1075 1076
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1077 1078

    def test_check_grad(self):
1079 1080
        if self.dtype == np.float16:
            return
1081 1082 1083 1084
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)
1085

1086

1087 1088 1089 1090 1091
class TestSqrt_ZeroDim(TestSqrt):
    def init_shape(self):
        self.shape = []


1092 1093 1094
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
1095 1096 1097
class TestSqrtBF16(OpTest):
    def setUp(self):
        self.op_type = "sqrt"
1098
        self.python_api = paddle.sqrt
1099
        self.init_dtype()
1100
        self.init_shape()
1101 1102

        np.random.seed(1023)
1103
        x = np.random.uniform(0.1, 1, self.shape).astype(np.float32)
1104 1105 1106 1107 1108 1109 1110 1111 1112 1113
        out = np.sqrt(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

1114 1115 1116
    def init_shape(self):
        self.shape = [11, 17]

1117 1118
    def test_check_output(self):
        place = core.CUDAPlace(0)
1119
        self.check_output_with_place(place, check_eager=True)
1120 1121 1122

    def test_check_grad(self):
        place = core.CUDAPlace(0)
1123
        self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
1124 1125


Z
zhoukunsheng 已提交
1126 1127 1128
class TestRsqrt(TestActivation):
    def setUp(self):
        self.op_type = "rsqrt"
Z
zyfncg 已提交
1129
        self.python_api = paddle.rsqrt
Z
zhoukunsheng 已提交
1130
        self.init_dtype()
1131
        self.init_shape()
Z
zhoukunsheng 已提交
1132

1133
        np.random.seed(1024)
1134
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) * 10
Z
zhoukunsheng 已提交
1135 1136 1137 1138 1139
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

1140 1141 1142
    def init_shape(self):
        self.shape = [10, 12]

Z
zhoukunsheng 已提交
1143 1144 1145
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
1146 1147 1148
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.0005, check_eager=True
        )
Z
zhoukunsheng 已提交
1149 1150


1151 1152 1153 1154 1155 1156 1157 1158
'''
class TestRsqrt_ZeroDim(TestRsqrt):

    def init_shape(self):
        self.shape = []
'''


C
chengduo 已提交
1159
class TestAbs(TestActivation):
1160 1161
    def setUp(self):
        self.op_type = "abs"
1162
        self.init_dtype()
1163
        self.init_shape()
1164

1165
        np.random.seed(1024)
1166
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
C
chengduo 已提交
1167
        # Because we set delta = 0.005 in calculating numeric gradient,
Q
qijun 已提交
1168
        # if x is too small, such as 0.002, x_neg will be -0.003
C
chengduo 已提交
1169
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
Q
qijun 已提交
1170 1171
        # we should avoid this
        x[np.abs(x) < 0.005] = 0.02
1172 1173 1174 1175
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1176

1177 1178 1179
    def init_shape(self):
        self.shape = [4, 25]

1180
    def test_check_grad(self):
1181 1182
        if self.dtype == np.float16:
            return
1183
        self.check_grad(['X'], 'Out', check_eager=False)
1184

1185

1186 1187 1188 1189 1190
class TestAbs_ZeroDim(TestAbs):
    def init_shape(self):
        self.shape = []


C
chengduo 已提交
1191
class TestCeil(TestActivation):
D
dzhwinter 已提交
1192 1193
    def setUp(self):
        self.op_type = "ceil"
1194 1195
        self.check_eager = True
        self.python_api = paddle.ceil
1196
        self.init_dtype()
1197
        self.init_shape()
1198

1199
        np.random.seed(1024)
1200
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1201 1202 1203 1204
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
D
dzhwinter 已提交
1205

1206 1207 1208
    def init_shape(self):
        self.shape = [10, 12]

D
dzhwinter 已提交
1209
    # The same reason with TestFloor
C
chengduo 已提交
1210
    def test_check_grad(self):
1211 1212 1213
        pass


1214 1215 1216 1217 1218
class TestCeil_ZeroDim(TestCeil):
    def init_shape(self):
        self.shape = []


C
chengduo 已提交
1219
class TestFloor(TestActivation):
D
dzhwinter 已提交
1220 1221
    def setUp(self):
        self.op_type = "floor"
1222 1223
        self.check_eager = True
        self.python_api = paddle.floor
1224
        self.init_dtype()
1225
        self.init_shape()
1226

1227
        np.random.seed(1024)
1228
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1229 1230 1231 1232
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
D
dzhwinter 已提交
1233

1234 1235 1236
    def init_shape(self):
        self.shape = [10, 12]

D
dzhwinter 已提交
1237
    # the gradient on floor, ceil, round is undefined.
1238
    # we return zero as gradient, but the numpy return nan
C
chengduo 已提交
1239 1240
    # The same reason with TestFloor
    def test_check_grad(self):
1241 1242 1243
        pass


1244 1245 1246 1247 1248
class TestFloor_ZeroDim(TestFloor):
    def init_shape(self):
        self.shape = []


C
chengduo 已提交
1249
class TestCos(TestActivation):
C
add cos  
chengduoZH 已提交
1250 1251
    def setUp(self):
        self.op_type = "cos"
1252
        self.init_dtype()
1253
        self.init_shape()
1254

1255
        np.random.seed(1024)
1256
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1257 1258 1259 1260
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
C
add sin  
chengduoZH 已提交
1261

1262 1263 1264
    def init_shape(self):
        self.shape = [10, 12]

C
add sin  
chengduoZH 已提交
1265
    def test_check_grad(self):
1266 1267
        if self.dtype == np.float16:
            return
1268
        self.check_grad(['X'], 'Out')
C
add sin  
chengduoZH 已提交
1269

1270

1271 1272 1273 1274 1275
class TestCos_ZeroDim(TestCos):
    def init_shape(self):
        self.shape = []


J
joejiong 已提交
1276 1277 1278 1279 1280
class TestTan(TestActivation):
    def setUp(self):
        np.random.seed(1024)
        self.op_type = "tan"
        self.init_dtype()
1281 1282
        self.init_shape()

J
joejiong 已提交
1283
        self.dtype = 'float32'
1284
        self.x_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1285 1286 1287
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
J
joejiong 已提交
1288
            else paddle.CPUPlace()
1289
        )
J
joejiong 已提交
1290 1291 1292 1293 1294 1295

        out = np.tan(self.x_np)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x_np)}
        self.outputs = {'Out': out}

1296 1297 1298
    def init_shape(self):
        self.shape = [10, 12]

J
joejiong 已提交
1299 1300 1301 1302 1303
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314

class TestTan_ZeroDim(TestTan):
    def init_shape(self):
        self.shape = []


class TestTanAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(1024)
        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
1315 1316 1317
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
1318
            else paddle.CPUPlace()
1319
        )
1320

J
joejiong 已提交
1321 1322 1323 1324 1325
    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out_test = paddle.tan(x)
        out_ref = np.tan(self.x_np)
1326
        np.testing.assert_allclose(out_ref, out_test.numpy(), rtol=1e-05)
J
joejiong 已提交
1327 1328 1329 1330 1331
        paddle.enable_static()

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
1332
            x = paddle.static.data('X', [11, 17], self.dtype)
J
joejiong 已提交
1333 1334 1335 1336
            out = paddle.tan(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = np.tan(self.x_np)
1337
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
J
joejiong 已提交
1338 1339 1340 1341

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
1342 1343 1344
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
J
joejiong 已提交
1345 1346 1347 1348 1349 1350 1351 1352
            var = paddle.to_tensor(input_x)
            var.stop_gradient = False
            loss = paddle.tan(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


1353 1354 1355 1356
class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
        self.init_dtype()
1357
        self.init_shape()
1358

1359
        np.random.seed(1024)
1360
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
1361 1362 1363 1364 1365
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

1366 1367 1368
    def init_shape(self):
        self.shape = [10, 12]

1369 1370 1371
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
1372
        self.check_grad(['X'], 'Out')
1373 1374


1375 1376 1377 1378 1379
class TestAcos_ZeroDim(TestAcos):
    def init_shape(self):
        self.shape = []


1380
class TestSin(TestActivation, TestParameter):
C
add sin  
chengduoZH 已提交
1381 1382
    def setUp(self):
        self.op_type = "sin"
1383
        self.init_dtype()
1384
        self.init_shape()
1385

1386
        np.random.seed(1024)
1387
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1388 1389 1390 1391
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
C
add cos  
chengduoZH 已提交
1392

1393 1394 1395
    def init_shape(self):
        self.shape = [10, 12]

C
add cos  
chengduoZH 已提交
1396
    def test_check_grad(self):
1397 1398
        if self.dtype == np.float16:
            return
1399
        self.check_grad(['X'], 'Out')
C
add cos  
chengduoZH 已提交
1400 1401


1402 1403 1404 1405 1406
class TestSin_ZeroDim(TestSin):
    def init_shape(self):
        self.shape = []


1407 1408 1409 1410
class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
        self.init_dtype()
1411
        self.init_shape()
1412

1413
        np.random.seed(2048)
1414
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
1415 1416 1417 1418 1419
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

1420 1421 1422
    def init_shape(self):
        self.shape = [10, 12]

1423 1424 1425
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
1426
        self.check_grad(['X'], 'Out')
1427 1428


1429 1430 1431 1432 1433
class TestAsin_ZeroDim(TestAsin):
    def init_shape(self):
        self.shape = []


X
xiaoting 已提交
1434 1435 1436 1437
class TestAcosh(TestActivation):
    def setUp(self):
        self.op_type = "acosh"
        self.init_dtype()
1438
        self.init_shape()
X
xiaoting 已提交
1439 1440

        np.random.seed(1024)
1441
        x = np.random.uniform(2, 3, self.shape).astype(self.dtype)
X
xiaoting 已提交
1442 1443 1444 1445 1446
        out = np.arccosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

1447 1448 1449
    def init_shape(self):
        self.shape = [10, 12]

X
xiaoting 已提交
1450 1451 1452 1453 1454 1455
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


1456 1457 1458 1459 1460
class TestAcosh_ZeroDim(TestAcosh):
    def init_shape(self):
        self.shape = []


X
xiaoting 已提交
1461 1462 1463 1464
class TestAsinh(TestActivation):
    def setUp(self):
        self.op_type = "asinh"
        self.init_dtype()
1465
        self.init_shape()
X
xiaoting 已提交
1466 1467

        np.random.seed(1024)
1468
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
X
xiaoting 已提交
1469 1470 1471 1472 1473
        out = np.arcsinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

1474 1475 1476
    def init_shape(self):
        self.shape = [10, 12]

X
xiaoting 已提交
1477 1478 1479 1480 1481 1482
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


1483 1484 1485 1486 1487
class TestAsinh_ZeroDim(TestAsinh):
    def init_shape(self):
        self.shape = []


X
xiaoting 已提交
1488 1489 1490 1491
class TestAtanh(TestActivation):
    def setUp(self):
        self.op_type = "atanh"
        self.init_dtype()
1492
        self.init_shape()
X
xiaoting 已提交
1493 1494

        np.random.seed(400)
1495
        x = np.random.uniform(-0.9, 0.9, self.shape).astype(self.dtype)
X
xiaoting 已提交
1496 1497 1498 1499 1500
        out = np.arctanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

1501 1502 1503
    def init_shape(self):
        self.shape = [10, 12]

X
xiaoting 已提交
1504 1505 1506 1507 1508 1509
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


1510 1511 1512 1513 1514
class TestAtanh_ZeroDim(TestAtanh):
    def init_shape(self):
        self.shape = []


C
chengduo 已提交
1515
class TestRound(TestActivation):
D
dzhwinter 已提交
1516 1517
    def setUp(self):
        self.op_type = "round"
1518 1519
        self.check_eager = True
        self.python_api = paddle.round
1520
        self.init_dtype()
1521
        self.init_shape()
1522

1523
        np.random.seed(1024)
1524
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1525 1526 1527 1528
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
D
dzhwinter 已提交
1529

1530 1531 1532
    def init_shape(self):
        self.shape = [10, 12]

C
chengduo 已提交
1533
    def test_check_grad(self):
1534 1535 1536
        pass


1537 1538 1539 1540 1541
class TestRound_ZeroDim(TestRound):
    def init_shape(self):
        self.shape = []


C
chengduo 已提交
1542
class TestRelu(TestActivation):
1543
    def setUp(self):
Q
qijun 已提交
1544
        self.op_type = "relu"
K
Kexin Zhao 已提交
1545
        self.init_dtype()
1546
        self.init_shape()
K
Kexin Zhao 已提交
1547

1548
        np.random.seed(1024)
1549
        if self.dtype == np.uint16:
1550
            x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
1551 1552 1553 1554 1555
            # The same reason with TestAbs
            x[np.abs(x) < 0.005] = 0.02
            out = convert_float_to_uint16(np.maximum(x, 0))
            self.inputs = {'X': convert_float_to_uint16(x)}
        else:
1556
            x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1557 1558 1559 1560
            # The same reason with TestAbs
            x[np.abs(x) < 0.005] = 0.02
            out = np.maximum(x, 0)
            self.inputs = {'X': x}
K
Kexin Zhao 已提交
1561 1562

        self.outputs = {'Out': out}
1563 1564

    def test_check_grad(self):
K
Kexin Zhao 已提交
1565 1566
        if self.dtype == np.float16:
            return
1567
        self.check_grad(['X'], 'Out')
A
Adam 已提交
1568 1569


1570 1571 1572 1573 1574
class TestRelu_ZeroDim(TestRelu):
    def init_shape(self):
        self.shape = []


1575 1576 1577
class TestReluAPI(unittest.TestCase):
    # test paddle.nn.ReLU, paddle.nn.functional.relu
    def setUp(self):
1578
        np.random.seed(1024)
1579
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
1580 1581 1582
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
1583
            else paddle.CPUPlace()
1584
        )
1585 1586 1587 1588
        self.executed_api()

    def executed_api(self):
        self.relu = F.relu
1589 1590

    def test_static_api(self):
1591
        paddle.enable_static()
1592
        with paddle.static.program_guard(paddle.static.Program()):
1593
            x = paddle.fluid.data('X', [10, 12])
1594
            out1 = self.relu(x)
1595 1596 1597 1598 1599 1600
            m = paddle.nn.ReLU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = np.maximum(self.x_np, 0)
        for r in res:
1601
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
1602 1603 1604 1605 1606

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ReLU()
1607 1608
        out1 = m(x)
        out2 = self.relu(x)
1609 1610
        out_ref = np.maximum(self.x_np, 0)
        for r in [out1, out2]:
1611
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
1612 1613
        paddle.enable_static()

1614
    def test_errors(self):
1615
        paddle.enable_static()
1616
        with paddle.static.program_guard(paddle.static.Program()):
1617
            # The input type must be Variable.
1618
            self.assertRaises(TypeError, self.relu, 1)
1619
            # The input dtype must be float16, float32, float64.
1620 1621 1622
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[10, 12], dtype='int32'
            )
1623
            self.assertRaises(TypeError, self.relu, x_int32)
1624
            # support the input dtype is float16
1625 1626 1627
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[10, 12], dtype='float16'
            )
1628 1629 1630 1631 1632 1633 1634
            self.relu(x_fp16)


class TestReluInplaceAPI(TestReluAPI):
    # test paddle.nn.functional.relu_
    def executed_api(self):
        self.relu = F.relu_
1635 1636


1637 1638 1639 1640 1641 1642
def ref_leaky_relu(x, alpha=0.01):
    out = np.copy(x)
    out[out < 0] *= alpha
    return out


A
Adam 已提交
1643
class TestLeakyRelu(TestActivation):
1644 1645 1646
    def get_alpha(self):
        return 0.02

A
Adam 已提交
1647 1648 1649
    def setUp(self):
        self.op_type = "leaky_relu"
        self.init_dtype()
1650
        self.init_shape()
1651
        alpha = self.get_alpha()
A
Adam 已提交
1652

1653
        np.random.seed(1024)
1654
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
A
Adam 已提交
1655
        # The same reason with TestAbs
1656 1657
        x[np.abs(x) < 0.005] = 0.05
        out = ref_leaky_relu(x, alpha)
A
Adam 已提交
1658

1659
        self.inputs = {'X': x}
A
Adam 已提交
1660
        self.outputs = {'Out': out}
1661
        self.attrs = {'alpha': alpha}
A
Adam 已提交
1662 1663 1664 1665

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
1666
        self.check_grad(['X'], 'Out')
1667 1668


1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683
class TestLeakyReluAlpha1(TestLeakyRelu):
    def get_alpha(self):
        return 2


class TestLeakyReluAlpha2(TestLeakyRelu):
    def get_alpha(self):
        return -0.01


class TestLeakyReluAlpha3(TestLeakyRelu):
    def get_alpha(self):
        return -2.0


1684 1685 1686 1687 1688
class TestLeakyRelu_ZeroDim(TestLeakyRelu):
    def init_shape(self):
        self.shape = []


1689 1690 1691
class TestLeakyReluAPI(unittest.TestCase):
    # test paddle.nn.LeakyReLU, paddle.nn.functional.leaky_relu,
    def setUp(self):
1692
        np.random.seed(1024)
1693
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
1694 1695 1696
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
1697
            else paddle.CPUPlace()
1698
        )
1699 1700

    def test_static_api(self):
1701
        paddle.enable_static()
1702
        with paddle.static.program_guard(paddle.static.Program()):
1703
            x = paddle.fluid.data('X', [10, 12])
1704 1705 1706 1707 1708 1709 1710
            out1 = F.leaky_relu(x)
            m = paddle.nn.LeakyReLU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_leaky_relu(self.x_np)
        for r in res:
1711
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
1712 1713 1714

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
Z
Zhou Wei 已提交
1715
        x = paddle.to_tensor(self.x_np)
1716 1717 1718 1719 1720
        out1 = F.leaky_relu(x)
        m = paddle.nn.LeakyReLU()
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np)
        for r in [out1, out2]:
1721
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
1722 1723 1724 1725 1726 1727

        out1 = F.leaky_relu(x, 0.6)
        m = paddle.nn.LeakyReLU(0.6)
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np, 0.6)
        for r in [out1, out2]:
1728
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
1729 1730
        paddle.enable_static()

1731
    def test_errors(self):
1732
        paddle.enable_static()
1733
        with paddle.static.program_guard(paddle.static.Program()):
1734
            # The input type must be Variable.
1735
            self.assertRaises(TypeError, F.leaky_relu, 1)
1736
            # The input dtype must be float16, float32, float64.
1737 1738 1739
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
1740 1741
            self.assertRaises(TypeError, F.leaky_relu, x_int32)
            # support the input dtype is float16
1742 1743 1744
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
1745
            F.leaky_relu(x_fp16)
1746 1747


1748 1749
def gelu(x, approximate):
    if approximate:
1750 1751 1752 1753 1754 1755 1756 1757
        y_ref = (
            0.5
            * x
            * (
                1.0
                + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3)))
            )
        )
1758 1759 1760 1761 1762 1763
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)


class TestGeluApproximate(TestActivation):
C
Clementine 已提交
1764 1765 1766
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
1767
        self.init_shape()
1768
        approximate = True
1769
        np.random.seed(1024)
1770
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1771
        out = gelu(x, approximate)
C
Clementine 已提交
1772

1773
        self.inputs = {'X': x}
1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
1787
        self.init_shape()
1788
        approximate = False
1789
        np.random.seed(2048)
1790
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1791
        out = gelu(x, approximate)
C
Clementine 已提交
1792

1793
        self.inputs = {'X': x}
C
Clementine 已提交
1794
        self.outputs = {'Out': out}
1795
        self.attrs = {"approximate": approximate}
C
Clementine 已提交
1796 1797 1798 1799

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
1800
        self.check_grad(['X'], 'Out')
C
Clementine 已提交
1801 1802


1803 1804 1805 1806 1807
class TestGelu_ZeroDim(TestGelu):
    def init_shape(self):
        self.shape = []


1808 1809 1810
class TestGELUAPI(unittest.TestCase):
    # test paddle.nn.GELU, paddle.nn.functional.gelu
    def setUp(self):
1811
        np.random.seed(1024)
1812
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
1813 1814 1815
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
1816
            else paddle.CPUPlace()
1817
        )
1818 1819

    def test_static_api(self):
1820
        paddle.enable_static()
1821
        with paddle.static.program_guard(paddle.static.Program()):
1822
            x = paddle.fluid.data('X', [11, 17])
1823 1824 1825 1826 1827 1828 1829
            out1 = F.gelu(x)
            m = paddle.nn.GELU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = gelu(self.x_np, False)
        for r in res:
1830
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
1831 1832 1833 1834 1835 1836 1837 1838 1839

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.gelu(x)
        m = paddle.nn.GELU()
        out2 = m(x)
        out_ref = gelu(self.x_np, False)
        for r in [out1, out2]:
1840
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
1841 1842 1843 1844 1845 1846

        out1 = F.gelu(x, True)
        m = paddle.nn.GELU(True)
        out2 = m(x)
        out_ref = gelu(self.x_np, True)
        for r in [out1, out2]:
1847
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
1848 1849 1850
        paddle.enable_static()

    def test_errors(self):
1851
        paddle.enable_static()
1852 1853 1854 1855
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.gelu, 1)
            # The input dtype must be float16, float32, float64.
1856 1857 1858
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[11, 17], dtype='int32'
            )
1859 1860
            self.assertRaises(TypeError, F.gelu, x_int32)
            # support the input dtype is float16
1861 1862 1863
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[11, 17], dtype='float16'
            )
1864 1865 1866
            F.gelu(x_fp16)


C
chengduo 已提交
1867
class TestBRelu(TestActivation):
1868 1869
    def setUp(self):
        self.op_type = "brelu"
1870 1871
        self.init_dtype()

1872
        np.random.seed(1024)
Z
zhupengyang 已提交
1873
        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
Y
Yang Yang(Tony) 已提交
1874 1875
        t_min = 1.0
        t_max = 4.0
Q
qijun 已提交
1876 1877
        # The same with TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
Q
qijun 已提交
1878
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
1879 1880 1881
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max
1882 1883 1884

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
F
fengjiayi 已提交
1885
        self.outputs = {'Out': t}
1886 1887

    def test_check_grad(self):
1888 1889
        if self.dtype == np.float16:
            return
1890
        self.check_grad(['X'], 'Out')
1891

1892

1893 1894 1895 1896 1897 1898 1899
def ref_relu6(x, threshold=6.0):
    out = np.copy(x)
    out[np.abs(x - threshold) < 0.005] = threshold + 0.02
    out = np.minimum(np.maximum(x, 0), threshold)
    return out


C
chengduo 已提交
1900
class TestRelu6(TestActivation):
K
Kavya Srinet 已提交
1901
    def setUp(self):
1902
        self.op_type = "relu6"
1903
        self.init_dtype()
1904
        self.init_shape()
1905
        self.python_api = paddle.nn.functional.relu6
1906

1907
        np.random.seed(1024)
1908
        x = np.random.uniform(-1, 10, self.shape).astype(self.dtype)
1909
        x[np.abs(x) < 0.005] = 0.02
1910
        out = ref_relu6(x)
1911

1912 1913
        self.inputs = {'X': x}
        self.attrs = {'threshold': 6.0}
1914
        self.outputs = {'Out': out}
K
Kavya Srinet 已提交
1915

1916 1917 1918
    def init_shape(self):
        self.shape = [10, 12]

1919 1920 1921
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
1922
        self.check_grad(['X'], 'Out', check_eager=True)
1923 1924


1925 1926 1927 1928 1929
class TestRelu6_ZeroDim(TestRelu6):
    def init_shape(self):
        self.shape = []


1930 1931 1932
class TestRelu6API(unittest.TestCase):
    # test paddle.nn.ReLU6, paddle.nn.functional.relu6
    def setUp(self):
1933
        np.random.seed(1024)
1934 1935
        self.x_np = np.random.uniform(-1, 10, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
1936 1937 1938
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
1939
            else paddle.CPUPlace()
1940
        )
1941 1942

    def test_static_api(self):
1943
        paddle.enable_static()
1944
        with paddle.static.program_guard(paddle.static.Program()):
1945
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
1946 1947 1948 1949 1950 1951 1952
            out1 = F.relu6(x)
            relu6 = paddle.nn.ReLU6()
            out2 = relu6(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_relu6(self.x_np)
        for r in res:
1953
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
1954 1955 1956 1957 1958 1959 1960 1961 1962

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.relu6(x)
        relu6 = paddle.nn.ReLU6()
        out2 = relu6(x)
        out_ref = ref_relu6(self.x_np)
        for r in [out1, out2]:
1963
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
1964 1965 1966
        paddle.enable_static()

    def test_fluid_api(self):
1967
        paddle.enable_static()
1968 1969
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
1970
            out = paddle.nn.functional.relu6(x)
1971 1972 1973
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_relu6(self.x_np)
1974
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
1975

1976
    def test_errors(self):
1977
        paddle.enable_static()
1978
        with paddle.static.program_guard(paddle.static.Program()):
1979
            # The input type must be Variable.
1980
            self.assertRaises(TypeError, F.relu6, 1)
1981
            # The input dtype must be float16, float32, float64.
1982 1983 1984
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
1985
            self.assertRaises(TypeError, F.relu6, x_int32)
1986
            # support the input dtype is float16
1987 1988 1989
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
1990
            F.relu6(x_fp16)
1991 1992


1993
def ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0):
Z
Zhang Ting 已提交
1994 1995 1996 1997
    x_dtype = x.dtype
    if x_dtype == 'float16':
        x_dtype = 'float16'
        x = x.astype('float32')
1998 1999 2000
    return (
        x * np.minimum(np.maximum(x + offset, 0.0), threshold) / scale
    ).astype(x_dtype)
2001 2002


H
huangjun12 已提交
2003 2004 2005 2006
class TestHardSwish(TestActivation):
    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()
2007
        self.init_shape()
2008
        self.python_api = paddle.nn.functional.hardswish
J
jakpiase 已提交
2009

2010
        np.random.seed(1024)
2011
        x = np.random.uniform(-6, 6, self.shape).astype(self.dtype)
H
huangjun12 已提交
2012 2013 2014
        threshold = 6.0
        scale = 6.0
        offset = 3.0
2015
        # the same with TestAbs
H
huangjun12 已提交
2016 2017
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
2018
        out = ref_hardswish(x, threshold, scale, offset)
H
huangjun12 已提交
2019

2020
        self.inputs = {'X': x}
H
huangjun12 已提交
2021 2022 2023
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.outputs = {'Out': out}

2024 2025 2026
    def init_shape(self):
        self.shape = [10, 12]

H
huangjun12 已提交
2027
    def test_check_grad(self):
2028 2029 2030 2031
        self.check_grad(['X'], 'Out', check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)
H
huangjun12 已提交
2032 2033


2034 2035 2036 2037 2038
class TestHardSwish_ZeroDim(TestHardSwish):
    def init_shape(self):
        self.shape = []


2039 2040 2041 2042
class TestHardswishAPI(unittest.TestCase):
    # test paddle.nn.Hardswish, paddle.nn.functional.hardswish
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
2043 2044 2045
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
2046
            else paddle.CPUPlace()
2047
        )
2048 2049 2050

    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
2051
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
2052 2053 2054 2055 2056 2057 2058
            out1 = F.hardswish(x)
            m = paddle.nn.Hardswish()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardswish(self.x_np)
        for r in res:
2059
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
2060 2061 2062

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
2063
        x = paddle.to_tensor([11648.0, 11448.0])
2064 2065 2066
        out1 = F.hardswish(x)
        m = paddle.nn.Hardswish()
        out2 = m(x)
2067
        out_ref = [11648.0, 11448.0]
2068
        for r in [out1, out2]:
2069
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2070
        paddle.enable_static()
2071 2072 2073 2074

    def test_fluid_api(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
2075
            out = paddle.nn.functional.hardswish(x)
2076 2077 2078
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardswish(self.x_np)
2079
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
2080 2081 2082

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
2083
        out = paddle.nn.functional.hardswish(x)
2084
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
2085 2086 2087 2088
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
2089
            # The input type must be Variable.
2090
            self.assertRaises(TypeError, F.hardswish, 1)
2091
            # The input dtype must be float16, float32, float64.
2092 2093 2094
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
2095
            self.assertRaises(TypeError, F.hardswish, x_int32)
2096
            # support the input dtype is float16
2097 2098 2099
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
2100
            F.hardswish(x_fp16)
2101 2102


C
chengduo 已提交
2103
class TestSoftRelu(TestActivation):
2104 2105
    def setUp(self):
        self.op_type = "soft_relu"
2106 2107
        self.init_dtype()

2108
        np.random.seed(4096)
2109
        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
Y
Yang Yang(Tony) 已提交
2110
        threshold = 2.0
Q
qijun 已提交
2111 2112
        # The same reason with TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
Z
zhupengyang 已提交
2113
        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
2114 2115 2116
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
2117 2118 2119 2120 2121
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}
2122 2123

    def test_check_grad(self):
2124 2125
        if self.dtype == np.float16:
            return
F
fengjiayi 已提交
2126
        self.check_grad(['X'], 'Out', max_relative_error=0.02)
2127

2128

2129
def elu(x, alpha):
Z
zhupengyang 已提交
2130
    out_ref = np.where(x > 0, x, alpha * (np.exp(x) - 1))
2131 2132 2133
    return out_ref.astype(x.dtype)


C
chengduo 已提交
2134
class TestELU(TestActivation):
2135 2136
    def setUp(self):
        self.op_type = "elu"
2137
        self.init_dtype()
2138
        self.init_shape()
2139

2140
        np.random.seed(1024)
2141
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
Z
zhupengyang 已提交
2142
        alpha = self.get_alpha()
2143
        out = elu(x, alpha)
2144 2145 2146 2147
        # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1)
        # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
2148
        self.outputs = {'Out': out}
2149

2150 2151 2152
    def init_shape(self):
        self.shape = [10, 12]

2153
    def test_check_grad(self):
2154 2155
        if self.dtype == np.float16:
            return
2156
        self.check_grad(['X'], 'Out')
2157

Z
zhupengyang 已提交
2158
    def get_alpha(self):
2159
        return 1.0
Z
zhupengyang 已提交
2160 2161 2162 2163 2164 2165


class TestELUAlpha(TestELU):
    def get_alpha(self):
        return -0.2

2166

2167 2168 2169 2170 2171
class TestELU_ZeroDim(TestELU):
    def init_shape(self):
        self.shape = []


2172 2173 2174
class TestELUAPI(unittest.TestCase):
    # test paddle.nn.ELU, paddle.nn.functional.elu
    def setUp(self):
2175
        np.random.seed(1024)
2176
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
2177 2178 2179
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
2180
            else paddle.CPUPlace()
2181
        )
2182 2183 2184 2185
        self.executed_api()

    def executed_api(self):
        self.elu = F.elu
2186 2187

    def test_static_api(self):
2188
        paddle.enable_static()
2189
        with paddle.static.program_guard(paddle.static.Program()):
2190
            x = paddle.fluid.data('X', [10, 12])
2191
            out1 = self.elu(x)
2192 2193 2194 2195 2196 2197
            m = paddle.nn.ELU()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = elu(self.x_np, 1.0)
        for r in res:
2198
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
2199 2200 2201 2202

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
2203 2204
        out1 = self.elu(x)
        x = paddle.to_tensor(self.x_np)
2205 2206 2207 2208
        m = paddle.nn.ELU()
        out2 = m(x)
        out_ref = elu(self.x_np, 1.0)
        for r in [out1, out2]:
2209
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2210

2211 2212
        out1 = self.elu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
2213 2214 2215 2216
        m = paddle.nn.ELU(0.2)
        out2 = m(x)
        out_ref = elu(self.x_np, 0.2)
        for r in [out1, out2]:
2217
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2218 2219
        paddle.enable_static()

2220
    def test_errors(self):
2221
        paddle.enable_static()
2222 2223
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
2224
            self.assertRaises(TypeError, self.elu, 1)
2225
            # The input dtype must be float16, float32, float64.
2226 2227 2228
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[10, 12], dtype='int32'
            )
2229
            self.assertRaises(TypeError, self.elu, x_int32)
2230
            # support the input dtype is float16
2231 2232 2233
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[10, 12], dtype='float16'
            )
2234 2235 2236
            self.elu(x_fp16)


Z
zhupengyang 已提交
2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248
class TestELUInplaceAPI(TestELUAPI):
    # test paddle.nn.functional.elu_
    def executed_api(self):
        self.elu = F.elu_

    def test_alpha_error(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        self.assertRaises(Exception, F.elu_, x, -0.2)
        paddle.enable_static()


2249 2250 2251 2252 2253 2254 2255 2256 2257
def celu(x, alpha):
    out_ref = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x / alpha) - 1))
    return out_ref.astype(x.dtype)


class TestCELU(TestActivation):
    def setUp(self):
        self.op_type = "celu"
        self.init_dtype()
2258
        self.init_shape()
2259

2260
        self.python_api = paddle.nn.functional.celu
2261
        np.random.seed(1024)
2262
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
2263 2264 2265 2266 2267 2268
        alpha = 1.5
        out = celu(x, alpha)
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

2269 2270 2271
    def init_shape(self):
        self.shape = [10, 12]

2272 2273 2274
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
2275
        self.check_grad(['X'], 'Out', check_eager=True)
2276 2277


2278 2279 2280 2281 2282
class TestCELU_ZeroDim(TestCELU):
    def init_shape(self):
        self.shape = []


2283 2284 2285 2286 2287
class TestCELUAPI(unittest.TestCase):
    # test paddle.nn.CELU, paddle.nn.functional.celu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
2288 2289 2290
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
2291
            else paddle.CPUPlace()
2292
        )
2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308
        self.executed_api()

    def executed_api(self):
        self.celu = F.celu

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out1 = self.celu(x, 1.5)
            m = paddle.nn.CELU(1.5)
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = celu(self.x_np, 1.5)
        for r in res:
2309
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
2310 2311 2312 2313 2314 2315 2316 2317 2318 2319

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = self.celu(x, 1.5)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(1.5)
        out2 = m(x)
        out_ref = celu(self.x_np, 1.5)
        for r in [out1, out2]:
2320
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2321 2322 2323 2324 2325 2326 2327

        out1 = self.celu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(0.2)
        out2 = m(x)
        out_ref = celu(self.x_np, 0.2)
        for r in [out1, out2]:
2328
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2329 2330 2331 2332 2333 2334 2335 2336
        paddle.enable_static()

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, self.celu, 1)
            # The input dtype must be float16, float32, float64.
2337 2338 2339
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[10, 12], dtype='int32'
            )
2340 2341
            self.assertRaises(TypeError, self.celu, x_int32)
            # The alpha must be not equal 0
2342 2343 2344
            x_fp32 = paddle.fluid.data(
                name='x_fp32', shape=[10, 12], dtype='float32'
            )
2345 2346
            self.assertRaises(ZeroDivisionError, F.celu, x_fp32, 0)
            # support the input dtype is float16
2347 2348 2349
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[10, 12], dtype='float16'
            )
2350 2351 2352
            self.celu(x_fp16)


C
chengduo 已提交
2353
class TestReciprocal(TestActivation):
Q
qijun 已提交
2354 2355
    def setUp(self):
        self.op_type = "reciprocal"
2356
        self.python_api = paddle.reciprocal
2357
        self.init_dtype()
2358
        self.init_shape()
2359

2360
        np.random.seed(1024)
2361
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
2362 2363 2364 2365
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
Q
qijun 已提交
2366 2367

    def test_check_grad(self):
2368 2369
        if self.dtype == np.float16:
            return
2370 2371 2372 2373
        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_eager=True)

    def test_check_output(self):
        self.check_output(check_eager=True)
Q
qijun 已提交
2374 2375


2376 2377 2378 2379 2380
class TestReciprocal_ZeroDim(TestReciprocal):
    def init_shape(self):
        self.shape = []


C
chengduo 已提交
2381
class TestLog(TestActivation):
Q
qijun 已提交
2382 2383
    def setUp(self):
        self.op_type = "log"
2384 2385
        self.check_eager = True
        self.python_api = paddle.log
2386
        self.init_dtype()
2387
        self.init_shape()
2388

2389
        np.random.seed(1024)
2390
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
2391 2392 2393 2394
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
Q
qijun 已提交
2395 2396

    def test_check_grad(self):
2397 2398
        if self.dtype == np.float16:
            return
2399
        self.check_grad(['X'], 'Out', check_eager=True)
Q
qijun 已提交
2400

2401
    def test_error(self):
2402 2403 2404 2405 2406 2407
        in1 = fluid.layers.data(
            name="in1", shape=[11, 17], append_batch_size=False, dtype="int32"
        )
        in2 = fluid.layers.data(
            name="in2", shape=[11, 17], append_batch_size=False, dtype="int64"
        )
2408

2409 2410
        self.assertRaises(TypeError, paddle.log, in1)
        self.assertRaises(TypeError, paddle.log, in2)
2411

2412

2413 2414 2415 2416 2417
class TestLog_ZeroDim(TestLog):
    def init_shape(self):
        self.shape = []


J
joejiong 已提交
2418 2419 2420
class TestLog2(TestActivation):
    def setUp(self):
        self.op_type = "log2"
2421 2422
        self.check_eager = True
        self.python_api = paddle.log2
J
joejiong 已提交
2423
        self.init_dtype()
2424
        self.init_shape()
J
joejiong 已提交
2425

2426
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
J
joejiong 已提交
2427 2428 2429 2430 2431 2432 2433 2434
        out = np.log2(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
2435
        self.check_grad(['X'], 'Out', check_eager=True)
J
joejiong 已提交
2436 2437 2438 2439 2440 2441 2442 2443 2444

    def test_error(self):
        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

        self.assertRaises(TypeError, paddle.log2, in1)
        self.assertRaises(TypeError, paddle.log2, in2)

    def test_api(self):
2445 2446 2447
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
J
joejiong 已提交
2448
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
2449 2450 2451
            data_x = paddle.static.data(
                name="data_x", shape=[11, 17], dtype="float64"
            )
J
joejiong 已提交
2452 2453 2454 2455

            out1 = paddle.log2(data_x)
            exe = paddle.static.Executor(place=fluid.CPUPlace())
            exe.run(paddle.static.default_startup_program())
2456 2457 2458 2459 2460
            (res1,) = exe.run(
                paddle.static.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[out1],
            )
J
joejiong 已提交
2461
        expected_res = np.log2(input_x)
2462
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)
J
joejiong 已提交
2463 2464 2465 2466 2467 2468 2469 2470

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log2(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log2(np_x))
2471
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)
J
joejiong 已提交
2472 2473


2474 2475 2476 2477 2478
class TestLog2_ZeroDim(TestLog2):
    def init_shape(self):
        self.shape = []


J
joejiong 已提交
2479 2480 2481
class TestLog10(TestActivation):
    def setUp(self):
        self.op_type = "log10"
2482 2483
        self.check_eager = True
        self.python_api = paddle.log10
J
joejiong 已提交
2484
        self.init_dtype()
2485
        self.init_shape()
J
joejiong 已提交
2486

2487
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
J
joejiong 已提交
2488 2489 2490 2491 2492 2493 2494 2495
        out = np.log10(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
2496
        self.check_grad(['X'], 'Out', check_eager=True)
J
joejiong 已提交
2497

2498 2499 2500 2501 2502 2503 2504

class TestLog10_ZeroDim(TestLog10):
    def init_shape(self):
        self.shape = []


class TestLog10API(unittest.TestCase):
J
joejiong 已提交
2505 2506 2507 2508 2509 2510 2511 2512
    def test_error(self):
        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

        self.assertRaises(TypeError, paddle.log10, in1)
        self.assertRaises(TypeError, paddle.log10, in2)

    def test_api(self):
2513 2514 2515
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):
J
joejiong 已提交
2516
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
2517 2518 2519
            data_x = paddle.static.data(
                name="data_x", shape=[11, 17], dtype="float64"
            )
J
joejiong 已提交
2520 2521 2522 2523

            out1 = paddle.log10(data_x)
            exe = paddle.static.Executor(place=paddle.CPUPlace())
            exe.run(paddle.static.default_startup_program())
2524 2525 2526 2527 2528
            (res1,) = exe.run(
                paddle.static.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[out1],
            )
J
joejiong 已提交
2529
        expected_res = np.log10(input_x)
2530
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)
J
joejiong 已提交
2531 2532 2533 2534 2535 2536 2537 2538

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log10(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log10(np_x))
2539
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)
J
joejiong 已提交
2540 2541


2542 2543 2544
class TestLog1p(TestActivation):
    def setUp(self):
        self.op_type = "log1p"
2545 2546
        self.check_eager = True
        self.python_api = paddle.log1p
2547
        self.init_dtype()
2548
        self.init_shape()
2549

2550
        np.random.seed(1024)
2551
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
2552 2553 2554 2555 2556 2557 2558 2559
        out = np.log1p(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
2560
        self.check_grad(['X'], 'Out', check_eager=True)
2561

2562 2563 2564 2565 2566 2567 2568

class TestLog1p_ZeroDim(TestLog1p):
    def init_shape(self):
        self.shape = []


class TestLog1pAPI(unittest.TestCase):
2569 2570 2571
    def test_api(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
2572 2573 2574 2575 2576 2577
            data_x = fluid.layers.data(
                name="data_x",
                shape=[11, 17],
                append_batch_size=False,
                dtype="float64",
            )
2578 2579 2580 2581

            out1 = paddle.log1p(data_x)
            exe = fluid.Executor(place=fluid.CPUPlace())
            exe.run(fluid.default_startup_program())
2582 2583 2584 2585 2586
            (res1,) = exe.run(
                fluid.default_main_program(),
                feed={"data_x": input_x},
                fetch_list=[out1],
            )
2587
        expected_res = np.log1p(input_x)
2588
        np.testing.assert_allclose(res1, expected_res, rtol=1e-05)
2589 2590 2591 2592 2593 2594 2595 2596

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.dygraph.to_variable(np_x)
            z = paddle.log1p(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log1p(np_x))
2597
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)
2598 2599


C
chengduo 已提交
2600
class TestSquare(TestActivation):
Q
qijun 已提交
2601 2602
    def setUp(self):
        self.op_type = "square"
2603
        self.python_api = paddle.square
2604
        self.init_dtype()
2605
        self.init_shape()
2606

2607
        np.random.seed(1024)
2608
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
2609 2610 2611 2612
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
Q
qijun 已提交
2613 2614

    def test_check_grad(self):
2615 2616
        if self.dtype == np.float16:
            return
2617 2618 2619
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.007, check_eager=True
        )
2620 2621 2622

    def test_check_output(self):
        self.check_output(check_eager=True)
Q
qijun 已提交
2623

2624

2625 2626 2627 2628 2629
class TestSquare_ZeroDim(TestSquare):
    def init_shape(self):
        self.shape = []


2630 2631 2632
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
2633 2634 2635
class TestSquareBF16(OpTest):
    def setUp(self):
        self.op_type = "square"
2636
        self.python_api = paddle.square
2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
        out = np.square(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
2653
        self.check_output_with_place(place, check_eager=True)
2654 2655 2656

    def test_check_grad(self):
        place = core.CUDAPlace(0)
2657 2658 2659
        self.check_grad_with_place(
            place, ['X'], 'Out', numeric_grad_delta=0.5, check_eager=True
        )
2660 2661


C
chengduo 已提交
2662
class TestPow(TestActivation):
2663 2664
    def setUp(self):
        self.op_type = "pow"
2665
        self.python_api = paddle.pow
2666
        self.check_eager = True
2667
        self.init_dtype()
2668
        self.init_shape()
2669

2670
        np.random.seed(1024)
2671
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
2672 2673 2674
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
Y
Yang Yang(Tony) 已提交
2675
        self.attrs = {'factor': 3.0}
2676
        self.outputs = {'Out': out}
2677

2678 2679 2680
    def test_check_output(self):
        self.check_output(check_eager=self.check_eager)

2681
    def test_check_grad(self):
2682 2683
        if self.dtype == np.float16:
            return
2684
        self.check_grad(['X'], 'Out', check_eager=self.check_eager)
2685

2686

2687 2688 2689 2690 2691
class TestPow_ZeroDim(TestPow):
    def init_shape(self):
        self.shape = []


2692 2693 2694
class TestPow_factor_tensor(TestActivation):
    def setUp(self):
        self.op_type = "pow"
2695 2696
        self.check_eager = False
        self.python_api = paddle.pow
2697 2698
        self.init_dtype()

2699
        np.random.seed(1024)
2700 2701 2702 2703 2704
        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
2705
            'FactorTensor': np.array([3.0]).astype("float32"),
2706 2707 2708 2709 2710 2711
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
2712
        self.check_output(check_eager=self.check_eager)
2713 2714 2715 2716

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
2717
        self.check_grad(['X'], 'Out', check_eager=self.check_eager)
2718 2719 2720

    def test_api(self):
        input = np.random.uniform(1, 2, [11, 17]).astype("float32")
2721 2722 2723 2724 2725 2726
        x = fluid.layers.data(
            name="x", shape=[11, 17], append_batch_size=False, dtype="float32"
        )
        res = fluid.layers.data(
            name="res", shape=[11, 17], append_batch_size=False, dtype="float32"
        )
2727 2728 2729

        factor_1 = 2.0
        factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
2730 2731
        out_1 = paddle.pow(x, factor_1)
        out_2 = paddle.pow(x, factor_2)
2732 2733 2734
        out_4 = paddle.pow(x, factor_1, name='pow_res')
        out_6 = paddle.pow(x, factor_2)
        self.assertEqual(('pow_res' in out_4.name), True)
2735 2736

        exe = fluid.Executor(place=fluid.CPUPlace())
W
WuHaobo 已提交
2737
        res_1, res_2, res, res_6 = exe.run(
2738 2739
            fluid.default_main_program(),
            feed={"x": input},
2740 2741
            fetch_list=[out_1, out_2, res, out_6],
        )
2742

2743 2744 2745
        assert np.allclose(res_1, np.power(input, 2))
        assert np.allclose(res_2, np.power(input, 3))
        assert np.allclose(res_6, np.power(input, 3))
2746 2747


2748 2749 2750 2751 2752
def ref_stanh(x, scale_a=0.67, scale_b=1.7159):
    out = scale_b * np.tanh(x * scale_a)
    return out


C
chengduo 已提交
2753
class TestSTanh(TestActivation):
2754 2755 2756 2757 2758 2759
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

2760 2761
    def setUp(self):
        self.op_type = "stanh"
2762
        self.init_dtype()
2763 2764
        self.init_shape()

2765 2766
        scale_a = self.get_scale_a()
        scale_b = self.get_scale_b()
2767

2768
        np.random.seed(1024)
2769
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
2770 2771
        # The same reason with TestAbs
        out = ref_stanh(x, scale_a, scale_b)
2772

2773
        self.inputs = {'X': x}
2774
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
2775
        self.outputs = {'Out': out}
2776

Q
qijun 已提交
2777
    def test_check_grad(self):
2778 2779
        if self.dtype == np.float16:
            return
2780
        self.check_grad(['X'], 'Out')
Q
qijun 已提交
2781

2782

2783 2784 2785 2786 2787 2788 2789 2790 2791 2792
class TestSTanhScaleA(TestSTanh):
    def get_scale_a(self):
        return 2.0


class TestSTanhScaleB(TestSTanh):
    def get_scale_b(self):
        return 0.5


2793 2794 2795 2796 2797
class TestSTanh_ZeroDim(TestSTanh):
    def init_shape(self):
        self.shape = []


2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810
class TestSTanhAPI(unittest.TestCase):
    # test paddle.nn.stanh
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.scale_a = self.get_scale_a()
        self.scale_b = self.get_scale_b()
2811 2812 2813
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
2814
            else paddle.CPUPlace()
2815
        )
2816 2817 2818 2819 2820 2821 2822 2823 2824 2825

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', [10, 12])
            out = paddle.stanh(x, self.scale_a, self.scale_b)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in res:
2826
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
2827 2828 2829 2830 2831 2832 2833

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.stanh(x, self.scale_a, self.scale_b)
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in [out]:
2834
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2835 2836 2837 2838 2839 2840
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', [10, 12])
2841
            out = paddle.stanh(x, self.scale_a, self.scale_b)
2842 2843 2844
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
2845
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
2846

2847
    def test_errors(self):
2848 2849
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
2850
            # The input type must be Variable.
2851
            self.assertRaises(TypeError, paddle.stanh, 1)
2852
            # The input dtype must be float16, float32, float64.
2853 2854 2855
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
2856
            self.assertRaises(TypeError, paddle.stanh, x_int32)
2857
            # support the input dtype is float16
2858 2859 2860
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871
            paddle.stanh(x_fp16)


class TestSTanhAPIScaleA(TestSTanhAPI):
    def get_scale_a(self):
        return 2.0


class TestSTanhAPIScaleB(TestSTanhAPI):
    def get_scale_b(self):
        return 0.5
2872 2873


2874 2875
def ref_softplus(x, beta=1, threshold=20):
    x_beta = beta * x
2876 2877 2878 2879
    out = np.select(
        [x_beta <= threshold, x_beta > threshold],
        [np.log(1 + np.exp(x_beta)) / beta, x],
    )
2880 2881 2882
    return out


C
chengduo 已提交
2883
class TestSoftplus(TestActivation):
K
kexinzhao 已提交
2884 2885
    def setUp(self):
        self.op_type = "softplus"
W
Wang Bojun 已提交
2886
        self.python_api = paddle.nn.functional.softplus
2887
        self.init_dtype()
2888
        self.init_shape()
2889

2890 2891
        beta = 2
        threshold = 15
2892

2893
        np.random.seed(1024)
2894
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
2895 2896 2897
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': x}
        self.attrs = {'beta': beta, "threshold": threshold}
2898
        self.outputs = {'Out': out}
K
kexinzhao 已提交
2899

W
Wang Bojun 已提交
2900 2901
        self.check_eager = True

2902 2903 2904
    def init_shape(self):
        self.shape = [10, 12]

K
kexinzhao 已提交
2905
    def test_check_grad(self):
2906 2907
        if self.dtype == np.float16:
            return
W
Wang Bojun 已提交
2908 2909 2910
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)
K
kexinzhao 已提交
2911

2912

2913 2914 2915 2916 2917
class TestSoftplus_ZeroDim(TestSoftplus):
    def init_shape(self):
        self.shape = []


2918 2919 2920
@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947
class TestSoftplusBF16(OpTest):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(np.float32)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', numeric_grad_delta=0.05)


2948 2949 2950 2951 2952
class TestSoftplusAPI(unittest.TestCase):
    # test paddle.nn.Softplus, paddle.nn.functional.softplus
    def setUp(self):
        self.beta = 2
        self.threshold = 15
2953
        np.random.seed(1024)
2954
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
2955 2956 2957
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
2958
            else paddle.CPUPlace()
2959
        )
2960 2961

    def test_static_api(self):
2962
        paddle.enable_static()
2963
        with paddle.static.program_guard(paddle.static.Program()):
2964
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
2965 2966 2967 2968 2969 2970 2971
            out1 = F.softplus(x, self.beta, self.threshold)
            softplus = paddle.nn.Softplus(self.beta, self.threshold)
            out2 = softplus(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in res:
2972
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
2973 2974 2975 2976 2977 2978 2979 2980 2981

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softplus(x, self.beta, self.threshold)
        softplus = paddle.nn.Softplus(self.beta, self.threshold)
        out2 = softplus(x)
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in [out1, out2]:
2982
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
2983 2984 2985
        paddle.enable_static()

    def test_errors(self):
2986
        paddle.enable_static()
2987 2988 2989 2990
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softplus, 1)
            # The input dtype must be float16, float32, float64.
2991 2992 2993
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
2994 2995
            self.assertRaises(TypeError, F.softplus, x_int32)
            # support the input dtype is float16
2996 2997 2998
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
2999 3000 3001 3002 3003 3004 3005 3006
            F.softplus(x_fp16)


def ref_softsign(x):
    out = np.divide(x, 1 + np.abs(x))
    return out


C
chengduo 已提交
3007
class TestSoftsign(TestActivation):
3008 3009
    def setUp(self):
        self.op_type = "softsign"
3010
        self.init_dtype()
3011 3012
        self.init_shape()

3013
        self.python_api = paddle.nn.functional.softsign
3014

3015
        np.random.seed(1024)
3016
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
3017 3018
        out = ref_softsign(x)
        self.inputs = {'X': x}
3019
        self.outputs = {'Out': out}
3020

3021 3022 3023
    def init_shape(self):
        self.shape = [10, 12]

3024
    def test_check_grad(self):
3025 3026
        if self.dtype == np.float16:
            return
3027
        self.check_grad(['X'], 'Out', check_eager=True)
3028 3029


3030 3031 3032 3033 3034
class TestSoftsign_ZeroDim(TestSoftsign):
    def init_shape(self):
        self.shape = []


3035 3036 3037
class TestSoftsignAPI(unittest.TestCase):
    # test paddle.nn.Softsign, paddle.nn.functional.softsign
    def setUp(self):
3038
        np.random.seed(1024)
3039
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
3040 3041 3042
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
3043
            else paddle.CPUPlace()
3044
        )
3045 3046

    def test_static_api(self):
3047
        paddle.enable_static()
3048
        with paddle.static.program_guard(paddle.static.Program()):
3049
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
3050 3051 3052 3053 3054 3055 3056
            out1 = F.softsign(x)
            softsign = paddle.nn.Softsign()
            out2 = softsign(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_softsign(self.x_np)
        for r in res:
3057
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
3058 3059 3060 3061 3062 3063 3064 3065 3066

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.softsign(x)
        softsign = paddle.nn.Softsign()
        out2 = softsign(x)
        out_ref = ref_softsign(self.x_np)
        for r in [out1, out2]:
3067
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
3068 3069 3070
        paddle.enable_static()

    def test_errors(self):
3071
        paddle.enable_static()
3072 3073 3074 3075
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.softsign, 1)
            # The input dtype must be float16, float32, float64.
3076 3077 3078
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
3079 3080
            self.assertRaises(TypeError, F.softsign, x_int32)
            # support the input dtype is float16
3081 3082 3083
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
3084 3085 3086
            F.softsign(x_fp16)


3087 3088 3089 3090 3091
def ref_thresholded_relu(x, threshold=1.0):
    out = (x > threshold) * x
    return out


C
chengduo 已提交
3092
class TestThresholdedRelu(TestActivation):
3093 3094
    def setUp(self):
        self.op_type = "thresholded_relu"
3095
        self.init_dtype()
3096
        self.init_shape()
3097

3098
        threshold = 15
3099

3100
        np.random.seed(1024)
3101
        x = np.random.uniform(-20, 20, self.shape).astype(self.dtype)
3102 3103 3104 3105
        x[np.abs(x) < 0.005] = 0.02
        out = ref_thresholded_relu(x, threshold)
        self.inputs = {'X': x}
        self.attrs = {"threshold": threshold}
3106
        self.outputs = {'Out': out}
3107

3108 3109 3110
    def init_shape(self):
        self.shape = [10, 12]

3111
    def test_check_grad(self):
3112 3113
        if self.dtype == np.float16:
            return
3114
        self.check_grad(['X'], 'Out')
3115 3116


3117 3118 3119 3120 3121
class TestThresholdedRelu_ZeroDim(TestThresholdedRelu):
    def init_shape(self):
        self.shape = []


3122 3123 3124 3125 3126 3127 3128
class TestThresholdedReluAPI(unittest.TestCase):
    # test paddle.nn.ThresholdedReLU, paddle.nn.functional.thresholded_relu
    def setUp(self):
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-20, 20, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
3129 3130 3131
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
3132
            else paddle.CPUPlace()
3133
        )
3134 3135 3136 3137

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
3138
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
3139 3140 3141 3142 3143 3144 3145
            out1 = F.thresholded_relu(x, self.threshold)
            thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
            out2 = thresholded_relu(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in res:
3146
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
3147 3148 3149 3150 3151 3152 3153 3154 3155

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.thresholded_relu(x, self.threshold)
        thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
        out2 = thresholded_relu(x)
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in [out1, out2]:
3156
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
3157 3158
        paddle.enable_static()

3159
    def test_errors(self):
3160 3161
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
3162
            # The input type must be Variable.
3163
            self.assertRaises(TypeError, F.thresholded_relu, 1)
3164
            # The input dtype must be float16, float32, float64.
3165 3166 3167
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
3168
            self.assertRaises(TypeError, F.thresholded_relu, x_int32)
3169
            # support the input dtype is float16
3170 3171 3172
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
3173
            F.thresholded_relu(x_fp16)
3174 3175


3176
def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5):
3177
    return np.maximum(np.minimum(x * slope + offset, 1.0), 0.0).astype(x.dtype)
3178 3179


C
chengduo 已提交
3180
class TestHardSigmoid(TestActivation):
3181 3182
    def setUp(self):
        self.op_type = "hard_sigmoid"
3183 3184 3185 3186
        self.dtype = 'float64'
        self.slope = 0.166666666666667
        self.offset = 0.5
        self.set_attrs()
3187
        self.init_shape()
3188

3189
        x = np.random.uniform(-5, 5, self.shape).astype(self.dtype)
3190
        lower_threshold = -self.offset / self.slope
3191
        upper_threshold = (1.0 - self.offset) / self.slope
Z
zhupengyang 已提交
3192

3193
        # Same reason as TestAbs
3194 3195 3196
        delta = 0.005
        x[np.abs(x - lower_threshold) < delta] = lower_threshold - 0.02
        x[np.abs(x - upper_threshold) < delta] = upper_threshold - 0.02
3197

3198
        out = ref_hardsigmoid(x, self.slope, self.offset)
3199

3200 3201
        self.attrs = {'slope': self.slope, 'offset': self.offset}
        self.inputs = {'X': x}
3202
        self.outputs = {'Out': out}
3203

3204 3205 3206
    def init_shape(self):
        self.shape = [10, 12]

3207 3208
    def set_attrs(self):
        pass
3209

3210

3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221
class TestHardSigmoidFP32(TestHardSigmoid):
    def set_attrs(self):
        self.dtype = 'float32'


class TestHardSigmoidSlopeOffset(TestHardSigmoid):
    def set_attrs(self):
        self.slope = 0.2
        self.offset = 0.4


3222 3223 3224 3225 3226
class TestHardSigmoid_ZeroDim(TestHardSigmoid):
    def init_shape(self):
        self.shape = []


3227 3228 3229 3230
class TestHardsigmoidAPI(unittest.TestCase):
    # test paddle.nn.Hardsigmoid, paddle.nn.functional.hardsigmoid
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
3231 3232 3233
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
3234
            else paddle.CPUPlace()
3235
        )
3236 3237 3238

    def test_static_api(self):
        with paddle.static.program_guard(paddle.static.Program()):
J
joejiong 已提交
3239
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
3240 3241 3242 3243 3244 3245 3246
            out1 = F.hardsigmoid(x)
            m = paddle.nn.Hardsigmoid()
            out2 = m(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_hardsigmoid(self.x_np)
        for r in res:
3247
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
3248 3249 3250 3251 3252 3253 3254 3255 3256

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardsigmoid(x)
        m = paddle.nn.Hardsigmoid()
        out2 = m(x)
        out_ref = ref_hardsigmoid(self.x_np)
        for r in [out1, out2]:
3257
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
3258
        paddle.enable_static()
3259 3260 3261 3262

    def test_fluid_api(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
3263
            out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
3264 3265 3266
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
3267
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
3268 3269 3270

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
3271
        out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
3272
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
3273 3274 3275 3276
        paddle.enable_static()

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
3277
            # The input type must be Variable.
3278
            self.assertRaises(TypeError, F.hardsigmoid, 1)
3279
            # The input dtype must be float16, float32, float64.
3280 3281 3282
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
3283
            self.assertRaises(TypeError, F.hardsigmoid, x_int32)
3284
            # support the input dtype is float16
3285 3286 3287
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
3288
            F.hardsigmoid(x_fp16)
3289 3290


3291 3292 3293 3294 3295
def ref_swish(x):
    out = x * expit(x)
    return out


C
chengduo 已提交
3296
class TestSwish(TestActivation):
A
Abhinav Arora 已提交
3297 3298
    def setUp(self):
        self.op_type = "swish"
3299
        self.python_api = paddle.nn.functional.swish
3300
        self.init_dtype()
3301 3302
        self.init_shape()

3303
        self.check_eager = True
3304

3305
        np.random.seed(1024)
3306
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
3307 3308
        out = ref_swish(x)
        self.inputs = {'X': x}
H
hong19860320 已提交
3309
        self.attrs = {'beta': 1.0}
3310
        self.outputs = {'Out': out}
A
Abhinav Arora 已提交
3311

3312 3313 3314
    def init_shape(self):
        self.shape = [10, 12]

A
Abhinav Arora 已提交
3315
    def test_check_grad(self):
3316 3317
        if self.dtype == np.float16:
            return
3318 3319 3320 3321
        check_eager = False
        if hasattr(self, 'check_eager'):
            check_eager = self.check_eager
        self.check_grad(['X'], 'Out', check_eager=check_eager)
3322

A
Abhinav Arora 已提交
3323

3324 3325 3326 3327 3328
class TestSwish_ZeroDim(TestSwish):
    def init_shape(self):
        self.shape = []


3329 3330 3331 3332 3333
class TestSwishAPI(unittest.TestCase):
    # test paddle.nn.Swish, paddle.nn.functional.swish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
3334 3335 3336
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
3337
            else paddle.CPUPlace()
3338
        )
3339 3340 3341 3342

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
J
joejiong 已提交
3343
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
3344 3345 3346 3347 3348 3349 3350
            out1 = F.swish(x)
            swish = paddle.nn.Swish()
            out2 = swish(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_swish(self.x_np)
        for r in res:
3351
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
3352

3353
    def test_dygraph_api(self):
3354 3355 3356 3357 3358 3359 3360
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.swish(x)
        swish = paddle.nn.Swish()
        out2 = swish(x)
        out_ref = ref_swish(self.x_np)
        for r in [out1, out2]:
3361
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
3362 3363 3364 3365 3366 3367
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
3368
            out = paddle.nn.functional.swish(x)
3369 3370 3371
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_swish(self.x_np)
3372
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
3373

3374
    def test_errors(self):
3375 3376
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
3377
            # The input type must be Variable.
3378
            self.assertRaises(TypeError, F.swish, 1)
3379
            # The input dtype must be float16, float32, float64.
3380 3381 3382
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
3383
            self.assertRaises(TypeError, F.swish, x_int32)
3384
            # support the input dtype is float16
3385 3386 3387
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
3388
            F.swish(x_fp16)
3389 3390


3391 3392 3393 3394
def ref_mish(x, threshold=20.0):
    softplus = np.select(
        [x <= threshold, x > threshold], [np.log(1 + np.exp(x)), x]
    )
3395 3396 3397 3398 3399 3400
    return x * np.tanh(softplus)


class TestMish(TestActivation):
    def setUp(self):
        self.op_type = "mish"
3401
        self.python_api = paddle.nn.functional.mish
3402
        self.init_dtype()
3403
        self.init_shape()
3404 3405

        np.random.seed(1024)
3406
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
3407 3408 3409 3410
        out = ref_mish(x)
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

3411 3412 3413
    def init_shape(self):
        self.shape = [10, 12]

3414 3415 3416
    def test_check_output(self):
        self.check_output(check_eager=True)

3417 3418 3419
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
3420
        self.check_grad(['X'], 'Out', check_eager=True)
3421 3422


3423 3424 3425 3426 3427
class TestMish_ZeroDim(TestMish):
    def init_shape(self):
        self.shape = []


3428 3429 3430 3431 3432
class TestMishAPI(unittest.TestCase):
    # test paddle.nn.Mish, paddle.nn.functional.mish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
3433 3434 3435
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
3436
            else paddle.CPUPlace()
3437
        )
3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449

    def test_static_api(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
            out1 = F.mish(x)
            mish = paddle.nn.Mish()
            out2 = mish(x)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
        out_ref = ref_mish(self.x_np)
        for r in res:
3450
            np.testing.assert_allclose(out_ref, r, rtol=1e-05)
3451 3452 3453 3454 3455 3456 3457 3458 3459

    def test_dygraph_api(self):
        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out1 = F.mish(x)
        mish = paddle.nn.Mish()
        out2 = mish(x)
        out_ref = ref_mish(self.x_np)
        for r in [out1, out2]:
3460
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
3461 3462 3463 3464 3465 3466
        paddle.enable_static()

    def test_fluid_api(self):
        paddle.enable_static()
        with fluid.program_guard(fluid.Program()):
            x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
3467
            out = paddle.nn.functional.mish(x)
3468 3469 3470
            exe = fluid.Executor(self.place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_mish(self.x_np)
3471
        np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
3472 3473 3474 3475 3476 3477 3478

    def test_errors(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            # The input type must be Variable.
            self.assertRaises(TypeError, F.mish, 1)
            # The input dtype must be float16, float32, float64.
3479 3480 3481
            x_int32 = paddle.fluid.data(
                name='x_int32', shape=[12, 10], dtype='int32'
            )
3482 3483
            self.assertRaises(TypeError, F.mish, x_int32)
            # support the input dtype is float16
3484 3485 3486
            x_fp16 = paddle.fluid.data(
                name='x_fp16', shape=[12, 10], dtype='float16'
            )
3487 3488 3489
            F.mish(x_fp16)


3490
# ------------------ Test Cudnn Activation----------------------
3491
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
3492 3493 3494
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


3510 3511 3512 3513 3514 3515 3516
# ------------------ Test Fp16 ----------------------
def create_test_act_fp16_class(
    parent, atol=1e-3, grad_check=True, grad_atol=0.80
):
    @unittest.skipIf(
        not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
C
chengduo 已提交
3517 3518 3519
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16
3520

C
chengduo 已提交
3521
        def test_check_output(self):
3522
            place = core.CUDAPlace(0)
C
chengduo 已提交
3523 3524 3525
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)
3526

C
chengduo 已提交
3527 3528 3529 3530
        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
3531 3532 3533
                self.check_grad_with_place(
                    place, ['X'], 'Out', max_relative_error=grad_atol
                )
C
chengduo 已提交
3534 3535 3536 3537 3538 3539 3540

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


create_test_act_fp16_class(TestActivation)
R
ronnywang 已提交
3541
create_test_act_fp16_class(TestExpm1)
C
chengduo 已提交
3542
create_test_act_fp16_class(TestSigmoid)
M
minghaoBD 已提交
3543
create_test_act_fp16_class(TestSilu)
C
chengduo 已提交
3544 3545
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
3546
create_test_act_fp16_class(TestTanhshrink)
C
chengduo 已提交
3547
create_test_act_fp16_class(TestHardShrink)
3548
create_test_act_fp16_class(TestSoftshrink)
C
chengduo 已提交
3549 3550 3551 3552 3553
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
J
joejiong 已提交
3554
create_test_act_fp16_class(TestTan, grad_atol=0.85)
3555
create_test_act_fp16_class(TestCosh, grad_atol=0.85)
3556
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
C
chengduo 已提交
3557
create_test_act_fp16_class(TestSin)
3558
create_test_act_fp16_class(TestSinh)
3559 3560
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
X
xiaoting 已提交
3561 3562 3563
create_test_act_fp16_class(TestAcosh, grad_atol=0.85)
create_test_act_fp16_class(TestAsinh, grad_atol=0.85)
create_test_act_fp16_class(TestAtanh, grad_atol=0.85)
C
chengduo 已提交
3564 3565
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
C
Clementine 已提交
3566
create_test_act_fp16_class(TestGelu)
C
chengduo 已提交
3567 3568
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
3569
create_test_act_fp16_class(TestSoftRelu, grad_atol=0.85)
C
chengduo 已提交
3570
create_test_act_fp16_class(TestELU)
3571
create_test_act_fp16_class(TestCELU)
C
chengduo 已提交
3572 3573
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
3574 3575 3576 3577
if core.is_compiled_with_rocm():
    create_test_act_fp16_class(TestLog2, atol=5e-2, grad_atol=0.85)
else:
    create_test_act_fp16_class(TestLog2, atol=5e-2)
J
joejiong 已提交
3578
create_test_act_fp16_class(TestLog10, atol=5e-2)
3579
create_test_act_fp16_class(TestLog1p, grad_atol=0.9)
C
chengduo 已提交
3580 3581
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
3582
create_test_act_fp16_class(TestPow_factor_tensor, atol=5e-2)
C
chengduo 已提交
3583 3584 3585 3586 3587
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
3588
create_test_act_fp16_class(TestSwish, grad_atol=0.85)
H
huangjun12 已提交
3589
create_test_act_fp16_class(TestHardSwish)
3590
create_test_act_fp16_class(TestMish, grad_atol=0.9)
A
Abhinav Arora 已提交
3591

3592

3593 3594 3595 3596 3597 3598
def create_test_act_bf16_class(
    parent, atol=1e-2, grad_check=True, grad_atol=0.80
):
    @unittest.skipIf(
        not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
3599 3600 3601 3602 3603 3604 3605 3606 3607 3608
    class TestActBF16(parent):
        def init_dtype(self):
            self.dtype = np.uint16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
3609 3610 3611
            self.check_grad_with_place(
                place, ['X'], 'Out', max_relative_error=grad_atol
            )
3612 3613 3614 3615 3616 3617 3618

    cls_name = "{0}_{1}".format(parent.__name__, "bf16")
    TestActBF16.__name__ = cls_name
    globals()[cls_name] = TestActBF16


create_test_act_bf16_class(TestRelu)
3619
create_test_act_bf16_class(TestAbs)
3620

Q
qijun 已提交
3621 3622
if __name__ == "__main__":
    unittest.main()