#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest
import warnings

import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16, paddle_static_guard
from scipy.special import erf, expit

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.nn.functional as F
import paddle.static as static
from paddle.fluid import Program, program_guard
from paddle.fluid.layer_helper import LayerHelper


class TestSqrtOpError(unittest.TestCase):
    def test_errors(self):
        with paddle_static_guard():
            with program_guard(Program(), Program()):
                # The input type of sqrt op must be Variable or numpy.ndarray.
                in1 = 1
                self.assertRaises(TypeError, paddle.sqrt, in1)
                # The input dtype of sqrt op must be float16, float32, float64.
                in2 = paddle.static.data(
                    name='input2', shape=[-1, 12, 10], dtype="int32"
                )
                self.assertRaises(TypeError, paddle.sqrt, in2)

                in3 = paddle.static.data(
                    name='input3', shape=[-1, 12, 10], dtype="float16"
                )
                paddle.sqrt(x=in3)


class TestActivation(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.prim_op_type = "prim"
        self.init_dtype()
        self.init_shape()
        self.init_kernel_type()
        self.python_api = paddle.exp
        self.public_python_api = paddle.exp

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'],
            'Out',
        )

    def init_dtype(self):
        self.dtype = np.float64

    def init_shape(self):
        self.shape = [11, 17]

    def init_kernel_type(self):
        pass


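# Subclasses customize TestActivation through the init_* hooks above
# (init_dtype / init_shape / init_kernel_type) rather than overriding setUp;
# for example, the zero-dim variant below only changes init_shape.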
class TestActivation_ZeroDim(TestActivation):
    def init_shape(self):
        self.shape = []


class TestExpFp32_Prim(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.prim_op_type = "prim"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.exp
        self.public_python_api = paddle.exp

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.if_enable_cinn()

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)

    def init_dtype(self):
        self.dtype = np.float32

    def init_shape(self):
        self.shape = [12, 17]

    def if_enable_cinn(self):
        self.enable_cinn = True


class TestExpFp64_Prim(TestExpFp32_Prim):
    def init_dtype(self):
        self.dtype = np.float64


class TestExpPrim_ZeroDim(TestExpFp32_Prim):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestExpm1(TestActivation):
    def setUp(self):
        self.op_type = "expm1"
        self.python_api = paddle.expm1
        self.init_dtype()
        self.init_shape()

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
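        # np.expm1 evaluates exp(x) - 1 directly and avoids the cancellation
        # error of computing exp(x) - 1 for small x, which makes it a reliable
        # reference output.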
        out = np.expm1(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')

    def test_check_output(self):
        self.check_output()


class TestExpm1_ZeroDim(TestExpm1):
    def init_shape(self):
        self.shape = []


class TestExpm1API(unittest.TestCase):
    def init_dtype(self):
        self.dtype = 'float64'
        self.shape = [11, 17]

    def setUp(self):
        self.init_dtype()
        self.x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        self.out_ref = np.expm1(self.x)

        self.place = [paddle.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.place.append(paddle.CUDAPlace(0))

    def test_static_api(self):
        def run(place):
            with paddle_static_guard():
                with paddle.static.program_guard(paddle.static.Program()):
                    X = paddle.static.data('X', self.shape, dtype=self.dtype)
                    out = paddle.expm1(X)
                    exe = paddle.static.Executor(place)
                    res = exe.run(feed={'X': self.x}, fetch_list=[out])
            for r in res:
                np.testing.assert_allclose(self.out_ref, r, rtol=1e-05)

        for place in self.place:
            run(place)

    def test_dygraph_api(self):
        def run(place):
            X = paddle.to_tensor(self.x)
            out = paddle.expm1(X)
            np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)

        for place in self.place:
            run(place)

    def test_errors(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                X = paddle.static.data('X', self.shape, dtype='int32')
                self.assertRaises(TypeError, paddle.expm1, X)
        # The input dtype must be float16, float32, float64.


class TestParameter:
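    # Mixin reused by op tests such as TestTanh and TestSqrt. It dispatches on
    # self.op_type via eval(), so it assumes that a paddle.<op_type> API and a
    # numpy function of the same name both exist.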
    def test_out_name(self):
        with paddle_static_guard():
            with fluid.program_guard(fluid.Program()):
                np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
                data = paddle.static.data(
                    name="X", shape=[-1, 1], dtype="float32"
                )
                out = eval("paddle.%s(data, name='Y')" % self.op_type)
                place = fluid.CPUPlace()
                exe = fluid.Executor(place)
                (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
                expected = eval("np.%s(np_x)" % self.op_type)
                np.testing.assert_allclose(result, expected, rtol=1e-05)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = eval("paddle.%s(x).numpy()" % self.op_type)
            z_expected = eval("np.%s(np_x)" % self.op_type)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)


class TestSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.prim_op_type = "comp"
        self.enable_cinn = False
        self.python_api = paddle.nn.functional.sigmoid
        self.public_python_api = paddle.nn.functional.sigmoid
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_prim=True)


class TestSigmoid_ZeroDim(TestSigmoid):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSigmoidBF16(OpTest):
    def setUp(self):
        self.op_type = "sigmoid"
        self.prim_op_type = "comp"
        self.enable_cinn = False
        self.python_api = paddle.nn.functional.sigmoid
        self.public_python_api = paddle.nn.functional.sigmoid
        self.init_dtype()
        self.init_shape()
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def init_shape(self):
        self.shape = [11, 17]

    def test_check_output(self):
        place = core.CUDAPlace(0)
        # elementwise_pow doesn't support bfloat16, skip check_prim here.
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out')


'''
class TestSigmoidBF16_ZeroDim(TestSigmoidBF16):

    def init_shape(self):
        self.shape = []
'''


class TestSilu(TestActivation):
    def setUp(self):
        self.op_type = "silu"
        self.prim_op_type = "comp"
        self.enable_cinn = True
        self.python_api = paddle.nn.functional.silu
        self.public_python_api = paddle.nn.functional.silu
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = x / (np.exp(-x) + 1)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def if_enable_cinn(self):
        pass

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestSilu_ZeroDim(TestSilu):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestSiluAPI(unittest.TestCase):
    # test paddle.nn.Silu, paddle.nn.functional.silu
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [11, 17])
                out1 = F.silu(x)
                m = paddle.nn.Silu()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = self.x_np / (1 + np.exp(-self.x_np))
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.silu(x)
        m = paddle.nn.Silu()
        out2 = m(x)
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.silu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[11, 17], dtype='int32'
                )
                self.assertRaises(TypeError, F.silu, x_int32)
                # float16 input dtype is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[11, 17], dtype='float16'
                )
                F.silu(x_fp16)


class TestLogSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.python_api = paddle.nn.functional.log_sigmoid
        self.init_dtype()
        self.init_shape()

        np.random.seed(2048)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestLogSigmoid_ZeroDim(TestLogSigmoid):
    def init_shape(self):
        self.shape = []


class TestLogSigmoidAPI(unittest.TestCase):
    # test paddle.nn.LogSigmoid, paddle.nn.functional.log_sigmoid
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [11, 17])
                out1 = F.log_sigmoid(x)
                m = paddle.nn.LogSigmoid()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.log_sigmoid(x)
        m = paddle.nn.LogSigmoid()
        out2 = m(x)
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.log_sigmoid, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[11, 17], dtype='int32'
                )
                self.assertRaises(TypeError, F.log_sigmoid, x_int32)
                # float16 input dtype is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[11, 17], dtype='float16'
                )
                F.log_sigmoid(x_fp16)


class TestTanh(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "tanh"
        self.python_api = paddle.tanh
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        # TODO If dtype is float64, the output (Out) has diff at CPUPlace
        # when using and not using inplace. Therefore, set dtype as float32
        # for now.
        self.dtype = np.float32


class TestTanh_ZeroDim(TestTanh):
    def init_shape(self):
        self.shape = []


class TestTanhAPI(unittest.TestCase):
    # test paddle.tanh, paddle.nn.tanh, paddle.nn.functional.tanh
    def setUp(self):
        self.dtype = 'float32'
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.tanh = F.tanh

    def test_static_api(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], self.dtype)
                out1 = self.tanh(x)
                th = paddle.nn.Tanh()
                out2 = th(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = np.tanh(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanh(x)
        out2 = paddle.tanh(x)
        th = paddle.nn.Tanh()
        out3 = th(x)
        out_ref = np.tanh(self.x_np)
        for r in [out1, out2, out3]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, self.tanh, 1)
                # The input dtype must be float16, float32.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, self.tanh, x_int32)
                # float16 input dtype is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                self.tanh(x_fp16)


class TestTanhInplaceAPI(TestTanhAPI):
    # test paddle.tanh_
    def executed_api(self):
        self.tanh = paddle.tanh_
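    # Overriding executed_api makes the inherited static-graph and error tests
    # exercise the inplace paddle.tanh_ API instead of F.tanh.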


class TestAtan(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "atan"
        self.python_api = paddle.atan
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_out_name(self):
        with paddle_static_guard():
            with fluid.program_guard(fluid.Program()):
                np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
                data = paddle.static.data(
                    name="X", shape=[-1, 1], dtype="float32"
                )
                out = paddle.atan(data, name='Y')
                place = fluid.CPUPlace()
                exe = fluid.Executor(place)
                (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
                expected = np.arctan(np_x)
                self.assertEqual(result, expected)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.atan(x).numpy()
            z_expected = np.arctan(np_x)
            self.assertEqual(z, z_expected)


class TestAtan_ZeroDim(TestAtan):
    def init_shape(self):
        self.shape = []


class TestSinh(TestActivation):
    def setUp(self):
        self.op_type = "sinh"
        self.python_api = paddle.sinh
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSinh_ZeroDim(TestSinh):
    def init_shape(self):
        self.shape = []


class TestSinhAPI(unittest.TestCase):
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.sinh(x).numpy()
            z_expected = np.sinh(np_x)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

    def test_api(self):
        with paddle_static_guard():
            test_data_shape = [11, 17]
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                    "float32"
                )
                data_x = paddle.static.data(
                    name="data_x",
                    shape=test_data_shape,
                    dtype="float32",
                )

                pd_sinh_out = paddle.sinh(data_x)
                exe = fluid.Executor(place=fluid.CPUPlace())
                exe.run(fluid.default_startup_program())
                (np_sinh_res,) = exe.run(
                    fluid.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[pd_sinh_out],
                )

            expected_res = np.sinh(input_x)
            np.testing.assert_allclose(np_sinh_res, expected_res, rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = paddle.sinh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestSinhOpError(unittest.TestCase):
    def test_errors(self):
        with paddle_static_guard():
            with program_guard(Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, paddle.sinh, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, paddle.sinh, x_int32)
                # float16 input dtype is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                paddle.sinh(x_fp16)


class TestCosh(TestActivation):
    def setUp(self):
        self.op_type = "cosh"
        self.python_api = paddle.cosh
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.cosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCosh_ZeroDim(TestCosh):
    def init_shape(self):
        self.shape = []


class TestCoshAPI(unittest.TestCase):
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.cosh(x).numpy()
            z_expected = np.cosh(np_x)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

    def test_api(self):
        with paddle_static_guard():
            test_data_shape = [11, 17]
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                    "float32"
                )
                data_x = paddle.static.data(
                    name="data_x",
                    shape=test_data_shape,
                    dtype="float32",
                )

                pd_cosh_out = paddle.cosh(data_x)
                exe = fluid.Executor(place=fluid.CPUPlace())
                exe.run(fluid.default_startup_program())
                (np_cosh_res,) = exe.run(
                    fluid.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[pd_cosh_out],
                )

            expected_res = np.cosh(input_x)
            np.testing.assert_allclose(np_cosh_res, expected_res, rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = paddle.cosh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestCoshOpError(unittest.TestCase):
    def test_errors(self):
        with paddle_static_guard():
            with program_guard(Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, paddle.cosh, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, paddle.cosh, x_int32)
                # float16 input dtype is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                paddle.cosh(x_fp16)


def ref_tanhshrink(x):
    out = x - np.tanh(x)
    return out


class TestTanhshrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.python_api = paddle.nn.functional.tanhshrink
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(10, 20, self.shape).astype(self.dtype)
        out = ref_tanhshrink(x)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTanhshrink_ZeroDim(TestTanhshrink):
    def init_shape(self):
        self.shape = []


class TestTanhshrinkAPI(unittest.TestCase):
    # test paddle.nn.Tanhshrink, paddle.nn.functional.tanhshrink
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(10, 20, [10, 17]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.tanhshrink(x)
                tanhshrink = paddle.nn.Tanhshrink()
                out2 = tanhshrink(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_tanhshrink(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanhshrink(x)
        tanhshrink = paddle.nn.Tanhshrink()
        out2 = tanhshrink(x)
        out_ref = ref_tanhshrink(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.tanhshrink, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.tanhshrink, x_int32)
                # float16 input dtype is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.tanhshrink(x_fp16)


def ref_hardshrink(x, threshold):
    out = np.copy(x)
    out[(out >= -threshold) & (out <= threshold)] = 0
    return out


class TestHardShrink(TestActivation):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.python_api = paddle.nn.functional.hardshrink
        self.init_dtype()
        self.init_shape()

        self.threshold = 0.5
        self.set_attrs()
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) * 10
        out = ref_hardshrink(x, self.threshold)

        self.attrs = {'threshold': self.threshold}
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def set_attrs(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardShrink_threshold_negative(TestHardShrink):
    def set_attrs(self):
        self.threshold = -0.1


'''
class TestHardShrink_ZeroDim(TestHardShrink):

    def init_shape(self):
        self.shape = []
'''


class TestHardShrinkAPI(unittest.TestCase):
    # test paddle.nn.Hardshrink, paddle.nn.functional.hardshrink
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out1 = F.hardshrink(x)
                hd = paddle.nn.Hardshrink()
                out2 = hd(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_hardshrink(self.x_np, 0.5)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardshrink(x)
        hd = paddle.nn.Hardshrink()
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.hardshrink(x, 0.6)
        hd = paddle.nn.Hardshrink(0.6)
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.6)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.hardshrink, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.hardshrink, x_int32)
                # float16 input dtype is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.hardshrink(x_fp16)


def ref_hardtanh(x, min=-1.0, max=1.0):
    out = np.copy(x)
    out[np.abs(x - min) < 0.005] = min + 0.02
    out[np.abs(x - max) < 0.005] = max + 0.02
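    # The two adjustments above are immediately overwritten by the clamp below,
    # so the effective reference is a plain clip of x to [min, max].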
    out = np.minimum(np.maximum(x, min), max)
    return out


class TestHardtanhAPI(unittest.TestCase):
    # test paddle.nn.Hardtanh, paddle.nn.functional.hardtanh
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out1 = F.hardtanh(x)
                m = paddle.nn.Hardtanh()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_hardtanh(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardtanh(x)
        m = paddle.nn.Hardtanh()
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.hardtanh(x, -2.0, 2.0)
        m = paddle.nn.Hardtanh(-2.0, 2.0)
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np, -2.0, 2.0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.hardtanh, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.hardtanh, x_int32)
                # float16 input dtype is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.hardtanh(x_fp16)


def ref_softshrink(x, threshold=0.5):
    out = np.copy(x)
    out = (out < -threshold) * (out + threshold) + (out > threshold) * (
        out - threshold
    )
    return out


class TestSoftshrink(TestActivation):
    def setUp(self):
        self.op_type = "softshrink"
        self.python_api = paddle.nn.functional.softshrink
        self.init_dtype()
        self.init_shape()

        threshold = 0.8

        np.random.seed(1023)
        x = np.random.uniform(0.25, 10, self.shape).astype(self.dtype)
        out = ref_softshrink(x, threshold)
        self.inputs = {'X': x}
        self.attrs = {"lambda": threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftshrink_ZeroDim(TestSoftshrink):
    def init_shape(self):
        self.shape = []


class TestSoftshrinkAPI(unittest.TestCase):
    # test paddle.nn.Softshrink, paddle.nn.functional.softshrink
    def setUp(self):
        self.threshold = 0.8
        np.random.seed(1024)
        self.x_np = np.random.uniform(0.25, 10, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.softshrink(x, self.threshold)
                softshrink = paddle.nn.Softshrink(self.threshold)
                out2 = softshrink(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_softshrink(self.x_np, self.threshold)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.softshrink(x, self.threshold)
        softshrink = paddle.nn.Softshrink(self.threshold)
        out2 = softshrink(x)
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.softshrink, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.softshrink, x_int32)
                # The threshold must be no less than zero
                x_fp32 = paddle.static.data(
                    name='x_fp32', shape=[12, 10], dtype='float32'
                )
                self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
                # float16 input dtype is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.softshrink(x_fp16)


class TestSqrt(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "prim"
        self.python_api = paddle.sqrt
        self.public_python_api = paddle.sqrt

        self.init_dtype()
        self.init_shape()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.enable_cinn = False

    # TODO(wanghao107) add prim test
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_check_output(self):
        self.check_output()


class TestSqrtPrimFp32(TestActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "prim"
        self.python_api = paddle.sqrt
        self.public_python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()
        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.enable_cinn = True

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)

    def test_check_output(self):
        self.check_output()

    def init_dtype(self):
        self.dtype = np.float32


class TestSqrt_ZeroDim(TestSqrt):
    def init_shape(self):
        self.shape = []
        self.enable_cinn = False


class TestSqrtPrim_ZeroDim(TestSqrt):
    def init_shape(self):
        self.shape = []
        self.enable_cinn = False

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSqrtBF16(OpTest):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "prim"
        self.python_api = paddle.sqrt
        self.public_python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(np.float32)
        out = np.sqrt(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}
        self.enable_cinn = False

    def init_dtype(self):
        self.dtype = np.uint16

    def init_shape(self):
        self.shape = [11, 17]

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)


class TestSqrtComp(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "comp"
        self.python_api = paddle.sqrt
        self.public_python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.enable_cinn = True

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_dygraph=True, check_prim=True)

    def test_check_output(self):
        self.check_output(check_dygraph=True, check_prim=True)


class TestSqrtCompFp32(TestActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "comp"
        self.python_api = paddle.sqrt
        self.public_python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()
        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.enable_cinn = True

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_dygraph=True, check_prim=True)

    def test_check_output(self):
        self.check_output(check_dygraph=True, check_prim=True)

    def init_dtype(self):
        self.dtype = np.float32


class TestRsqrt(TestActivation):
    def setUp(self):
        self.op_type = "rsqrt"
        self.prim_op_type = "comp"
        self.python_api = paddle.rsqrt
        self.public_python_api = paddle.rsqrt
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) * 10
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.enable_cinn = True

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_output(self):
        self.check_output(check_prim=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'],
            'Out',
            max_relative_error=0.0005,
            check_prim=True,
        )


'''
class TestRsqrt_ZeroDim(TestRsqrt):

    def init_shape(self):
        self.shape = []
'''


class TestAbs(TestActivation):
    def setUp(self):
        self.op_type = "abs"
        self.prim_op_type = "prim"
        self.python_api = paddle.abs
        self.public_python_api = paddle.abs
        self.enable_cinn = False
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # Because we set delta = 0.005 in calculating numeric gradient,
        # if x is too small, such as 0.002, x_neg will be -0.003
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
        # we should avoid this
        x[np.abs(x) < 0.005] = 0.02
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [4, 25]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestAbs_ZeroDim(TestAbs):
    def init_shape(self):
        self.shape = []


class TestCeil(TestActivation):
    def setUp(self):
        self.op_type = "ceil"
        self.python_api = paddle.ceil
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    # The same reason with TestFloor
    def test_check_grad(self):
        pass


class TestCeil_ZeroDim(TestCeil):
    def init_shape(self):
        self.shape = []


class TestFloor(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.prim_op_type = "prim"
        self.python_api = paddle.floor
        self.public_python_api = paddle.floor
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    # the gradient on floor, ceil, round is undefined.
    # we return zero as gradient, but numpy returns nan
    # The same reason with TestFloor
    def test_check_grad(self):
        pass


class TestFloor_ZeroDim(TestFloor):
    def init_shape(self):
        self.shape = []


class TestFloor_Prim(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.prim_op_type = "prim"
        self.python_api = paddle.floor
        self.public_python_api = paddle.floor
        self.init_dtype()
        self.init_shape()

        if len(self.shape) == 0:
            # for 0-D tensor, skip cinn testing
            self.enable_cinn = False

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        # the gradient on floor, ceil, round is undefined.
        # we return zero as gradient, but numpy returns nan.
        # for prim, we compare result with eager python api,
        # so, we use only_prim flag to express we only test prim.
        self.check_grad(['X'], 'Out', check_prim=True, only_check_prim=True)


class TestFloor_ZeroDim_Prim(TestFloor_Prim):
    def init_shape(self):
        self.shape = []


class TestFloorFp16_Prim(TestFloor_Prim):
    def init_dtype(self):
        self.dtype = np.float16


class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.python_api = paddle.cos
        self.public_python_api = paddle.cos
        self.prim_op_type = "prim"
        self.init_dtype()
        self.init_shape()
        # prim is not supported yet
        self.enable_cinn = False

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestCos_ZeroDim(TestCos):
    def init_shape(self):
        self.shape = []


class TestTan(TestActivation):
    def setUp(self):
        np.random.seed(1024)
        self.op_type = "tan"
        self.python_api = paddle.tan
        self.init_dtype()
        self.init_shape()

        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

        out = np.tan(self.x_np)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x_np)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTan_ZeroDim(TestTan):
    def init_shape(self):
        self.shape = []


class TestTanAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(1024)
        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out_test = paddle.tan(x)
        out_ref = np.tan(self.x_np)
        np.testing.assert_allclose(out_ref, out_test.numpy(), rtol=1e-05)

    def test_static_api(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [11, 17], self.dtype)
                out = paddle.tan(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = np.tan(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = paddle.to_tensor(input_x)
            var.stop_gradient = False
            loss = paddle.tan(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
        self.python_api = paddle.acos
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcos_ZeroDim(TestAcos):
    def init_shape(self):
        self.shape = []


class TestSin(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sin"
        self.python_api = paddle.sin
        self.public_python_api = paddle.sin
        self.prim_op_type = "prim"
        self.init_dtype()
        self.init_shape()
        # prim not supported yet
        self.enable_cinn = False

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestSin_ZeroDim(TestSin):
    def init_shape(self):
        self.shape = []


class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
        self.python_api = paddle.asin
        self.init_dtype()
        self.init_shape()

        np.random.seed(2048)
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsin_ZeroDim(TestAsin):
    def init_shape(self):
        self.shape = []


class TestAcosh(TestActivation):
    def setUp(self):
        self.op_type = "acosh"
        self.python_api = paddle.acosh
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(2, 3, self.shape).astype(self.dtype)
        out = np.arccosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcosh_ZeroDim(TestAcosh):
    def init_shape(self):
        self.shape = []


class TestAsinh(TestActivation):
    def setUp(self):
        self.op_type = "asinh"
        self.python_api = paddle.asinh
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.arcsinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsinh_ZeroDim(TestAsinh):
    def init_shape(self):
        self.shape = []


class TestAtanh(TestActivation):
    def setUp(self):
        self.op_type = "atanh"
        self.python_api = paddle.atanh
        self.init_dtype()
        self.init_shape()

        np.random.seed(400)
        x = np.random.uniform(-0.9, 0.9, self.shape).astype(self.dtype)
        out = np.arctanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAtanh_ZeroDim(TestAtanh):
    def init_shape(self):
        self.shape = []


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.python_api = paddle.round
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        pass


class TestRound_ZeroDim(TestRound):
    def init_shape(self):
        self.shape = []


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.python_api = paddle.nn.functional.relu
        self.prim_op_type = "comp"
        self.public_python_api = paddle.nn.functional.relu
        self.init_dtype()
        self.init_shape()
        self.skip_cinn()

        np.random.seed(1024)
        if self.dtype == np.uint16:
            x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
            # The same reason as in TestAbs
            x[np.abs(x) < 0.005] = 0.02
            out = convert_float_to_uint16(np.maximum(x, 0))
            self.inputs = {'X': convert_float_to_uint16(x)}
        else:
            x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
            # The same reason as in TestAbs
            x[np.abs(x) < 0.005] = 0.02
            out = np.maximum(x, 0)
            self.inputs = {'X': x}

        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)

    def test_check_output(self):
        self.check_output(check_prim=True)

    def skip_cinn(self):
        self.enable_cinn = False


class TestRelu_ZeroDim(TestRelu):
    def init_shape(self):
        self.shape = []

    def skip_cinn(self):
        self.enable_cinn = False


class TestReluAPI(unittest.TestCase):
    # test paddle.nn.ReLU, paddle.nn.functional.relu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.relu = F.relu

    def test_static_api(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out1 = self.relu(x)
                m = paddle.nn.ReLU()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = np.maximum(self.x_np, 0)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ReLU()
        out1 = m(x)
        out2 = self.relu(x)
        out_ref = np.maximum(self.x_np, 0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, self.relu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[10, 12], dtype='int32'
                )
                self.assertRaises(TypeError, self.relu, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[10, 12], dtype='float16'
                )
                self.relu(x_fp16)


class TestReluInplaceAPI(TestReluAPI):
    # test paddle.nn.functional.relu_
    def executed_api(self):
        self.relu = F.relu_


def ref_leaky_relu(x, alpha=0.01):
    out = np.copy(x)
    out[out < 0] *= alpha
    return out
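# Illustrative note (not used by the tests): the reference above keeps positive
# inputs unchanged and scales negative inputs by alpha, e.g.
# ref_leaky_relu(np.array([-1.0, 2.0]), alpha=0.01) -> array([-0.01, 2.0]).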


class TestLeakyRelu(TestActivation):
    def get_alpha(self):
        return 0.02

    def setUp(self):
        self.op_type = "leaky_relu"
        self.python_api = paddle.nn.functional.leaky_relu
        self.init_dtype()
        self.init_shape()
        alpha = self.get_alpha()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.05
        out = ref_leaky_relu(x, alpha)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {'alpha': alpha}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestLeakyReluAlpha1(TestLeakyRelu):
    def get_alpha(self):
        return 2


class TestLeakyReluAlpha2(TestLeakyRelu):
    def get_alpha(self):
        return -0.01


class TestLeakyReluAlpha3(TestLeakyRelu):
    def get_alpha(self):
        return -2.0


class TestLeakyRelu_ZeroDim(TestLeakyRelu):
    def init_shape(self):
        self.shape = []


class TestLeakyReluAPI(unittest.TestCase):
    # test paddle.nn.LeakyReLU, paddle.nn.functional.leaky_relu,
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out1 = F.leaky_relu(x)
                m = paddle.nn.LeakyReLU()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_leaky_relu(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.leaky_relu(x)
        m = paddle.nn.LeakyReLU()
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.leaky_relu(x, 0.6)
        m = paddle.nn.LeakyReLU(0.6)
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np, 0.6)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.leaky_relu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.leaky_relu, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.leaky_relu(x_fp16)


def gelu(x, approximate):
    if approximate:
        y_ref = (
            0.5
            * x
            * (
                1.0
                + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3)))
            )
        )
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)
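# Illustrative note (not used by the tests): the exact branch computes
# 0.5 * x * (1 + erf(x / sqrt(2))), i.e. x * Phi(x) with Phi the standard
# normal CDF, while the approximate branch uses the tanh-based fit; for
# example gelu(np.array([1.0]), False) is roughly 0.8413.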


class TestGeluApproximate(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.gelu
        self.public_python_api = paddle.nn.functional.gelu
        self.init_dtype()
        self.init_shape()
        approximate = True
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = gelu(x, approximate)
        self.enable_cinn = False

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

        # The backward decomposition of gelu is inconsistent with the raw kernel,
        # so the threshold is lowered to 1e-5 to pass the unittest.
        self.rev_comp_rtol = 1e-5

    def test_check_output(self):
        self.check_output(check_prim=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.gelu
        self.public_python_api = paddle.nn.functional.gelu
        self.init_dtype()
        self.init_shape()
        approximate = False
        np.random.seed(2048)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = gelu(x, approximate)
        self.if_enable_cinn()

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}
        # The backward decomposition of gelu is inconsistent with the raw kernel,
        # so the threshold is lowered to 1e-5 to pass the unittest.
        self.rev_comp_rtol = 1e-5

    def if_enable_cinn(self):
        self.enable_cinn = False

    def test_check_output(self):
        self.check_output(check_prim=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestGelu_ZeroDim(TestGelu):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestGELUAPI(unittest.TestCase):
    # test paddle.nn.GELU, paddle.nn.functional.gelu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.enable_cinn = False

        # The backward decomposition of gelu is inconsistent with the raw kernel,
        # so the threshold is lowered to 1e-5 to pass the unittest.
        self.rev_comp_rtol = 1e-5

    def test_static_api(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [11, 17], dtype="float32")
                out1 = F.gelu(x)
                m = paddle.nn.GELU()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = gelu(self.x_np, False)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.gelu(x)
        m = paddle.nn.GELU()
        out2 = m(x)
        out_ref = gelu(self.x_np, False)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.gelu(x, True)
        m = paddle.nn.GELU(True)
        out2 = m(x)
        out_ref = gelu(self.x_np, True)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.gelu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[11, 17], dtype='int32'
                )
                self.assertRaises(TypeError, F.gelu, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[11, 17], dtype='float16'
                )
                F.gelu(x_fp16)


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.python_api = paddle.nn.functional.hardtanh
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same as in TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


def ref_relu6(x, threshold=6.0):
    out = np.copy(x)
    out[np.abs(x - threshold) < 0.005] = threshold + 0.02
    out = np.minimum(np.maximum(x, 0), threshold)
    return out
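# Illustrative note (not used by the tests): the reference clamps x into
# [0, threshold], e.g. ref_relu6(np.array([-1.0, 3.0, 8.0])) -> [0.0, 3.0, 6.0].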


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.relu6

        np.random.seed(1024)
        x = np.random.uniform(-1, 10, self.shape).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_relu6(x)

        self.inputs = {'X': x}
        self.attrs = {'threshold': 6.0}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRelu6_ZeroDim(TestRelu6):
    def init_shape(self):
        self.shape = []


class TestRelu6API(unittest.TestCase):
    # test paddle.nn.ReLU6, paddle.nn.functional.relu6
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 10, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.relu6(x)
                relu6 = paddle.nn.ReLU6()
                out2 = relu6(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_relu6(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.relu6(x)
        relu6 = paddle.nn.ReLU6()
        out2 = relu6(x)
        out_ref = ref_relu6(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle_static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out = paddle.nn.functional.relu6(x)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_relu6(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.relu6, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.relu6, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.relu6(x_fp16)


class TestRelu6APIWarnings(unittest.TestCase):
    def test_warnings(self):
        with paddle_static_guard():
            with warnings.catch_warnings(record=True) as context:
                warnings.simplefilter("always")

                helper = LayerHelper("relu6")
                data = paddle.static.data(
                    name='data', shape=[None, 3, 32, 32], dtype='float32'
                )
                out = helper.create_variable_for_type_inference(
                    dtype=data.dtype
                )
                os.environ['FLAGS_print_extra_attrs'] = "1"
                helper.append_op(
                    type="relu6",
                    inputs={'X': data},
                    outputs={'Out': out},
                    attrs={'threshold': 6.0},
                )
                self.assertTrue(
                    "op relu6 use extra_attr: threshold"
                    in str(context[-1].message)
                )
                os.environ['FLAGS_print_extra_attrs'] = "0"


def ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0):
    x_dtype = x.dtype
    if x_dtype == 'float16':
        x_dtype = 'float16'
        x = x.astype('float32')
    return (
        x * np.minimum(np.maximum(x + offset, 0.0), threshold) / scale
    ).astype(x_dtype)
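# Illustrative note (not used by the tests): with the default parameters the
# reference computes x * clip(x + 3, 0, 6) / 6, e.g. it yields 0.0 for
# x = -4.0, about 0.667 for x = 1.0, and 4.0 for x = 4.0.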


class TestHardSwish(TestActivation):
    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()
        self.init_shape()
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.hardswish
        self.public_python_api = paddle.nn.functional.hardswish

        np.random.seed(1024)
        x = np.random.uniform(-6, 6, self.shape).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        # the same as in TestAbs
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
        out = ref_hardswish(x, threshold, scale, offset)

        self.inputs = {'X': x}
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.outputs = {'Out': out}
        self.enable_cinn = False

    def init_shape(self):
        self.shape = [10, 12]

    def if_only_check_prim(self):
        return False

    def test_check_grad(self):
        self.check_grad(
            ['X'],
            'Out',
            check_prim=True,
            only_check_prim=self.if_only_check_prim(),
        )

    def test_check_output(self):
        self.check_output(check_prim=True)


class TestHardSwish_ZeroDim(TestHardSwish):
    def setUp(self):
        super().setUp()
        self.enable_cinn = False

    def init_shape(self):
        self.shape = []


class TestHardswishAPI(unittest.TestCase):
    # test paddle.nn.Hardswish, paddle.nn.functional.hardswish
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.hardswish(x)
                m = paddle.nn.Hardswish()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_hardswish(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor([11648.0, 11448.0])
        out1 = F.hardswish(x)
        m = paddle.nn.Hardswish()
        out2 = m(x)
        out_ref = [11648.0, 11448.0]
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle_static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out = paddle.nn.functional.hardswish(x)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_hardswish(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        x = paddle.to_tensor(self.x_np)
        out = paddle.nn.functional.hardswish(x)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.hardswish, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.hardswish, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.hardswish(x_fp16)


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        np.random.seed(4096)
        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason as in TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.02, check_dygraph=False
        )


def elu(x, alpha):
    out_ref = np.where(x > 0, x, alpha * (np.exp(x) - 1))
    return out_ref.astype(x.dtype)
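# Illustrative note (not used by the tests): elu keeps positive inputs and maps
# negative inputs to alpha * (exp(x) - 1), e.g. elu(np.array([-1.0]), 1.0) is
# roughly -0.632.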


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.elu

        np.random.seed(1024)
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
        alpha = self.get_alpha()
        out = elu(x, alpha)
        # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1)
        # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def get_alpha(self):
        return 1.0


class TestELUAlpha(TestELU):
    def get_alpha(self):
        return -0.2


class TestELU_ZeroDim(TestELU):
    def init_shape(self):
        self.shape = []


class TestELUAPI(unittest.TestCase):
    # test paddle.nn.ELU, paddle.nn.functional.elu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.elu = F.elu

    def test_static_api(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out1 = self.elu(x)
                m = paddle.nn.ELU()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = elu(self.x_np, 1.0)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = self.elu(x)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ELU()
        out2 = m(x)
        out_ref = elu(self.x_np, 1.0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.elu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ELU(0.2)
        out2 = m(x)
        out_ref = elu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, self.elu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[10, 12], dtype='int32'
                )
                self.assertRaises(TypeError, self.elu, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[10, 12], dtype='float16'
                )
                self.elu(x_fp16)


class TestELUInplaceAPI(TestELUAPI):
    # test paddle.nn.functional.elu_
    def executed_api(self):
        self.elu = F.elu_

    def test_alpha_error(self):
        x = paddle.to_tensor(self.x_np)
        self.assertRaises(Exception, F.elu_, x, -0.2)


def celu(x, alpha):
    out_ref = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x / alpha) - 1))
    return out_ref.astype(x.dtype)
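# Illustrative note (not used by the tests): celu computes
# max(0, x) + min(0, alpha * (exp(x / alpha) - 1)); with alpha = 1.0 it
# coincides with elu, e.g. celu(np.array([-3.0]), 1.5) is roughly -1.297.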


class TestCELU(TestActivation):
    def setUp(self):
        self.op_type = "celu"
        self.init_dtype()
        self.init_shape()

        self.python_api = paddle.nn.functional.celu
        np.random.seed(1024)
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
        alpha = 1.5
        out = celu(x, alpha)
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCELU_ZeroDim(TestCELU):
    def init_shape(self):
        self.shape = []


class TestCELUAPI(unittest.TestCase):
    # test paddle.nn.CELU, paddle.nn.functional.celu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.celu = F.celu

    def test_static_api(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out1 = self.celu(x, 1.5)
                m = paddle.nn.CELU(1.5)
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = celu(self.x_np, 1.5)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = self.celu(x, 1.5)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(1.5)
        out2 = m(x)
        out_ref = celu(self.x_np, 1.5)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.celu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(0.2)
        out2 = m(x)
        out_ref = celu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, self.celu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[10, 12], dtype='int32'
                )
                self.assertRaises(TypeError, self.celu, x_int32)
                # The alpha must be not equal 0
                x_fp32 = paddle.static.data(
                    name='x_fp32', shape=[10, 12], dtype='float32'
                )
                self.assertRaises(ZeroDivisionError, F.celu, x_fp32, 0)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[10, 12], dtype='float16'
                )
                self.celu(x_fp16)


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.python_api = paddle.reciprocal
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)

    def test_check_output(self):
        self.check_output()


class TestReciprocal_ZeroDim(TestReciprocal):
    def init_shape(self):
        self.shape = []


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.prim_op_type = "prim"
        self.python_api = paddle.log
        self.public_python_api = paddle.log
        self.init_dtype()
        self.init_shape()

        if len(self.shape) == 0:
            # for 0-D tensor, skip cinn testing
            self.enable_cinn = False

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)

    def test_error(self):
        with paddle_static_guard():
            in1 = paddle.static.data(
                name="in1", shape=[11, 17], dtype="int32"
            )
            in2 = paddle.static.data(
                name="in2", shape=[11, 17], dtype="int64"
            )

            self.assertRaises(TypeError, paddle.log, in1)
            self.assertRaises(TypeError, paddle.log, in2)


class Test_Log_Op_Fp16(unittest.TestCase):
    def test_api_fp16(self):
        with paddle_static_guard():
            with static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                x = [[2, 3, 4], [7, 8, 9]]
                x = paddle.to_tensor(x, dtype='float16')
                out = paddle.log(x)
                if core.is_compiled_with_cuda():
                    place = paddle.CUDAPlace(0)
                    exe = paddle.static.Executor(place)
                    (res,) = exe.run(fetch_list=[out])


class TestLog_ZeroDim(TestLog):
    def init_shape(self):
        self.shape = []


class TestLog2(TestActivation):
    def setUp(self):
        self.op_type = "log2"
        self.python_api = paddle.log2
        self.init_dtype()
        self.init_shape()

        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log2(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_error(self):
        with paddle_static_guard():
            in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
            in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

            self.assertRaises(TypeError, paddle.log2, in1)
            self.assertRaises(TypeError, paddle.log2, in2)

    def test_api(self):
        with paddle_static_guard():
            with paddle.static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
                data_x = paddle.static.data(
                    name="data_x", shape=[11, 17], dtype="float64"
                )

                out1 = paddle.log2(data_x)
                exe = paddle.static.Executor(place=fluid.CPUPlace())
                exe.run(paddle.static.default_startup_program())
                (res1,) = exe.run(
                    paddle.static.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[out1],
                )
            expected_res = np.log2(input_x)
            np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log2(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log2(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestLog2_ZeroDim(TestLog2):
    def init_shape(self):
        self.shape = []


class TestLog10(TestActivation):
    def setUp(self):
        self.op_type = "log10"
        self.python_api = paddle.log10
        self.init_dtype()
        self.init_shape()

        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log10(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestLog10_ZeroDim(TestLog10):
    def init_shape(self):
        self.shape = []


class TestLog10API(unittest.TestCase):
    def test_error(self):
        with paddle_static_guard():
            in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
            in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

            self.assertRaises(TypeError, paddle.log10, in1)
            self.assertRaises(TypeError, paddle.log10, in2)

    def test_api(self):
        with paddle_static_guard():
            with paddle.static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
                data_x = paddle.static.data(
                    name="data_x", shape=[11, 17], dtype="float64"
                )

                out1 = paddle.log10(data_x)
                exe = paddle.static.Executor(place=paddle.CPUPlace())
                exe.run(paddle.static.default_startup_program())
                (res1,) = exe.run(
                    paddle.static.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[out1],
                )
            expected_res = np.log10(input_x)
            np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log10(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log10(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestLog1p(TestActivation):
    def setUp(self):
        self.op_type = "log1p"
        self.python_api = paddle.log1p
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log1p(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class Test_Log1p_Op_Fp16(unittest.TestCase):
    def test_api_fp16(self):
        with paddle_static_guard():
            with static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                x = [[2, 3, 4], [7, 8, 9]]
                x = paddle.to_tensor(x, dtype='float16')
                out = paddle.log1p(x)
                if core.is_compiled_with_cuda():
                    place = paddle.CUDAPlace(0)
                    exe = paddle.static.Executor(place)
                    (res,) = exe.run(fetch_list=[out])


class TestLog1p_ZeroDim(TestLog1p):
    def init_shape(self):
        self.shape = []


class TestLog1pAPI(unittest.TestCase):
    def test_api(self):
        with paddle_static_guard():
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
                data_x = paddle.static.data(
                    name="data_x",
                    shape=[11, 17],
                    dtype="float64",
                )

                out1 = paddle.log1p(data_x)
                exe = fluid.Executor(place=fluid.CPUPlace())
                exe.run(fluid.default_startup_program())
                (res1,) = exe.run(
                    fluid.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[out1],
                )
            expected_res = np.log1p(input_x)
            np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.dygraph.to_variable(np_x)
            z = paddle.log1p(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log1p(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def test_check_output(self):
        self.check_output()


class TestSquare_ZeroDim(TestSquare):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSquareBF16(OpTest):
    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
        out = np.square(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', numeric_grad_delta=0.5)


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.prim_op_type = "comp"
        self.python_api = paddle.pow
        self.public_python_api = paddle.pow
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output(check_prim=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestPow_ZeroDim(TestPow):
    def init_shape(self):
        self.shape = []

    def setUp(self):
        super().setUp()
        self.enable_cinn = False


class TestPow_factor_tensor(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.python_api = paddle.pow
        self.enable_cinn = False
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype(self.dtype),
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_api(self):
        with paddle_static_guard():
            input = np.random.uniform(1, 2, [11, 17]).astype("float32")
            x = paddle.static.data(name="x", shape=[11, 17], dtype="float32")
            res = paddle.static.data(
                name="res", shape=[11, 17], dtype="float32"
            )

            factor_1 = 2.0
            factor_2 = paddle.tensor.fill_constant([1], "float32", 3.0)
            out_1 = paddle.pow(x, factor_1)
            out_2 = paddle.pow(x, factor_2)
            out_4 = paddle.pow(x, factor_1, name='pow_res')
            out_6 = paddle.pow(x, factor_2)
            self.assertEqual(('pow_res' in out_4.name), True)

            exe = fluid.Executor(place=fluid.CPUPlace())
            res_1, res_2, res, res_6 = exe.run(
                fluid.default_main_program(),
                feed={"x": input},
                fetch_list=[out_1, out_2, res, out_6],
            )

            assert np.allclose(res_1, np.power(input, 2))
            assert np.allclose(res_2, np.power(input, 3))
            assert np.allclose(res_6, np.power(input, 3))


def ref_stanh(x, scale_a=0.67, scale_b=1.7159):
    out = scale_b * np.tanh(x * scale_a)
    return out


class TestSTanh(TestActivation):
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        self.op_type = "stanh"
        self.python_api = paddle.stanh
        self.init_dtype()
        self.init_shape()

        scale_a = self.get_scale_a()
        scale_b = self.get_scale_b()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        # Same reason as TestAbs
        out = ref_stanh(x, scale_a, scale_b)

        self.inputs = {'X': x}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSTanhScaleA(TestSTanh):
    def get_scale_a(self):
        return 2.0


class TestSTanhScaleB(TestSTanh):
    def get_scale_b(self):
        return 0.5


class TestSTanh_ZeroDim(TestSTanh):
    def init_shape(self):
        self.shape = []


class TestSTanhAPI(unittest.TestCase):
    # test paddle.nn.stanh
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.scale_a = self.get_scale_a()
        self.scale_b = self.get_scale_b()
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12])
                out = paddle.stanh(x, self.scale_a, self.scale_b)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out = paddle.stanh(x, self.scale_a, self.scale_b)
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in [out]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle_static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out = paddle.stanh(x, self.scale_a, self.scale_b)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, paddle.stanh, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, paddle.stanh, x_int32)
                # the float16 input dtype is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                paddle.stanh(x_fp16)


class TestSTanhAPIScaleA(TestSTanhAPI):
    def get_scale_a(self):
        return 2.0


class TestSTanhAPIScaleB(TestSTanhAPI):
    def get_scale_b(self):
        return 0.5


def ref_softplus(x, beta=1, threshold=20):
    x_beta = beta * x
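    # above the threshold softplus is treated as effectively linear (out = x),
    # which avoids overflowing np.exp for large x_beta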
    out = np.select(
        [x_beta <= threshold, x_beta > threshold],
        [np.log(1 + np.exp(x_beta)) / beta, x],
    )
    return out


class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.python_api = paddle.nn.functional.softplus
        self.init_dtype()
        self.init_shape()

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': x}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftplus_ZeroDim(TestSoftplus):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestSoftplusBF16(OpTest):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()
        self.python_api = paddle.nn.functional.softplus

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(np.float32)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', numeric_grad_delta=0.05)


class TestSoftplusAPI(unittest.TestCase):
    # test paddle.nn.Softplus, paddle.nn.functional.softplus
    def setUp(self):
        self.beta = 2
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.softplus(x, self.beta, self.threshold)
                softplus = paddle.nn.Softplus(self.beta, self.threshold)
                out2 = softplus(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.softplus(x, self.beta, self.threshold)
        softplus = paddle.nn.Softplus(self.beta, self.threshold)
        out2 = softplus(x)
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.softplus, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.softplus, x_int32)
                # the float16 input dtype is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.softplus(x_fp16)


def ref_softsign(x):
    out = np.divide(x, 1 + np.abs(x))
    return out


class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()
        self.init_shape()

        self.python_api = paddle.nn.functional.softsign

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_softsign(x)
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftsign_ZeroDim(TestSoftsign):
    def init_shape(self):
        self.shape = []


class TestSoftsignAPI(unittest.TestCase):
    # test paddle.nn.Softsign, paddle.nn.functional.softsign
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.softsign(x)
                softsign = paddle.nn.Softsign()
                out2 = softsign(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_softsign(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.softsign(x)
        softsign = paddle.nn.Softsign()
        out2 = softsign(x)
        out_ref = ref_softsign(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.softsign, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.softsign, x_int32)
                # the float16 input dtype is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.softsign(x_fp16)


def ref_thresholded_relu(x, threshold=1.0):
    out = (x > threshold) * x
    return out


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.thresholded_relu

        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-20, 20, self.shape).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_thresholded_relu(x, threshold)
        self.inputs = {'X': x}
        self.attrs = {"threshold": threshold}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestThresholdedRelu_ZeroDim(TestThresholdedRelu):
    def init_shape(self):
        self.shape = []


class TestThresholdedReluAPI(unittest.TestCase):
    # test paddle.nn.ThresholdedReLU, paddle.nn.functional.thresholded_relu
    def setUp(self):
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-20, 20, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.thresholded_relu(x, self.threshold)
                thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
                out2 = thresholded_relu(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_thresholded_relu(self.x_np, self.threshold)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.thresholded_relu(x, self.threshold)
        thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
        out2 = thresholded_relu(x)
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.thresholded_relu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.thresholded_relu, x_int32)
                # the float16 input dtype is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.thresholded_relu(x_fp16)


def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5):
    return np.maximum(np.minimum(x * slope + offset, 1.0), 0.0).astype(x.dtype)


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.dtype = 'float64'
        self.slope = 0.166666666666667
        self.offset = 0.5
        self.set_attrs()
        self.init_shape()
        self.python_api = paddle.nn.functional.hardsigmoid

        x = np.random.uniform(-5, 5, self.shape).astype(self.dtype)
        lower_threshold = -self.offset / self.slope
        upper_threshold = (1.0 - self.offset) / self.slope

        # Same reason as TestAbs
        delta = 0.005
        x[np.abs(x - lower_threshold) < delta] = lower_threshold - 0.02
        x[np.abs(x - upper_threshold) < delta] = upper_threshold - 0.02

        out = ref_hardsigmoid(x, self.slope, self.offset)

        self.attrs = {'slope': self.slope, 'offset': self.offset}
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def set_attrs(self):
        pass


class TestHardSigmoidFP32(TestHardSigmoid):
    def set_attrs(self):
        self.dtype = 'float32'


class TestHardSigmoidSlopeOffset(TestHardSigmoid):
    def set_attrs(self):
        self.slope = 0.2
        self.offset = 0.4


class TestHardSigmoid_ZeroDim(TestHardSigmoid):
    def init_shape(self):
        self.shape = []


class TestHardsigmoidAPI(unittest.TestCase):
    # test paddle.nn.Hardsigmoid, paddle.nn.functional.hardsigmoid
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.hardsigmoid(x)
                m = paddle.nn.Hardsigmoid()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_hardsigmoid(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardsigmoid(x)
        m = paddle.nn.Hardsigmoid()
        out2 = m(x)
        out_ref = ref_hardsigmoid(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle_static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.hardsigmoid, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.hardsigmoid, x_int32)
                # the float16 input dtype is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.hardsigmoid(x_fp16)


def ref_swish(x):
    out = x * expit(x)
    return out


class TestSwish(TestActivation):
    def setUp(self):
        self.op_type = "swish"
        self.python_api = paddle.nn.functional.swish
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_swish(x)
        self.inputs = {'X': x}
        self.attrs = {'beta': 1.0}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'],
            'Out',
        )


class TestSwish_ZeroDim(TestSwish):
    def init_shape(self):
        self.shape = []


class TestSwishAPI(unittest.TestCase):
    # test paddle.nn.Swish, paddle.nn.functional.swish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.swish(x)
                swish = paddle.nn.Swish()
                out2 = swish(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_swish(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.swish(x)
        swish = paddle.nn.Swish()
        out2 = swish(x)
        out_ref = ref_swish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle_static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out = paddle.nn.functional.swish(x)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_swish(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.swish, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.swish, x_int32)
                # the float16 input dtype is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.swish(x_fp16)


def ref_mish(x, threshold=20.0):
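    # mish(x) = x * tanh(softplus(x)); the softplus branch switches to x
    # itself above the threshold to avoid overflowing np.exp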
    softplus = np.select(
        [x <= threshold, x > threshold], [np.log(1 + np.exp(x)), x]
    )
    return x * np.tanh(softplus)


class TestMish(TestActivation):
    def setUp(self):
        self.op_type = "mish"
        self.python_api = paddle.nn.functional.mish
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_mish(x)
        self.inputs = {'X': x}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestMish_ZeroDim(TestMish):
    def init_shape(self):
        self.shape = []


class TestMishAPI(unittest.TestCase):
    # test paddle.nn.Mish, paddle.nn.functional.mish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.mish(x)
                mish = paddle.nn.Mish()
                out2 = mish(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_mish(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.mish(x)
        mish = paddle.nn.Mish()
        out2 = mish(x)
        out_ref = ref_mish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle_static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out = paddle.nn.functional.mish(x)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_mish(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.mish, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.mish, x_int32)
                # the float16 input dtype is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.mish(x_fp16)


# ------------------ Test Cudnn Activation ----------------------
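# The factory below derives a cudnn variant of an existing activation test
# class (it only overrides init_kernel_type to set use_cudnn=True) and
# registers the generated class in the module globals so unittest discovers it.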
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


# ------------------ Test Fp16 ----------------------
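# Analogous factory for float16: the generated tests run their checks only on
# CUDA devices that support float16 and use looser tolerances (atol,
# max_relative_error) than the float64 base tests.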
def create_test_act_fp16_class(
    parent,
    atol=1e-3,
    grad_check=True,
    check_prim=False,
    enable_cinn=True,
    grad_atol=0.80,
):
    @unittest.skipIf(
        not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def if_enable_cinn(self):
            self.enable_cinn = enable_cinn

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(
                    place, atol=atol, check_prim=check_prim
                )

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place,
                    ['X'],
                    'Out',
                    check_prim=check_prim,
                    max_relative_error=grad_atol,
                )

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


create_test_act_fp16_class(TestActivation, check_prim=True)
create_test_act_fp16_class(TestExpm1)
create_test_act_fp16_class(TestSigmoid, check_prim=True)
create_test_act_fp16_class(TestSilu, check_prim=True)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhshrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftshrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestSqrtComp, check_prim=True)
create_test_act_fp16_class(TestAbs, check_prim=True)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, check_prim=True, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestTan, grad_atol=0.85)
create_test_act_fp16_class(TestCosh, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestSinh)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestAcosh, grad_atol=0.85)
create_test_act_fp16_class(TestAsinh, grad_atol=0.85)
create_test_act_fp16_class(TestAtanh, grad_atol=0.85)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu, check_prim=True)
create_test_act_fp16_class(TestGelu, check_prim=True, enable_cinn=False)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu, grad_atol=0.85)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestCELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog, check_prim=True)
if core.is_compiled_with_rocm():
    create_test_act_fp16_class(TestLog2, atol=5e-2, grad_atol=0.85)
else:
    create_test_act_fp16_class(TestLog2, atol=5e-2)
create_test_act_fp16_class(TestLog10, atol=5e-2)
create_test_act_fp16_class(TestLog1p, grad_atol=0.9)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, check_prim=True, atol=5e-2)
create_test_act_fp16_class(TestPow_factor_tensor, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish, grad_atol=0.85)
create_test_act_fp16_class(TestHardSwish, check_prim=True)
create_test_act_fp16_class(TestMish, grad_atol=0.9)


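# ------------------ Test Bf16 ----------------------
# Analogous factory for bfloat16 (stored as np.uint16): CUDA-only, with
# relaxed output and gradient tolerances.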
def create_test_act_bf16_class(
    parent, atol=1e-2, grad_check=True, grad_atol=0.80
):
    @unittest.skipIf(
        not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActBF16(parent):
        def init_dtype(self):
            self.dtype = np.uint16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            self.check_grad_with_place(
                place, ['X'], 'Out', max_relative_error=grad_atol
            )

    cls_name = "{0}_{1}".format(parent.__name__, "bf16")
    TestActBF16.__name__ = cls_name
    globals()[cls_name] = TestActBF16


create_test_act_bf16_class(TestRelu)
create_test_act_bf16_class(TestAbs)

if __name__ == "__main__":
    unittest.main()