test_activation_op.py 133.2 KB
Newer Older
1
#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
D
dzhwinter 已提交
2
#
D
dzhwinter 已提交
3 4 5
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
D
dzhwinter 已提交
6
#
D
dzhwinter 已提交
7
#     http://www.apache.org/licenses/LICENSE-2.0
D
dzhwinter 已提交
8
#
D
dzhwinter 已提交
9 10 11 12 13 14
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

15
import os
Q
qijun 已提交
16
import unittest
17
import warnings
J
joejiong 已提交
18

Q
qijun 已提交
19
import numpy as np
20
from eager_op_test import OpTest, convert_float_to_uint16
21 22
from scipy.special import erf, expit

23
import paddle
24
import paddle.nn.functional as F
25 26
from paddle import fluid, static
from paddle.fluid import Program, core, program_guard
27
from paddle.fluid.layer_helper import LayerHelper
Q
qijun 已提交
28 29


30
class TestSqrtOpError(unittest.TestCase):
Z
Zhaolong Xing 已提交
31
    def test_errors(self):
32
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
33 34 35 36 37 38 39 40 41 42 43 44 45 46
            with program_guard(Program(), Program()):
                # The input type of sqrt op must be Variable or numpy.ndarray.
                in1 = 1
                self.assertRaises(TypeError, paddle.sqrt, in1)
                # The input dtype of sqrt op must be float16, float32, float64.
                in2 = paddle.static.data(
                    name='input2', shape=[-1, 12, 10], dtype="int32"
                )
                self.assertRaises(TypeError, paddle.sqrt, in2)

                in3 = paddle.static.data(
                    name='input3', shape=[-1, 12, 10], dtype="float16"
                )
                paddle.sqrt(x=in3)
Z
Zhaolong Xing 已提交
47 48


C
chengduo 已提交
49
class TestActivation(OpTest):
Q
qijun 已提交
50 51
    def setUp(self):
        self.op_type = "exp"
52
        self.init_dtype()
53
        self.init_shape()
54
        self.init_kernel_type()
55
        self.if_enable_cinn()
C
chentianyu03 已提交
56
        self.python_api = paddle.exp
57
        self.public_python_api = paddle.exp
58

59
        np.random.seed(2049)
60
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
61 62 63 64
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
Q
qijun 已提交
65

66 67
        self.convert_input_output()

Q
qijun 已提交
68
    def test_check_output(self):
W
wanghuancoder 已提交
69
        self.check_output()
Q
qijun 已提交
70 71

    def test_check_grad(self):
72 73
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
74 75 76 77
        self.check_grad(
            ['X'],
            'Out',
        )
Q
qijun 已提交
78

79
    def init_dtype(self):
80
        self.dtype = np.float64
81

82 83 84
    def init_shape(self):
        self.shape = [11, 17]

85 86 87
    def init_kernel_type(self):
        pass

88 89 90
    def convert_input_output(self):
        pass

91 92 93
    def if_enable_cinn(self):
        pass

Q
qijun 已提交
94

95 96 97 98 99
class TestActivation_ZeroDim(TestActivation):
    def init_shape(self):
        self.shape = []


100
class TestExpFp32_Prim(OpTest):
101 102 103 104 105 106
    def setUp(self):
        self.op_type = "exp"
        self.prim_op_type = "prim"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.exp
107
        self.public_python_api = paddle.exp
108 109 110 111 112 113 114

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
115
        self.if_enable_cinn()
116
        self.convert_input_output()
117 118 119 120 121 122 123 124 125 126 127 128 129

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)

    def init_dtype(self):
        self.dtype = np.float32

    def init_shape(self):
        self.shape = [12, 17]

130
    def if_enable_cinn(self):
131
        pass
132

133 134 135
    def convert_input_output(self):
        pass

136

137
class TestExpFp64_Prim(TestExpFp32_Prim):
138 139 140 141
    def init_dtype(self):
        self.dtype = np.float64


142
class TestExpPrim_ZeroDim(TestExpFp32_Prim):
143 144 145 146
    def init_shape(self):
        self.shape = []


R
ronnywang 已提交
147 148 149
class TestExpm1(TestActivation):
    def setUp(self):
        self.op_type = "expm1"
150
        self.python_api = paddle.expm1
R
ronnywang 已提交
151
        self.init_dtype()
152
        self.init_shape()
R
ronnywang 已提交
153 154

        np.random.seed(2049)
155
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
R
ronnywang 已提交
156 157 158 159
        out = np.expm1(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
160
        self.convert_input_output()
R
ronnywang 已提交
161 162

    def test_check_grad(self):
W
wanghuancoder 已提交
163
        self.check_grad(['X'], 'Out')
164 165

    def test_check_output(self):
W
wanghuancoder 已提交
166
        self.check_output()
R
ronnywang 已提交
167 168


169 170 171 172 173
class TestExpm1_ZeroDim(TestExpm1):
    def init_shape(self):
        self.shape = []


R
ronnywang 已提交
174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189
class TestExpm1API(unittest.TestCase):
    def init_dtype(self):
        self.dtype = 'float64'
        self.shape = [11, 17]

    def setUp(self):
        self.init_dtype()
        self.x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        self.out_ref = np.expm1(self.x)

        self.place = [paddle.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.place.append(paddle.CUDAPlace(0))

    def test_static_api(self):
        def run(place):
190
            with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
191
                with paddle.static.program_guard(paddle.static.Program()):
192
                    X = paddle.static.data('X', self.shape, dtype=self.dtype)
W
wanghuancoder 已提交
193 194 195
                    out = paddle.expm1(X)
                    exe = paddle.static.Executor(place)
                    res = exe.run(feed={'X': self.x})
R
ronnywang 已提交
196
            for r in res:
197
                np.testing.assert_allclose(self.out_ref, r, rtol=1e-05)
R
ronnywang 已提交
198 199 200 201 202 203 204 205

        for place in self.place:
            run(place)

    def test_dygraph_api(self):
        def run(place):
            X = paddle.to_tensor(self.x)
            out = paddle.expm1(X)
206
            np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)
R
ronnywang 已提交
207 208 209 210 211

        for place in self.place:
            run(place)

    def test_errors(self):
212
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
213
            with paddle.static.program_guard(paddle.static.Program()):
214
                X = paddle.static.data('X', self.shape, dtype='int32')
W
wanghuancoder 已提交
215
                self.assertRaises(TypeError, paddle.expm1, X)
R
ronnywang 已提交
216 217 218
        # The input dtype must be float16, float32, float64.


219
class TestParameter:
220
    def test_out_name(self):
221
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
222 223 224 225 226 227 228 229 230 231 232
            with fluid.program_guard(fluid.Program()):
                np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
                data = paddle.static.data(
                    name="X", shape=[-1, 1], dtype="float32"
                )
                out = eval("paddle.%s(data, name='Y')" % self.op_type)
                place = fluid.CPUPlace()
                exe = fluid.Executor(place)
                (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
                expected = eval("np.%s(np_x)" % self.op_type)
                np.testing.assert_allclose(result, expected, rtol=1e-05)
233 234 235 236 237 238 239

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = eval("paddle.%s(x).numpy()" % self.op_type)
            z_expected = eval("np.%s(np_x)" % self.op_type)
240
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)
241 242


C
chengduo 已提交
243
class TestSigmoid(TestActivation):
Q
qijun 已提交
244 245
    def setUp(self):
        self.op_type = "sigmoid"
Z
zxcd 已提交
246 247
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.sigmoid
248
        self.public_python_api = paddle.nn.functional.sigmoid
249
        self.init_dtype()
250
        self.init_shape()
251
        self.if_enable_cinn()
252
        np.random.seed(1024)
253
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
254 255 256 257
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
Q
qijun 已提交
258

259 260
        self.convert_input_output()

261 262 263
    def init_dtype(self):
        self.dtype = np.float32

264 265 266
    def if_enable_cinn(self):
        pass

267
    def test_check_grad(self):
268 269
        if self.dtype == np.float16:
            return
Z
zxcd 已提交
270
        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_prim=True)
271

272

273 274 275 276 277
class TestSigmoid_ZeroDim(TestSigmoid):
    def init_shape(self):
        self.shape = []


278
@unittest.skipIf(
R
ronnywang 已提交
279 280
    not core.is_compiled_with_cuda() or core.is_compiled_with_rocm(),
    "core is not compiled with CUDA",
281
)
282 283 284
class TestSigmoidBF16(OpTest):
    def setUp(self):
        self.op_type = "sigmoid"
Z
zxcd 已提交
285 286
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.sigmoid
287
        self.public_python_api = paddle.nn.functional.sigmoid
288
        self.init_dtype()
289
        self.init_shape()
290
        self.if_enable_cinn()
291
        np.random.seed(1024)
292
        x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
293 294 295 296 297 298 299 300 301 302
        out = 1 / (1 + np.exp(-x))

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

303 304 305
    def init_shape(self):
        self.shape = [11, 17]

306 307 308
    def if_enable_cinn(self):
        self.enable_cinn = False

309 310
    def test_check_output(self):
        place = core.CUDAPlace(0)
311
        # elementwise_pow doesn't support bfloat16, skip check_prim here.
312
        self.check_output_with_place(place, check_prim=True)
313 314 315

    def test_check_grad(self):
        place = core.CUDAPlace(0)
316
        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)
317 318


319 320 321 322 323 324 325 326
'''
class TestSigmoidBF16_ZeroDim(TestSigmoidBF16):

    def init_shape(self):
        self.shape = []
'''


M
minghaoBD 已提交
327 328 329
class TestSilu(TestActivation):
    def setUp(self):
        self.op_type = "silu"
Z
zxcd 已提交
330 331
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.silu
332
        self.public_python_api = paddle.nn.functional.silu
M
minghaoBD 已提交
333
        self.init_dtype()
334
        self.init_shape()
335
        self.if_enable_cinn()
M
minghaoBD 已提交
336 337

        np.random.seed(1024)
338
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
M
minghaoBD 已提交
339
        out = x / (np.exp(-x) + 1)
340
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
M
minghaoBD 已提交
341 342
        self.outputs = {'Out': out}

343 344
        self.convert_input_output()

M
minghaoBD 已提交
345 346 347
    def init_dtype(self):
        self.dtype = np.float32

348
    def if_enable_cinn(self):
349 350
        pass

M
minghaoBD 已提交
351
    def test_check_grad(self):
Z
zxcd 已提交
352
        self.check_grad(['X'], 'Out', check_prim=True)
M
minghaoBD 已提交
353 354


355 356 357
class TestSilu_ZeroDim(TestSilu):
    def init_shape(self):
        self.shape = []
Z
zxcd 已提交
358 359


M
minghaoBD 已提交
360 361 362 363
class TestSiluAPI(unittest.TestCase):
    # test paddle.nn.Silu, paddle.nn.functional.silu
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
364 365 366
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
M
minghaoBD 已提交
367
            else paddle.CPUPlace()
368
        )
M
minghaoBD 已提交
369 370

    def test_static_api(self):
371
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
372
            with paddle.static.program_guard(paddle.static.Program()):
373
                x = paddle.static.data('X', [11, 17])
W
wanghuancoder 已提交
374 375 376 377 378 379 380 381
                out1 = F.silu(x)
                m = paddle.nn.Silu()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = self.x_np / (1 + np.exp(-self.x_np))
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
M
minghaoBD 已提交
382 383 384 385 386 387 388 389

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.silu(x)
        m = paddle.nn.Silu()
        out2 = m(x)
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in [out1, out2]:
390
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
M
minghaoBD 已提交
391 392

    def test_errors(self):
393
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
394 395 396 397
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.silu, 1)
                # The input dtype must be float16, float32, float64.
398
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
399 400 401 402
                    name='x_int32', shape=[11, 17], dtype='int32'
                )
                self.assertRaises(TypeError, F.silu, x_int32)
                # support the input dtype is float16
403
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
404 405 406
                    name='x_fp16', shape=[11, 17], dtype='float16'
                )
                F.silu(x_fp16)
M
minghaoBD 已提交
407 408


C
chengduo 已提交
409
class TestLogSigmoid(TestActivation):
410 411
    def setUp(self):
        self.op_type = "logsigmoid"
W
wanghuancoder 已提交
412
        self.python_api = paddle.nn.functional.log_sigmoid
413
        self.init_dtype()
414
        self.init_shape()
415

416
        np.random.seed(2048)
417
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
418
        out = np.log(1 / (1 + np.exp(-x)))
419
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
420
        self.outputs = {'Out': out}
421

422 423
        self.convert_input_output()

424
    def test_check_grad(self):
425 426
        if self.dtype == np.float16:
            return
F
fengjiayi 已提交
427
        self.check_grad(['X'], 'Out', max_relative_error=0.008)
428 429


430 431 432 433 434
class TestLogSigmoid_ZeroDim(TestLogSigmoid):
    def init_shape(self):
        self.shape = []


435
class TestLogSigmoidAPI(unittest.TestCase):
436
    # test paddle.nn.LogSigmoid, paddle.nn.functional.log_sigmoid
437
    def setUp(self):
438
        np.random.seed(1024)
439
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
440 441 442
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
443
            else paddle.CPUPlace()
444
        )
445 446

    def test_static_api(self):
447
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
448
            with paddle.static.program_guard(paddle.static.Program()):
449
                x = paddle.static.data('X', [11, 17])
W
wanghuancoder 已提交
450 451 452 453 454 455 456 457
                out1 = F.log_sigmoid(x)
                m = paddle.nn.LogSigmoid()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
458 459 460

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
461
        out1 = F.log_sigmoid(x)
462 463 464 465
        m = paddle.nn.LogSigmoid()
        out2 = m(x)
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in [out1, out2]:
466
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
467 468

    def test_errors(self):
469
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
470 471 472 473
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.log_sigmoid, 1)
                # The input dtype must be float16, float32, float64.
474
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
475 476 477 478
                    name='x_int32', shape=[11, 17], dtype='int32'
                )
                self.assertRaises(TypeError, F.log_sigmoid, x_int32)
                # support the input dtype is float16
479
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
480 481 482
                    name='x_fp16', shape=[11, 17], dtype='float16'
                )
                F.log_sigmoid(x_fp16)
483 484


485
class TestTanh(TestActivation, TestParameter):
486 487
    def setUp(self):
        self.op_type = "tanh"
488
        self.prim_op_type = "prim"
W
wanghuancoder 已提交
489
        self.python_api = paddle.tanh
490
        self.public_python_api = paddle.tanh
491
        self.init_dtype()
492
        self.init_shape()
493
        self.if_enable_cinn()
494

495
        np.random.seed(1024)
496
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
497 498 499
        out = np.tanh(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
500
        self.convert_input_output()
501 502

    def test_check_grad(self):
503 504
        if self.dtype == np.float16:
            return
505
        self.check_grad(['X'], 'Out', check_prim=True)
506

507
    def init_dtype(self):
508
        # TODO If dtype is float64, the output (Out) has diff at CPUPlace
509 510 511 512
        # when using and not using inplace. Therefore, set dtype as float32
        # for now.
        self.dtype = np.float32

513 514 515
    def if_enable_cinn(self):
        pass

516

517 518 519 520 521
class TestTanh_ZeroDim(TestTanh):
    def init_shape(self):
        self.shape = []


W
WangXi 已提交
522 523 524 525
class TestTanhAPI(unittest.TestCase):
    # test paddle.tanh, paddle.nn.tanh, paddle.nn.functional.tanh
    def setUp(self):
        self.dtype = 'float32'
526
        np.random.seed(1024)
W
WangXi 已提交
527
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
528 529 530
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
W
WangXi 已提交
531
            else paddle.CPUPlace()
532
        )
533 534 535 536
        self.executed_api()

    def executed_api(self):
        self.tanh = F.tanh
W
WangXi 已提交
537 538

    def test_static_api(self):
539
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
540
            with paddle.static.program_guard(paddle.static.Program()):
541
                x = paddle.static.data('X', [10, 12], self.dtype)
W
wanghuancoder 已提交
542 543 544 545 546 547 548 549
                out1 = self.tanh(x)
                th = paddle.nn.Tanh()
                out2 = th(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = np.tanh(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
W
WangXi 已提交
550 551

    def test_dygraph_api(self):
Z
Zhou Wei 已提交
552
        x = paddle.to_tensor(self.x_np)
W
WangXi 已提交
553 554 555 556 557 558
        out1 = F.tanh(x)
        out2 = paddle.tanh(x)
        th = paddle.nn.Tanh()
        out3 = th(x)
        out_ref = np.tanh(self.x_np)
        for r in [out1, out2, out3]:
559
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
W
WangXi 已提交
560 561

    def test_errors(self):
562
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
563 564 565 566
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, self.tanh, 1)
                # The input dtype must be float16, float32.
567
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
568 569 570 571
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, self.tanh, x_int32)
                # support the input dtype is float16
572
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
573 574 575
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                self.tanh(x_fp16)
576 577 578 579 580 581


class TestTanhInplaceAPI(TestTanhAPI):
    # test paddle.tanh_
    def executed_api(self):
        self.tanh = paddle.tanh_
W
WangXi 已提交
582 583


584
class TestAtan(TestActivation, TestParameter):
585 586
    def setUp(self):
        self.op_type = "atan"
W
wanghuancoder 已提交
587
        self.python_api = paddle.atan
588
        self.init_dtype()
589
        self.init_shape()
590

591
        np.random.seed(1024)
592
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
593 594 595 596
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
597
        self.convert_input_output()
598 599 600 601

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
602
        self.check_grad(['X'], 'Out')
603

W
WuHaobo 已提交
604
    def test_out_name(self):
605
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
606 607 608 609 610 611 612 613 614 615 616
            with fluid.program_guard(fluid.Program()):
                np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
                data = paddle.static.data(
                    name="X", shape=[-1, 1], dtype="float32"
                )
                out = paddle.atan(data, name='Y')
                place = fluid.CPUPlace()
                exe = fluid.Executor(place)
                (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
                expected = np.arctan(np_x)
                self.assertEqual(result, expected)
W
WuHaobo 已提交
617

618 619 620 621 622 623 624 625
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.atan(x).numpy()
            z_expected = np.arctan(np_x)
            self.assertEqual(z, z_expected)

626

627
class TestAtan_ZeroDim(TestAtan):
628 629 630 631
    def init_shape(self):
        self.shape = []


632 633 634
class TestSinh(TestActivation):
    def setUp(self):
        self.op_type = "sinh"
W
wanghuancoder 已提交
635
        self.python_api = paddle.sinh
636
        self.init_dtype()
637
        self.init_shape()
638

639
        np.random.seed(1024)
640
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
641 642 643 644
        out = np.sinh(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

645 646
        self.convert_input_output()

647 648 649 650 651
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

652 653 654 655 656 657 658

class TestSinh_ZeroDim(TestSinh):
    def init_shape(self):
        self.shape = []


class TestSinhAPI(unittest.TestCase):
659 660 661 662
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
663
            z = paddle.sinh(x).numpy()
664
            z_expected = np.sinh(np_x)
665
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)
666 667

    def test_api(self):
668
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690
            test_data_shape = [11, 17]
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                    "float32"
                )
                data_x = paddle.static.data(
                    name="data_x",
                    shape=test_data_shape,
                    dtype="float32",
                )

                pd_sinh_out = paddle.sinh(data_x)
                exe = fluid.Executor(place=fluid.CPUPlace())
                exe.run(fluid.default_startup_program())
                (np_sinh_res,) = exe.run(
                    fluid.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[pd_sinh_out],
                )

            expected_res = np.sinh(input_x)
            np.testing.assert_allclose(np_sinh_res, expected_res, rtol=1e-05)
691 692 693 694

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
695 696 697
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
698 699
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
700
            loss = paddle.sinh(var)
701 702 703 704 705 706 707
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestSinhOpError(unittest.TestCase):
    def test_errors(self):
708
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
709 710 711 712
            with program_guard(Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, paddle.sinh, 1)
                # The input dtype must be float16, float32, float64.
713
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
714 715 716 717
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, paddle.sinh, x_int32)
                # support the input dtype is float16
718
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
719 720 721
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                paddle.sinh(x_fp16)
722 723 724 725 726


class TestCosh(TestActivation):
    def setUp(self):
        self.op_type = "cosh"
W
wanghuancoder 已提交
727
        self.python_api = paddle.cosh
728
        self.init_dtype()
729
        self.init_shape()
730

731
        np.random.seed(1024)
732
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
733 734 735 736
        out = np.cosh(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

737 738
        self.convert_input_output()

739 740 741 742 743
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

744 745 746 747 748 749 750

class TestCosh_ZeroDim(TestCosh):
    def init_shape(self):
        self.shape = []


class TestCoshAPI(unittest.TestCase):
751 752 753 754
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
755
            z = paddle.cosh(x).numpy()
756
            z_expected = np.cosh(np_x)
757
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)
758 759

    def test_api(self):
760
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782
            test_data_shape = [11, 17]
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                    "float32"
                )
                data_x = paddle.static.data(
                    name="data_x",
                    shape=test_data_shape,
                    dtype="float32",
                )

                pd_cosh_out = paddle.cosh(data_x)
                exe = fluid.Executor(place=fluid.CPUPlace())
                exe.run(fluid.default_startup_program())
                (np_cosh_res,) = exe.run(
                    fluid.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[pd_cosh_out],
                )

            expected_res = np.cosh(input_x)
            np.testing.assert_allclose(np_cosh_res, expected_res, rtol=1e-05)
783 784 785 786

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
787 788 789
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
790 791
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
792
            loss = paddle.cosh(var)
793 794 795 796 797 798 799
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestCoshOpError(unittest.TestCase):
    def test_errors(self):
800
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
801 802 803 804
            with program_guard(Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, paddle.cosh, 1)
                # The input dtype must be float16, float32, float64.
805
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
806 807 808 809
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, paddle.cosh, x_int32)
                # support the input dtype is float16
810
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
811 812 813
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                paddle.cosh(x_fp16)
814 815


816 817 818 819 820 821
def ref_tanhshrink(x):
    out = x - np.tanh(x)
    return out


class TestTanhshrink(TestActivation):
K
Kavya Srinet 已提交
822 823
    def setUp(self):
        self.op_type = "tanh_shrink"
W
wanghuancoder 已提交
824
        self.python_api = paddle.nn.functional.tanhshrink
825
        self.init_dtype()
826
        self.init_shape()
827

828
        np.random.seed(1024)
829
        x = np.random.uniform(10, 20, self.shape).astype(self.dtype)
830
        out = ref_tanhshrink(x)
831
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
832
        self.outputs = {'Out': out}
K
Kavya Srinet 已提交
833

834 835
        self.convert_input_output()

K
Kavya Srinet 已提交
836
    def test_check_grad(self):
837 838
        if self.dtype == np.float16:
            return
839
        self.check_grad(['X'], 'Out')
K
Kavya Srinet 已提交
840

841

842 843 844 845 846
class TestTanhshrink_ZeroDim(TestTanhshrink):
    def init_shape(self):
        self.shape = []


847 848 849
class TestTanhshrinkAPI(unittest.TestCase):
    # test paddle.nn.Tanhshrink, paddle.nn.functional.tanhshrink
    def setUp(self):
850
        np.random.seed(1024)
851
        self.x_np = np.random.uniform(10, 20, [10, 17]).astype(np.float64)
852 853 854
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
855
            else paddle.CPUPlace()
856
        )
857 858

    def test_static_api(self):
859
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
860
            with paddle.static.program_guard(paddle.static.Program()):
861
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
W
wanghuancoder 已提交
862 863 864 865 866 867 868 869
                out1 = F.tanhshrink(x)
                tanhshrink = paddle.nn.Tanhshrink()
                out2 = tanhshrink(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_tanhshrink(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
870 871 872 873 874 875 876 877

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanhshrink(x)
        tanhshrink = paddle.nn.Tanhshrink()
        out2 = tanhshrink(x)
        out_ref = ref_tanhshrink(self.x_np)
        for r in [out1, out2]:
878
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
879 880

    def test_errors(self):
881
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
882 883 884 885
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.tanhshrink, 1)
                # The input dtype must be float16, float32, float64.
886
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
887 888 889 890
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.tanhshrink, x_int32)
                # support the input dtype is float16
891
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
892 893 894
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.tanhshrink(x_fp16)
895 896


897 898 899 900 901 902
def ref_hardshrink(x, threshold):
    out = np.copy(x)
    out[(out >= -threshold) & (out <= threshold)] = 0
    return out


C
chengduo 已提交
903
class TestHardShrink(TestActivation):
904 905
    def setUp(self):
        self.op_type = "hard_shrink"
W
wanghuancoder 已提交
906
        self.python_api = paddle.nn.functional.hardshrink
907
        self.init_dtype()
908
        self.init_shape()
909

910 911
        self.threshold = 0.5
        self.set_attrs()
912
        np.random.seed(1024)
913
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) * 10
914
        out = ref_hardshrink(x, self.threshold)
915 916
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
917

918
        self.attrs = {'threshold': self.threshold}
919 920

        self.convert_input_output()
921

922 923 924
    def init_shape(self):
        self.shape = [10, 12]

925 926 927
    def set_attrs(self):
        pass

928
    def test_check_grad(self):
929 930
        if self.dtype == np.float16:
            return
931
        self.check_grad(['X'], 'Out')
932 933


934 935 936 937 938
class TestHardShrink_threshold_negative(TestHardShrink):
    def set_attrs(self):
        self.threshold = -0.1


939 940 941 942 943 944 945 946
'''
class TestHardShrink_ZeroDim(TestHardShrink):

    def init_shape(self):
        self.shape = []
'''


947 948 949
class TestHardShrinkAPI(unittest.TestCase):
    # test paddle.nn.Hardshrink, paddle.nn.functional.hardshrink
    def setUp(self):
950
        np.random.seed(1024)
951
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
952 953 954
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
955
            else paddle.CPUPlace()
956
        )
957 958

    def test_static_api(self):
959
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
960
            with paddle.static.program_guard(paddle.static.Program()):
961
                x = paddle.static.data('X', [10, 12], dtype="float32")
W
wanghuancoder 已提交
962 963 964 965 966 967 968 969
                out1 = F.hardshrink(x)
                hd = paddle.nn.Hardshrink()
                out2 = hd(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_hardshrink(self.x_np, 0.5)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
970 971

    def test_dygraph_api(self):
Z
Zhou Wei 已提交
972
        x = paddle.to_tensor(self.x_np)
973 974 975 976 977
        out1 = F.hardshrink(x)
        hd = paddle.nn.Hardshrink()
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in [out1, out2]:
978
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
979 980 981 982 983 984

        out1 = F.hardshrink(x, 0.6)
        hd = paddle.nn.Hardshrink(0.6)
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.6)
        for r in [out1, out2]:
985
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
986

987
    def test_errors(self):
988
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
989 990 991 992
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.hardshrink, 1)
                # The input dtype must be float16, float32, float64.
993
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
994 995 996 997
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.hardshrink, x_int32)
                # support the input dtype is float16
998
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
999 1000 1001
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.hardshrink(x_fp16)
1002 1003


1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014
def ref_hardtanh(x, min=-1.0, max=1.0):
    out = np.copy(x)
    out[np.abs(x - min) < 0.005] = min + 0.02
    out[np.abs(x - max) < 0.005] = max + 0.02
    out = np.minimum(np.maximum(x, min), max)
    return out


class TestHardtanhAPI(unittest.TestCase):
    # test paddle.nn.Hardtanh, paddle.nn.functional.hardtanh
    def setUp(self):
1015
        np.random.seed(1024)
1016
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
1017 1018 1019
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
1020
            else paddle.CPUPlace()
1021
        )
1022 1023

    def test_static_api(self):
1024
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
1025
            with paddle.static.program_guard(paddle.static.Program()):
1026
                x = paddle.static.data('X', [10, 12], dtype="float32")
W
wanghuancoder 已提交
1027 1028 1029 1030 1031 1032 1033 1034
                out1 = F.hardtanh(x)
                m = paddle.nn.Hardtanh()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_hardtanh(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
1035 1036

    def test_dygraph_api(self):
Z
Zhou Wei 已提交
1037
        x = paddle.to_tensor(self.x_np)
1038 1039 1040 1041 1042
        out1 = F.hardtanh(x)
        m = paddle.nn.Hardtanh()
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np)
        for r in [out1, out2]:
1043
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
1044 1045 1046 1047 1048 1049

        out1 = F.hardtanh(x, -2.0, 2.0)
        m = paddle.nn.Hardtanh(-2.0, 2.0)
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np, -2.0, 2.0)
        for r in [out1, out2]:
1050
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
1051 1052

    def test_errors(self):
1053
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
1054 1055 1056 1057
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.hardtanh, 1)
                # The input dtype must be float16, float32, float64.
1058
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
1059 1060 1061 1062
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.hardtanh, x_int32)
                # support the input dtype is float16
1063
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
1064 1065 1066
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.hardtanh(x_fp16)
1067 1068


1069 1070 1071
def ref_softshrink(x, threshold=0.5):
    out = np.copy(x)
    out = (out < -threshold) * (out + threshold) + (out > threshold) * (
1072 1073
        out - threshold
    )
1074 1075 1076 1077
    return out


class TestSoftshrink(TestActivation):
1078 1079
    def setUp(self):
        self.op_type = "softshrink"
1080
        self.python_api = paddle.nn.functional.softshrink
1081
        self.init_dtype()
1082
        self.init_shape()
1083

1084
        threshold = 0.8
1085

1086
        np.random.seed(1023)
1087
        x = np.random.uniform(0.25, 10, self.shape).astype(self.dtype)
1088
        out = ref_softshrink(x, threshold)
1089 1090

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
1091
        self.outputs = {'Out': out}
1092

1093 1094
        self.attrs = {"lambda": threshold}

1095
    def test_check_grad(self):
1096 1097
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
1098
        self.check_grad(['X'], 'Out')
1099

1100

1101 1102 1103 1104 1105
class TestSoftshrink_ZeroDim(TestSoftshrink):
    def init_shape(self):
        self.shape = []


1106 1107 1108 1109
class TestSoftshrinkAPI(unittest.TestCase):
    # test paddle.nn.Softshrink, paddle.nn.functional.softshrink
    def setUp(self):
        self.threshold = 0.8
1110
        np.random.seed(1024)
1111
        self.x_np = np.random.uniform(0.25, 10, [10, 12]).astype(np.float64)
1112 1113 1114
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
1115
            else paddle.CPUPlace()
1116
        )
1117 1118

    def test_static_api(self):
1119
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
1120
            with paddle.static.program_guard(paddle.static.Program()):
1121
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
W
wanghuancoder 已提交
1122 1123 1124 1125 1126 1127 1128 1129
                out1 = F.softshrink(x, self.threshold)
                softshrink = paddle.nn.Softshrink(self.threshold)
                out2 = softshrink(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_softshrink(self.x_np, self.threshold)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)
1130 1131 1132 1133 1134 1135 1136 1137

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.softshrink(x, self.threshold)
        softshrink = paddle.nn.Softshrink(self.threshold)
        out2 = softshrink(x)
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in [out1, out2]:
1138
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
1139

1140
    def test_errors(self):
1141
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
1142 1143 1144 1145
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.softshrink, 1)
                # The input dtype must be float16, float32, float64.
1146
                x_int32 = paddle.static.data(
W
wanghuancoder 已提交
1147 1148 1149 1150
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.softshrink, x_int32)
                # The threshold must be no less than zero
1151
                x_fp32 = paddle.static.data(
W
wanghuancoder 已提交
1152 1153 1154 1155
                    name='x_fp32', shape=[12, 10], dtype='float32'
                )
                self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
                # support the input dtype is float16
1156
                x_fp16 = paddle.static.data(
W
wanghuancoder 已提交
1157 1158 1159
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.softshrink(x_fp16)
1160 1161


1162
class TestSqrt(TestActivation, TestParameter):
1163 1164
    def setUp(self):
        self.op_type = "sqrt"
1165
        self.prim_op_type = "prim"
1166
        self.python_api = paddle.sqrt
1167 1168
        self.public_python_api = paddle.sqrt

1169
        self.init_dtype()
1170
        self.init_shape()
1171
        self.if_enable_cinn()
1172

1173
        np.random.seed(1023)
1174
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
1175 1176 1177 1178
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1179
        self.convert_input_output()
1180

1181 1182 1183
    def if_enable_cinn(self):
        pass

1184
    def test_check_grad(self):
1185 1186
        if self.dtype == np.float16:
            return
1187
        self.check_grad(['X'], 'Out', check_prim=True)
1188 1189

    def test_check_output(self):
W
wanghuancoder 已提交
1190
        self.check_output()
1191

1192

1193 1194 1195 1196 1197
class TestSqrtPrimFp32(TestActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "prim"
        self.python_api = paddle.sqrt
1198
        self.public_python_api = paddle.sqrt
1199 1200
        self.init_dtype()
        self.init_shape()
1201
        self.if_enable_cinn()
1202 1203 1204 1205 1206 1207 1208 1209 1210 1211
        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
1212
        self.check_grad(['X'], 'Out', check_prim=True)
1213 1214

    def test_check_output(self):
W
wanghuancoder 已提交
1215
        self.check_output()
1216 1217 1218 1219

    def init_dtype(self):
        self.dtype = np.float32

1220 1221 1222
    def if_enable_cinn(self):
        pass

1223

1224 1225 1226 1227
class TestSqrt_ZeroDim(TestSqrt):
    def init_shape(self):
        self.shape = []

1228

1229
@unittest.skipIf(
R
ronnywang 已提交
1230 1231
    not core.is_compiled_with_cuda() or core.is_compiled_with_rocm(),
    "core is not compiled with CUDA",
1232
)
1233 1234 1235
class TestSqrtBF16(OpTest):
    def setUp(self):
        self.op_type = "sqrt"
1236
        self.prim_op_type = "prim"
1237
        self.python_api = paddle.sqrt
1238
        self.public_python_api = paddle.sqrt
1239
        self.init_dtype()
1240
        self.init_shape()
1241
        self.if_enable_cinn()
1242 1243

        np.random.seed(1023)
1244
        x = np.random.uniform(0.1, 1, self.shape).astype(np.float32)
1245 1246 1247 1248 1249 1250 1251 1252 1253 1254
        out = np.sqrt(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

1255 1256 1257
    def init_shape(self):
        self.shape = [11, 17]

1258 1259 1260
    def if_enable_cinn(self):
        self.enable_cinn = False

1261 1262
    def test_check_output(self):
        place = core.CUDAPlace(0)
W
wanghuancoder 已提交
1263
        self.check_output_with_place(place)
1264 1265 1266

    def test_check_grad(self):
        place = core.CUDAPlace(0)
W
wanghuancoder 已提交
1267
        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)
1268 1269


M
mhy-666 已提交
1270 1271 1272 1273 1274
class TestSqrtComp(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "comp"
        self.python_api = paddle.sqrt
1275
        self.public_python_api = paddle.sqrt
M
mhy-666 已提交
1276 1277
        self.init_dtype()
        self.init_shape()
1278
        self.if_enable_cinn()
M
mhy-666 已提交
1279 1280 1281 1282 1283 1284 1285

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1286
        self.convert_input_output()
1287 1288 1289

    def if_enable_cinn(self):
        pass
M
mhy-666 已提交
1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_dygraph=True, check_prim=True)

    def test_check_output(self):
        self.check_output(check_dygraph=True, check_prim=True)


class TestSqrtCompFp32(TestActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "comp"
        self.python_api = paddle.sqrt
1305
        self.public_python_api = paddle.sqrt
M
mhy-666 已提交
1306 1307
        self.init_dtype()
        self.init_shape()
1308
        self.if_enable_cinn()
M
mhy-666 已提交
1309 1310 1311 1312 1313 1314
        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1315 1316 1317

    def if_enable_cinn(self):
        pass
M
mhy-666 已提交
1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_dygraph=True, check_prim=True)

    def test_check_output(self):
        self.check_output(check_dygraph=True, check_prim=True)

    def init_dtype(self):
        self.dtype = np.float32


Z
zhoukunsheng 已提交
1331 1332 1333
class TestRsqrt(TestActivation):
    def setUp(self):
        self.op_type = "rsqrt"
1334
        self.prim_op_type = "comp"
Z
zyfncg 已提交
1335
        self.python_api = paddle.rsqrt
1336
        self.public_python_api = paddle.rsqrt
Z
zhoukunsheng 已提交
1337
        self.init_dtype()
1338
        self.init_shape()
1339
        self.if_enable_cinn()
Z
zhoukunsheng 已提交
1340

1341
        np.random.seed(1024)
1342
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
Z
zhoukunsheng 已提交
1343 1344 1345 1346
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1347
        self.convert_input_output()
Z
zhoukunsheng 已提交
1348

1349 1350 1351
    def init_shape(self):
        self.shape = [10, 12]

1352 1353 1354
    def if_enable_cinn(self):
        pass

1355 1356 1357
    def test_check_output(self):
        self.check_output(check_prim=True)

Z
zhoukunsheng 已提交
1358 1359 1360
    def test_check_grad(self):
        if self.dtype == np.float16:
            return
1361 1362 1363 1364 1365 1366
        self.check_grad(
            ['X'],
            'Out',
            max_relative_error=0.0005,
            check_prim=True,
        )
Z
zhoukunsheng 已提交
1367 1368


1369 1370 1371
class TestRsqrt_ZeroDim(TestRsqrt):
    def init_shape(self):
        self.shape = []
1372 1373 1374

    def if_enable_cinn(self):
        self.enable_cinn = False
1375 1376


C
chengduo 已提交
1377
class TestAbs(TestActivation):
1378 1379
    def setUp(self):
        self.op_type = "abs"
1380 1381
        self.prim_op_type = "prim"
        self.python_api = paddle.abs
1382
        self.public_python_api = paddle.abs
1383
        self.init_dtype()
1384
        self.init_shape()
1385
        self.if_enable_cinn()
1386

1387
        np.random.seed(1024)
1388
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
C
chengduo 已提交
1389
        # Because we set delta = 0.005 in calculating numeric gradient,
Q
qijun 已提交
1390
        # if x is too small, such as 0.002, x_neg will be -0.003
C
chengduo 已提交
1391
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
Q
qijun 已提交
1392 1393
        # we should avoid this
        x[np.abs(x) < 0.005] = 0.02
1394 1395 1396 1397
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1398
        self.convert_input_output()
1399

1400 1401 1402
    def init_shape(self):
        self.shape = [4, 25]

1403 1404 1405
    def if_enable_cinn(self):
        pass

1406
    def test_check_grad(self):
1407 1408
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
1409
        self.check_grad(['X'], 'Out', check_prim=True)
1410

1411

1412 1413 1414 1415 1416
class TestAbs_ZeroDim(TestAbs):
    def init_shape(self):
        self.shape = []


C
chengduo 已提交
1417
class TestCeil(TestActivation):
D
dzhwinter 已提交
1418 1419
    def setUp(self):
        self.op_type = "ceil"
1420
        self.python_api = paddle.ceil
1421
        self.init_dtype()
1422
        self.init_shape()
1423

1424
        np.random.seed(1024)
1425
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1426 1427 1428 1429
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1430
        self.convert_input_output()
D
dzhwinter 已提交
1431

1432 1433 1434
    def init_shape(self):
        self.shape = [10, 12]

D
dzhwinter 已提交
1435
    # The same reason with TestFloor
C
chengduo 已提交
1436
    def test_check_grad(self):
1437 1438 1439
        pass


1440 1441 1442 1443 1444
class TestCeil_ZeroDim(TestCeil):
    def init_shape(self):
        self.shape = []


C
chengduo 已提交
1445
class TestFloor(TestActivation):
D
dzhwinter 已提交
1446 1447
    def setUp(self):
        self.op_type = "floor"
1448
        self.prim_op_type = "prim"
1449
        self.python_api = paddle.floor
1450
        self.public_python_api = paddle.floor
1451
        self.init_dtype()
1452
        self.init_shape()
1453
        self.if_enable_cinn()
1454

1455
        np.random.seed(1024)
1456
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
1457 1458 1459 1460
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
1461
        self.convert_input_output()
D
dzhwinter 已提交
1462

1463 1464 1465
    def init_shape(self):
        self.shape = [10, 12]

1466 1467 1468
    def if_enable_cinn(self):
        pass

D
dzhwinter 已提交
1469
    # the gradient on floor, ceil, round is undefined.
1470
    # we return zero as gradient, but the numpy return nan
C
chengduo 已提交
1471 1472
    # The same reason with TestFloor
    def test_check_grad(self):
1473 1474
        pass

1475
    def test_check_grad_for_prim(self):
1476 1477 1478 1479
        # the gradient on floor, ceil, round is undefined.
        # we return zero as gradient, but the numpy return nan.
        # for prim, we compare result with eager python api,
        # so, we use only_prim flag to express we only test prim.
        if core.is_compiled_with_cuda():
            self.check_grad_with_place(
                paddle.CUDAPlace(0),
                ['X'],
                'Out',
                check_prim=True,
                only_check_prim=True,
            )


class TestFloor_ZeroDim(TestFloor):
    def init_shape(self):
        self.shape = []


class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.python_api = paddle.cos
        self.public_python_api = paddle.cos
        self.prim_op_type = "prim"
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.cos(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)

    def if_enable_cinn(self):
        pass


class TestCos_ZeroDim(TestCos):
    def init_shape(self):
        self.shape = []


class TestTan(TestActivation):
    def setUp(self):
        np.random.seed(1024)
        self.op_type = "tan"
        self.python_api = paddle.tan
        self.init_dtype()
        self.init_shape()

        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

        out = np.tan(self.x_np)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x_np)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTan_ZeroDim(TestTan):
    def init_shape(self):
        self.shape = []


class TestTanAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(1024)
        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out_test = paddle.tan(x)
        out_ref = np.tan(self.x_np)
        np.testing.assert_allclose(out_ref, out_test.numpy(), rtol=1e-05)

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [11, 17], self.dtype)
                out = paddle.tan(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = np.tan(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = paddle.to_tensor(input_x)
            var.stop_gradient = False
            loss = paddle.tan(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
        self.python_api = paddle.acos
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcos_ZeroDim(TestAcos):
    def init_shape(self):
        self.shape = []


class TestSin(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sin"
        self.python_api = paddle.sin
        self.public_python_api = paddle.sin
        self.prim_op_type = "prim"
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.sin(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)

    def if_enable_cinn(self):
        pass


class TestSin_ZeroDim(TestSin):
    def init_shape(self):
        self.shape = []


class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
        self.python_api = paddle.asin
        self.init_dtype()
        self.init_shape()

        np.random.seed(2048)
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsin_ZeroDim(TestAsin):
    def init_shape(self):
        self.shape = []


class TestAcosh(TestActivation):
    def setUp(self):
        self.op_type = "acosh"
        self.python_api = paddle.acosh
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(2, 3, self.shape).astype(self.dtype)
        out = np.arccosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcosh_ZeroDim(TestAcosh):
    def init_shape(self):
        self.shape = []


class TestAsinh(TestActivation):
    def setUp(self):
        self.op_type = "asinh"
        self.python_api = paddle.asinh
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.arcsinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsinh_ZeroDim(TestAsinh):
    def init_shape(self):
        self.shape = []


class TestAtanh(TestActivation):
    def setUp(self):
        self.op_type = "atanh"
        self.python_api = paddle.atanh
        self.init_dtype()
        self.init_shape()

        np.random.seed(400)
        x = np.random.uniform(-0.9, 0.9, self.shape).astype(self.dtype)
        out = np.arctanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAtanh_ZeroDim(TestAtanh):
    def init_shape(self):
        self.shape = []


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.python_api = paddle.round
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        pass


class TestRound_ZeroDim(TestRound):
    def init_shape(self):
        self.shape = []


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.python_api = paddle.nn.functional.relu
        self.prim_op_type = "comp"
        self.public_python_api = paddle.nn.functional.relu
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)
        self.inputs = {'X': x}

        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)

    def test_check_output(self):
        self.check_output(check_prim=True)

    def if_enable_cinn(self):
        pass


class TestRelu_ZeroDim(TestRelu):
    def init_shape(self):
        self.shape = []


class TestReluAPI(unittest.TestCase):
    # test paddle.nn.ReLU, paddle.nn.functional.relu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.relu = F.relu

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out1 = self.relu(x)
                m = paddle.nn.ReLU()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = np.maximum(self.x_np, 0)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ReLU()
        out1 = m(x)
        out2 = self.relu(x)
        out_ref = np.maximum(self.x_np, 0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, self.relu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[10, 12], dtype='int32'
                )
                self.assertRaises(TypeError, self.relu, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[10, 12], dtype='float16'
                )
                self.relu(x_fp16)


class TestReluInplaceAPI(TestReluAPI):
    # test paddle.nn.functional.relu_
    def executed_api(self):
        self.relu = F.relu_


def ref_leaky_relu(x, alpha=0.01):
    out = np.copy(x)
    out[out < 0] *= alpha
    return out
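# Illustrative sanity check of the reference above (values chosen here only
# for illustration): ref_leaky_relu(np.array([-1.0, 2.0]), 0.1) gives
# array([-0.1, 2.0]); positive inputs pass through unchanged.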


class TestLeakyRelu(TestActivation):
    def get_alpha(self):
        return 0.02

    def setUp(self):
        self.op_type = "leaky_relu"
        self.python_api = paddle.nn.functional.leaky_relu
        self.public_python_api = paddle.nn.functional.leaky_relu
        self.prim_op_type = "comp"
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()
        alpha = self.get_alpha()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.05
        out = ref_leaky_relu(x, alpha)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {'alpha': alpha}
        self.convert_input_output()

    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        self.check_output(check_prim=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestLeakyReluAlpha1(TestLeakyRelu):
    def get_alpha(self):
        return 2


class TestLeakyReluAlpha2(TestLeakyRelu):
    def get_alpha(self):
        return -0.01


class TestLeakyReluAlpha3(TestLeakyRelu):
    def get_alpha(self):
        return -2.0


class TestLeakyRelu_ZeroDim(TestLeakyRelu):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestLeakyReluAPI(unittest.TestCase):
    # test paddle.nn.LeakyReLU, paddle.nn.functional.leaky_relu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out1 = F.leaky_relu(x)
                m = paddle.nn.LeakyReLU()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_leaky_relu(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.leaky_relu(x)
        m = paddle.nn.LeakyReLU()
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.leaky_relu(x, 0.6)
        m = paddle.nn.LeakyReLU(0.6)
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np, 0.6)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.leaky_relu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.leaky_relu, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.leaky_relu(x_fp16)


def gelu(x, approximate):
    if approximate:
        y_ref = (
            0.5
            * x
            * (
                1.0
                + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3)))
            )
        )
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)
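# approximate=True is the tanh-based GELU approximation, approximate=False the
# exact erf form. Both map 0 -> 0, and for x = 1.0 they give roughly 0.8413 vs.
# 0.8412, so the two branches agree closely but not bit-for-bit.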


class TestGeluApproximate(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.gelu
        self.public_python_api = paddle.nn.functional.gelu
        self.init_dtype()
        self.init_shape()
        approximate = True
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

        # The backward decomposition of gelu is inconsistent with the raw
        # kernel on the CPU device, so lower the threshold to 1e-8 to make
        # the unittest pass.
        self.rev_comp_rtol = 1e-8
        self.rev_comp_atol = 1e-8
        # Cumulative error occurs between comp and cinn, so cinn_rtol is
        # also set to 1e-8, matching rev_comp_rtol.
        self.cinn_rtol = 1e-8
        self.cinn_atol = 1e-8

    def test_check_output(self):
        self.check_output(check_prim=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.gelu
        self.public_python_api = paddle.nn.functional.gelu
        self.init_dtype()
        self.init_shape()
        approximate = False
        np.random.seed(2048)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = gelu(x, approximate)
        self.if_enable_cinn()

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
        self.attrs = {"approximate": approximate}
        # The backward decomposition of gelu is inconsistent with the raw
        # kernel on the CPU device, so lower the threshold to 1e-8 to make
        # the unittest pass.
        self.rev_comp_rtol = 1e-8
        self.rev_comp_atol = 1e-8
        # Cumulative error occurs between comp and cinn, so cinn_rtol is
        # also set to 1e-8, matching rev_comp_rtol.
        self.cinn_rtol = 1e-8
        self.cinn_atol = 1e-8

    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        self.check_output(check_prim=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestGelu_ZeroDim(TestGelu):
    def init_shape(self):
        self.shape = []


class TestGELUAPI(unittest.TestCase):
    # test paddle.nn.GELU, paddle.nn.functional.gelu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.enable_cinn = False

        # The backward decomposition of gelu is inconsistent with the raw
        # kernel on the CPU device, so lower the threshold to 1e-8 to make
        # the unittest pass.
        self.rev_comp_rtol = 1e-8
        self.rev_comp_atol = 1e-8

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [11, 17], dtype="float32")
                out1 = F.gelu(x)
                m = paddle.nn.GELU()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = gelu(self.x_np, False)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.gelu(x)
        m = paddle.nn.GELU()
        out2 = m(x)
        out_ref = gelu(self.x_np, False)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.gelu(x, True)
        m = paddle.nn.GELU(True)
        out2 = m(x)
        out_ref = gelu(self.x_np, True)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.gelu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[11, 17], dtype='int32'
                )
                self.assertRaises(TypeError, F.gelu, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[11, 17], dtype='float16'
                )
                F.gelu(x_fp16)


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.python_api = paddle.nn.functional.hardtanh
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same reason as in TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': t}
        self.convert_input_output()
        self.attrs = {'t_min': t_min, 't_max': t_max}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


def ref_relu6(x, threshold=6.0):
    out = np.copy(x)
    out[np.abs(x - threshold) < 0.005] = threshold + 0.02
    out = np.minimum(np.maximum(x, 0), threshold)
    return out
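# Note: the masking assignment above is overwritten by the clamp on the next
# line, so the reference is simply relu6(x) = min(max(x, 0), threshold), e.g.
# -1 -> 0, 3 -> 3 and 8 -> 6 with the default threshold of 6.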


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.relu6

        np.random.seed(1024)
        x = np.random.uniform(-1, 10, self.shape).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_relu6(x)

        self.attrs = {'threshold': 6.0}

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRelu6_ZeroDim(TestRelu6):
    def init_shape(self):
        self.shape = []


class TestRelu6API(unittest.TestCase):
    # test paddle.nn.ReLU6, paddle.nn.functional.relu6
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 10, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.relu6(x)
                relu6 = paddle.nn.ReLU6()
                out2 = relu6(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_relu6(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.relu6(x)
        relu6 = paddle.nn.ReLU6()
        out2 = relu6(x)
        out_ref = ref_relu6(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out = paddle.nn.functional.relu6(x)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_relu6(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.relu6, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.relu6, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.relu6(x_fp16)


class TestRelu6APIWarnings(unittest.TestCase):
    def test_warnings(self):
        with paddle.fluid.framework._static_guard():
            with warnings.catch_warnings(record=True) as context:
                warnings.simplefilter("always")

                helper = LayerHelper("relu6")
                data = paddle.static.data(
                    name='data', shape=[None, 3, 32, 32], dtype='float32'
                )
                out = helper.create_variable_for_type_inference(
                    dtype=data.dtype
                )
                os.environ['FLAGS_print_extra_attrs'] = "1"
                helper.append_op(
                    type="relu6",
                    inputs={'X': data},
                    outputs={'Out': out},
                    attrs={'threshold': 6.0},
                )
                self.assertTrue(
                    "op relu6 use extra_attr: threshold"
                    in str(context[-1].message)
                )
                os.environ['FLAGS_print_extra_attrs'] = "0"


def ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0):
    x_dtype = x.dtype
    if x_dtype == 'float16':
        x = x.astype('float32')
    return (
        x * np.minimum(np.maximum(x + offset, 0.0), threshold) / scale
    ).astype(x_dtype)
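# The reference computes hardswish(x) = x * clip(x + offset, 0, threshold) / scale,
# so with the defaults it maps -3 -> 0, 0 -> 0 and 3 -> 3; float16 inputs are
# promoted to float32 for the computation and cast back at the end.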


class TestHardSwish(TestActivation):
    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()
        self.init_shape()
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.hardswish
        self.public_python_api = paddle.nn.functional.hardswish

        np.random.seed(1024)
        x = np.random.uniform(-6, 6, self.shape).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        # The same reason as in TestAbs
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
        out = ref_hardswish(x, threshold, scale, offset)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}

    def init_shape(self):
        self.shape = [10, 12]

    def if_only_check_prim(self):
        return False

    def test_check_grad(self):
        self.check_grad(
            ['X'],
            'Out',
            check_prim=True,
            only_check_prim=self.if_only_check_prim(),
        )

    def test_check_output(self):
        self.check_output(check_prim=True)


class TestHardSwish_ZeroDim(TestHardSwish):
    def init_shape(self):
        self.shape = []


class TestHardswishAPI(unittest.TestCase):
    # test paddle.nn.Hardswish, paddle.nn.functional.hardswish
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.hardswish(x)
                m = paddle.nn.Hardswish()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_hardswish(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor([11648.0, 11448.0])
        out1 = F.hardswish(x)
        m = paddle.nn.Hardswish()
        out2 = m(x)
        out_ref = [11648.0, 11448.0]
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out = paddle.nn.functional.hardswish(x)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_hardswish(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        x = paddle.to_tensor(self.x_np)
        out = paddle.nn.functional.hardswish(x)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.hardswish, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.hardswish, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.hardswish(x_fp16)


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        np.random.seed(4096)
        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason as in TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log(np.exp(t) + 1)
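        # i.e. soft_relu(x) = log(1 + exp(clip(x, -threshold, threshold)))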

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
        self.attrs = {'threshold': threshold}

    def test_check_output(self):
        self.check_output(check_dygraph=False)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.02, check_dygraph=False
        )


def elu(x, alpha):
    out_ref = np.where(x > 0, x, alpha * (np.exp(x) - 1))
    return out_ref.astype(x.dtype)
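# ELU reference: identity for x > 0 and alpha * (exp(x) - 1) otherwise; for
# example elu(-1.0, 1.0) is about -0.632, while positive inputs are unchanged.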


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.elu

        np.random.seed(1024)
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
        alpha = self.get_alpha()
        out = elu(x, alpha)
        # Note: unlike other ReLU extensions, point 0 on the standard ELU
        # function (i.e. alpha = 1) is differentiable, so modifications like
        # x[np.abs(x) < 0.005] = 0.02 can be skipped here.

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
        self.attrs = {'alpha': alpha}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def get_alpha(self):
        return 1.0


class TestELUAlpha(TestELU):
    def get_alpha(self):
        return -0.2


class TestELU_ZeroDim(TestELU):
    def init_shape(self):
        self.shape = []


class TestELUAPI(unittest.TestCase):
    # test paddle.nn.ELU, paddle.nn.functional.elu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.elu = F.elu

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out1 = self.elu(x)
                m = paddle.nn.ELU()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = elu(self.x_np, 1.0)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = self.elu(x)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ELU()
        out2 = m(x)
        out_ref = elu(self.x_np, 1.0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.elu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ELU(0.2)
        out2 = m(x)
        out_ref = elu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, self.elu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[10, 12], dtype='int32'
                )
                self.assertRaises(TypeError, self.elu, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[10, 12], dtype='float16'
                )
                self.elu(x_fp16)


class TestELUInplaceAPI(TestELUAPI):
    # test paddle.nn.functional.elu_
    def executed_api(self):
        self.elu = F.elu_

    def test_alpha_error(self):
        x = paddle.to_tensor(self.x_np)
        self.assertRaises(Exception, F.elu_, x, -0.2)


def celu(x, alpha):
    out_ref = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x / alpha) - 1))
    return out_ref.astype(x.dtype)
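# CELU reference: max(0, x) + min(0, alpha * (exp(x / alpha) - 1)). Note the
# division by alpha, which is why the API test below expects an error when
# alpha is 0.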


class TestCELU(TestActivation):
    def setUp(self):
        self.op_type = "celu"
        self.init_dtype()
        self.init_shape()

        self.python_api = paddle.nn.functional.celu
        np.random.seed(1024)
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
        alpha = 1.5
        out = celu(x, alpha)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
        self.attrs = {'alpha': alpha}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCELU_ZeroDim(TestCELU):
    def init_shape(self):
        self.shape = []


class TestCELUAPI(unittest.TestCase):
    # test paddle.nn.CELU, paddle.nn.functional.celu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.celu = F.celu

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out1 = self.celu(x, 1.5)
                m = paddle.nn.CELU(1.5)
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = celu(self.x_np, 1.5)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = self.celu(x, 1.5)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(1.5)
        out2 = m(x)
        out_ref = celu(self.x_np, 1.5)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.celu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(0.2)
        out2 = m(x)
        out_ref = celu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, self.celu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[10, 12], dtype='int32'
                )
                self.assertRaises(TypeError, self.celu, x_int32)
                # The alpha must not equal 0
                x_fp32 = paddle.static.data(
                    name='x_fp32', shape=[10, 12], dtype='float32'
                )
                self.assertRaises(ZeroDivisionError, F.celu, x_fp32, 0)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[10, 12], dtype='float16'
                )
                self.celu(x_fp16)


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.python_api = paddle.reciprocal
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)

    def test_check_output(self):
        self.check_output()


class TestReciprocal_ZeroDim(TestReciprocal):
    def init_shape(self):
        self.shape = []


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.prim_op_type = "prim"
        self.python_api = paddle.log
        self.public_python_api = paddle.log
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def if_enable_cinn(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)

    def test_error(self):
        with paddle.fluid.framework._static_guard():
            in1 = paddle.static.data(
                name="in1", shape=[11, 17], dtype="int32"
            )
            in2 = paddle.static.data(
                name="in2", shape=[11, 17], dtype="int64"
            )

            self.assertRaises(TypeError, paddle.log, in1)
            self.assertRaises(TypeError, paddle.log, in2)


class Test_Log_Op_Fp16(unittest.TestCase):
    def test_api_fp16(self):
        with paddle.fluid.framework._static_guard():
            with static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                x = [[2, 3, 4], [7, 8, 9]]
                x = paddle.to_tensor(x, dtype='float16')
                out = paddle.log(x)
                if core.is_compiled_with_cuda():
                    place = paddle.CUDAPlace(0)
                    exe = paddle.static.Executor(place)
                    (res,) = exe.run(fetch_list=[out])


class TestLog_ZeroDim(TestLog):
    def init_shape(self):
        self.shape = []


class TestLog2(TestActivation):
    def setUp(self):
        self.op_type = "log2"
        self.python_api = paddle.log2
        self.init_dtype()
        self.init_shape()

        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log2(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_error(self):
        with paddle.fluid.framework._static_guard():
            in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
            in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

            self.assertRaises(TypeError, paddle.log2, in1)
            self.assertRaises(TypeError, paddle.log2, in2)

    def test_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
                data_x = paddle.static.data(
                    name="data_x", shape=[11, 17], dtype="float64"
                )

                out1 = paddle.log2(data_x)
                exe = paddle.static.Executor(place=fluid.CPUPlace())
                exe.run(paddle.static.default_startup_program())
                (res1,) = exe.run(
                    paddle.static.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[out1],
                )
            expected_res = np.log2(input_x)
            np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log2(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log2(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)
joejiong 已提交
2863 2864


2865 2866 2867 2868 2869
class TestLog2_ZeroDim(TestLog2):
    def init_shape(self):
        self.shape = []


J
joejiong 已提交
2870 2871 2872
class TestLog10(TestActivation):
    def setUp(self):
        self.op_type = "log10"
2873
        self.python_api = paddle.log10
J
joejiong 已提交
2874
        self.init_dtype()
2875
        self.init_shape()
J
joejiong 已提交
2876

2877
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
J
joejiong 已提交
2878 2879 2880 2881
        out = np.log10(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
2882
        self.convert_input_output()
J
joejiong 已提交
2883 2884 2885 2886

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
2887
        self.check_grad(['X'], 'Out')
J
joejiong 已提交
2888

2889 2890 2891 2892 2893 2894 2895

class TestLog10_ZeroDim(TestLog10):
    def init_shape(self):
        self.shape = []


class TestLog10API(unittest.TestCase):
J
joejiong 已提交
2896
    def test_error(self):
2897
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2898 2899
            in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
            in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")
J
joejiong 已提交
2900

W
wanghuancoder 已提交
2901 2902
            self.assertRaises(TypeError, paddle.log10, in1)
            self.assertRaises(TypeError, paddle.log10, in2)
J
joejiong 已提交
2903 2904

    def test_api(self):
2905
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923
            with paddle.static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
                data_x = paddle.static.data(
                    name="data_x", shape=[11, 17], dtype="float64"
                )

                out1 = paddle.log10(data_x)
                exe = paddle.static.Executor(place=paddle.CPUPlace())
                exe.run(paddle.static.default_startup_program())
                (res1,) = exe.run(
                    paddle.static.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[out1],
                )
            expected_res = np.log10(input_x)
            np.testing.assert_allclose(res1, expected_res, rtol=1e-05)
J
joejiong 已提交
2924 2925 2926 2927 2928 2929 2930 2931

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log10(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log10(np_x))
2932
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)
J
joejiong 已提交
2933 2934


2935 2936 2937
class TestLog1p(TestActivation):
    def setUp(self):
        self.op_type = "log1p"
2938
        self.python_api = paddle.log1p
2939
        self.init_dtype()
2940
        self.init_shape()
2941

2942
        np.random.seed(1024)
2943
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
2944 2945 2946 2947
        out = np.log1p(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
2948
        self.convert_input_output()
2949 2950 2951 2952

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
2953
        self.check_grad(['X'], 'Out')
2954

2955

2956 2957
class Test_Log1p_Op_Fp16(unittest.TestCase):
    def test_api_fp16(self):
2958
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2959 2960 2961 2962 2963 2964 2965 2966 2967 2968
            with static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                x = [[2, 3, 4], [7, 8, 9]]
                x = paddle.to_tensor(x, dtype='float16')
                out = paddle.log1p(x)
                if core.is_compiled_with_cuda():
                    place = paddle.CUDAPlace(0)
                    exe = paddle.static.Executor(place)
                    (res,) = exe.run(fetch_list=[out])
2969 2970


2971 2972 2973 2974 2975 2976
class TestLog1p_ZeroDim(TestLog1p):
    def init_shape(self):
        self.shape = []


class TestLog1pAPI(unittest.TestCase):
2977
    def test_api(self):
2978
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
                data_x = paddle.static.data(
                    name="data_x",
                    shape=[11, 17],
                    dtype="float64",
                )

                out1 = paddle.log1p(data_x)
                exe = fluid.Executor(place=fluid.CPUPlace())
                exe.run(fluid.default_startup_program())
                (res1,) = exe.run(
                    fluid.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[out1],
                )
            expected_res = np.log1p(input_x)
            np.testing.assert_allclose(res1, expected_res, rtol=1e-05)
2997 2998 2999 3000 3001 3002 3003 3004

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.dygraph.to_variable(np_x)
            z = paddle.log1p(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log1p(np_x))
3005
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)
3006 3007


C
chengduo 已提交
3008
class TestSquare(TestActivation):
Q
qijun 已提交
3009 3010
    def setUp(self):
        self.op_type = "square"
3011
        self.python_api = paddle.square
3012
        self.init_dtype()
3013
        self.init_shape()
3014

3015
        np.random.seed(1024)
3016
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
3017 3018 3019 3020
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
3021
        self.convert_input_output()
Q
qijun 已提交
3022 3023

    def test_check_grad(self):
3024 3025
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
3026
        self.check_grad(['X'], 'Out', max_relative_error=0.007)
3027 3028

    def test_check_output(self):
W
wanghuancoder 已提交
3029
        self.check_output()
Q
qijun 已提交
3030

3031

3032 3033 3034 3035 3036
class TestSquare_ZeroDim(TestSquare):
    def init_shape(self):
        self.shape = []


3037
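# The bfloat16 variant below follows the usual bf16 OpTest pattern in this
# file: inputs and expected outputs are built in float32 and packed into
# uint16 bit patterns via convert_float_to_uint16, and the test only runs on
# CUDA builds (ROCm is excluded by the skip guard).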
@unittest.skipIf(
    not core.is_compiled_with_cuda() or core.is_compiled_with_rocm(),
    "core is not compiled with CUDA",
)
class TestSquareBF16(OpTest):
    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
        out = np.square(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', numeric_grad_delta=0.5)


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.prim_op_type = "comp"
        self.python_api = paddle.pow
        self.public_python_api = paddle.pow
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {'factor': 3.0}
        self.convert_input_output()

    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        self.check_output(check_prim=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestPow_ZeroDim(TestPow):
    def init_shape(self):
        self.shape = []


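# Unlike TestPow, which passes the exponent through the 'factor' attribute,
# the test below feeds it as a separate 'FactorTensor' input, so the exponent
# arrives as a runtime tensor rather than a compile-time attribute.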
class TestPow_factor_tensor(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.python_api = paddle.pow
        self.enable_cinn = False
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype(self.dtype),
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_api(self):
        with paddle.fluid.framework._static_guard():
            input = np.random.uniform(1, 2, [11, 17]).astype("float32")
            x = paddle.static.data(name="x", shape=[11, 17], dtype="float32")
            res = paddle.static.data(
                name="res", shape=[11, 17], dtype="float32"
            )

            factor_1 = 2.0
            factor_2 = paddle.tensor.fill_constant([1], "float32", 3.0)
            out_1 = paddle.pow(x, factor_1)
            out_2 = paddle.pow(x, factor_2)
            out_4 = paddle.pow(x, factor_1, name='pow_res')
            out_6 = paddle.pow(x, factor_2)
            self.assertEqual(('pow_res' in out_4.name), True)

            exe = fluid.Executor(place=fluid.CPUPlace())
            res_1, res_2, res, res_6 = exe.run(
                fluid.default_main_program(),
                feed={"x": input},
                fetch_list=[out_1, out_2, res, out_6],
            )

            assert np.allclose(res_1, np.power(input, 2))
            assert np.allclose(res_2, np.power(input, 3))
            assert np.allclose(res_6, np.power(input, 3))


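# Reference for the stanh tests below: scaled tanh,
# out = scale_b * tanh(scale_a * x).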
def ref_stanh(x, scale_a=0.67, scale_b=1.7159):
    out = scale_b * np.tanh(x * scale_a)
    return out


class TestSTanh(TestActivation):
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        self.op_type = "stanh"
        self.python_api = paddle.stanh
        self.init_dtype()
        self.init_shape()

        scale_a = self.get_scale_a()
        scale_b = self.get_scale_b()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        # The same reason as TestAbs
        out = ref_stanh(x, scale_a, scale_b)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSTanhScaleA(TestSTanh):
    def get_scale_a(self):
        return 2.0


class TestSTanhScaleB(TestSTanh):
    def get_scale_b(self):
        return 0.5


class TestSTanh_ZeroDim(TestSTanh):
    def init_shape(self):
        self.shape = []


class TestSTanhAPI(unittest.TestCase):
    # test paddle.nn.stanh
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.scale_a = self.get_scale_a()
        self.scale_b = self.get_scale_b()
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12])
                out = paddle.stanh(x, self.scale_a, self.scale_b)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out = paddle.stanh(x, self.scale_a, self.scale_b)
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in [out]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out = paddle.stanh(x, self.scale_a, self.scale_b)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, paddle.stanh, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, paddle.stanh, x_int32)
                # the float16 input dtype is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                paddle.stanh(x_fp16)


class TestSTanhAPIScaleA(TestSTanhAPI):
    def get_scale_a(self):
        return 2.0


class TestSTanhAPIScaleB(TestSTanhAPI):
    def get_scale_b(self):
        return 0.5


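# Reference for the softplus tests below: out = log(1 + exp(beta * x)) / beta
# while beta * x <= threshold; above the threshold the result is effectively
# x, so the reference switches to the identity branch to avoid exp() overflow.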
def ref_softplus(x, beta=1, threshold=20):
    x_beta = beta * x
    out = np.select(
        [x_beta <= threshold, x_beta > threshold],
        [np.log(1 + np.exp(x_beta)) / beta, x],
    )
    return out


class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.python_api = paddle.nn.functional.softplus
        self.init_dtype()
        self.init_shape()

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': x}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftplus_ZeroDim(TestSoftplus):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda() or core.is_compiled_with_rocm(),
    "core is not compiled with CUDA",
)
class TestSoftplusBF16(OpTest):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()
        self.python_api = paddle.nn.functional.softplus

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(np.float32)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', numeric_grad_delta=0.05)


class TestSoftplusAPI(unittest.TestCase):
    # test paddle.nn.Softplus, paddle.nn.functional.softplus
    def setUp(self):
        self.beta = 2
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.softplus(x, self.beta, self.threshold)
                softplus = paddle.nn.Softplus(self.beta, self.threshold)
                out2 = softplus(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.softplus(x, self.beta, self.threshold)
        softplus = paddle.nn.Softplus(self.beta, self.threshold)
        out2 = softplus(x)
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.softplus, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.softplus, x_int32)
                # the float16 input dtype is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.softplus(x_fp16)
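

# Reference for the softsign tests below: softsign(x) = x / (1 + |x|),
# e.g. softsign(1.0) == 0.5.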


def ref_softsign(x):
    out = np.divide(x, 1 + np.abs(x))
    return out


class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()
        self.init_shape()

        self.python_api = paddle.nn.functional.softsign

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_softsign(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftsign_ZeroDim(TestSoftsign):
    def init_shape(self):
        self.shape = []


class TestSoftsignAPI(unittest.TestCase):
    # test paddle.nn.Softsign, paddle.nn.functional.softsign
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.softsign(x)
                softsign = paddle.nn.Softsign()
                out2 = softsign(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_softsign(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.softsign(x)
        softsign = paddle.nn.Softsign()
        out2 = softsign(x)
        out_ref = ref_softsign(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.softsign, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.softsign, x_int32)
                # the float16 input dtype is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.softsign(x_fp16)


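# Reference for the thresholded_relu tests below: values are kept where
# x > threshold and zeroed elsewhere, e.g. with threshold=15, 20.0 -> 20.0
# and 3.0 -> 0.0.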
def ref_thresholded_relu(x, threshold=1.0):
    out = (x > threshold) * x
    return out


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.thresholded_relu

        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-20, 20, self.shape).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_thresholded_relu(x, threshold)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"threshold": threshold}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestThresholdedRelu_ZeroDim(TestThresholdedRelu):
    def init_shape(self):
        self.shape = []


class TestThresholdedReluAPI(unittest.TestCase):
    # test paddle.nn.ThresholdedReLU, paddle.nn.functional.thresholded_relu
    def setUp(self):
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-20, 20, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.thresholded_relu(x, self.threshold)
                thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
                out2 = thresholded_relu(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_thresholded_relu(self.x_np, self.threshold)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static()
        x = paddle.to_tensor(self.x_np)
        out1 = F.thresholded_relu(x, self.threshold)
        thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
        out2 = thresholded_relu(x)
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.thresholded_relu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.thresholded_relu, x_int32)
                # the float16 input dtype is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.thresholded_relu(x_fp16)


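# Reference for the hard_sigmoid tests below: the piecewise-linear sigmoid
# approximation clip(slope * x + offset, 0, 1); with the defaults (slope=1/6,
# offset=0.5) an input of 0 maps to 0.5.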
def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5):
    return np.maximum(np.minimum(x * slope + offset, 1.0), 0.0).astype(x.dtype)


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.dtype = 'float64'
        self.slope = 0.166666666666667
        self.offset = 0.5
        self.set_attrs()
        self.init_shape()
        self.python_api = paddle.nn.functional.hardsigmoid

        x = np.random.uniform(-5, 5, self.shape).astype(self.dtype)
        lower_threshold = -self.offset / self.slope
        upper_threshold = (1.0 - self.offset) / self.slope

        # Same reason as TestAbs
        delta = 0.005
        x[np.abs(x - lower_threshold) < delta] = lower_threshold - 0.02
        x[np.abs(x - upper_threshold) < delta] = upper_threshold - 0.02

        out = ref_hardsigmoid(x, self.slope, self.offset)

        self.attrs = {'slope': self.slope, 'offset': self.offset}

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def set_attrs(self):
        pass


class TestHardSigmoidFP32(TestHardSigmoid):
    def set_attrs(self):
        self.dtype = 'float32'


class TestHardSigmoidSlopeOffset(TestHardSigmoid):
    def set_attrs(self):
        self.slope = 0.2
        self.offset = 0.4


class TestHardSigmoid_ZeroDim(TestHardSigmoid):
    def init_shape(self):
        self.shape = []


class TestHardsigmoidAPI(unittest.TestCase):
    # test paddle.nn.Hardsigmoid, paddle.nn.functional.hardsigmoid
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.hardsigmoid(x)
                m = paddle.nn.Hardsigmoid()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_hardsigmoid(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardsigmoid(x)
        m = paddle.nn.Hardsigmoid()
        out2 = m(x)
        out_ref = ref_hardsigmoid(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.hardsigmoid, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.hardsigmoid, x_int32)
                # the float16 input dtype is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.hardsigmoid(x_fp16)


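# Reference for the swish tests below: swish(x) = x * sigmoid(x) (expit is the
# logistic sigmoid); the op itself is exercised with beta fixed at 1.0.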
def ref_swish(x):
    out = x * expit(x)
    return out


class TestSwish(TestActivation):
    def setUp(self):
        self.op_type = "swish"
        self.python_api = paddle.nn.functional.swish
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_swish(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {'beta': 1.0}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'],
            'Out',
        )


class TestSwish_ZeroDim(TestSwish):
    def init_shape(self):
        self.shape = []


class TestSwishAPI(unittest.TestCase):
    # test paddle.nn.Swish, paddle.nn.functional.swish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.swish(x)
                swish = paddle.nn.Swish()
                out2 = swish(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_swish(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.swish(x)
        swish = paddle.nn.Swish()
        out2 = swish(x)
        out_ref = ref_swish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out = paddle.nn.functional.swish(x)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_swish(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.swish, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.swish, x_int32)
                # the float16 input dtype is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.swish(x_fp16)


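# Reference for the mish tests below: mish(x) = x * tanh(softplus(x)), with
# the softplus branch replaced by the identity above `threshold` for
# numerical stability.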
def ref_mish(x, threshold=20.0):
    softplus = np.select(
        [x <= threshold, x > threshold], [np.log(1 + np.exp(x)), x]
    )
    return x * np.tanh(softplus)


class TestMish(TestActivation):
    def setUp(self):
        self.op_type = "mish"
        self.python_api = paddle.nn.functional.mish
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_mish(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestMish_ZeroDim(TestMish):
    def init_shape(self):
        self.shape = []


class TestMishAPI(unittest.TestCase):
    # test paddle.nn.Mish, paddle.nn.functional.mish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.mish(x)
                mish = paddle.nn.Mish()
                out2 = mish(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_mish(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.mish(x)
        mish = paddle.nn.Mish()
        out2 = mish(x)
        out_ref = ref_mish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out = paddle.nn.functional.mish(x)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_mish(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.mish, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.mish, x_int32)
                # the float16 input dtype is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.mish(x_fp16)


# ------------------ Test Cudnn Activation----------------------
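# The factory below derives a CUDA-only subclass of an existing op test with
# use_cudnn=True and registers it in globals() so unittest discovery picks it
# up, e.g. create_test_act_cudnn_class(TestRelu) generates TestRelu_cudnn.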
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{}_{}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


# ------------------ Test Fp16 ----------------------
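# The factory below wraps a base op test in a float16 variant named
# <Parent>_FP16OP: it overrides init_dtype, only checks output/grad when the
# CUDA place reports float16 support, and forwards any extra **kwargs as
# attributes on the generated class (e.g. the rev_comp/cinn tolerances used
# for TestGelu below).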
def create_test_act_fp16_class(
    parent,
    atol=1e-3,
    grad_check=True,
    check_dygraph=True,
    check_prim=False,
    enable_cinn=False,
    grad_atol=1e-2,
    **kwargs
):
    @unittest.skipIf(
        not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActFp16(parent):
        def setUp(self):
            super().setUp()
            for k, v in kwargs.items():
                setattr(self, k, v)

        def init_dtype(self):
            self.dtype = np.float16

        def if_enable_cinn(self):
            self.enable_cinn = enable_cinn

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(
                    place,
                    atol=atol,
                    check_dygraph=check_dygraph,
                    check_prim=check_prim,
                )

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place,
                    ['X'],
                    'Out',
                    check_dygraph=check_dygraph,
                    check_prim=check_prim,
                    max_relative_error=grad_atol,
                )

    cls_name = "{}_{}".format(parent.__name__, "FP16OP")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestExpFp32_Prim, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestExpm1)
create_test_act_fp16_class(TestSigmoid, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestSilu, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestTanhshrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftshrink)
create_test_act_fp16_class(TestSqrt, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestSqrtComp, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestAbs, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(
    TestFloor, check_prim=True, grad_check=False, enable_cinn=True
)
create_test_act_fp16_class(TestCos)
create_test_act_fp16_class(TestTan)
create_test_act_fp16_class(TestCosh)
create_test_act_fp16_class(TestAcos)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestSinh)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestAcosh)
create_test_act_fp16_class(TestAsinh)
create_test_act_fp16_class(TestAtanh)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(
    TestGelu,
    check_prim=True,
    enable_cinn=True,
    rev_comp_rtol=1e-3,
    rev_comp_atol=1e-3,
    cinn_rtol=1e-3,
    cinn_atol=1e-3,
)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu, check_dygraph=False)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestCELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog, check_prim=True)
create_test_act_fp16_class(TestLog2)
create_test_act_fp16_class(TestLog10)
create_test_act_fp16_class(TestLog1p)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, check_prim=True)
create_test_act_fp16_class(TestPow_factor_tensor)
create_test_act_fp16_class(TestSTanh)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish)
create_test_act_fp16_class(TestHardSwish, check_prim=True)
create_test_act_fp16_class(TestMish)
create_test_act_fp16_class(TestLeakyRelu, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(
    TestLeakyReluAlpha1, check_prim=True, enable_cinn=True
)
create_test_act_fp16_class(
    TestLeakyReluAlpha2, check_prim=True, enable_cinn=True
)
create_test_act_fp16_class(
    TestLeakyReluAlpha3, check_prim=True, enable_cinn=True
)
create_test_act_fp16_class(TestLeakyRelu_ZeroDim, check_prim=True)
create_test_act_fp16_class(TestRsqrt, check_prim=True, enable_cinn=True)


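# The factory below mirrors the fp16 one for bfloat16 (<Parent>_BF16OP): test
# data are generated in float32, convert_input_output repacks them as uint16
# bf16 bit patterns, and the whole class is skipped unless the CUDA place
# reports bfloat16 support.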
def create_test_act_bf16_class(
    parent,
    atol=1e-2,
    grad_check=True,
    check_dygraph=True,
    check_prim=False,
    enable_cinn=False,
    grad_atol=1e-2,
    **kwargs
):
    @unittest.skipIf(
        not core.is_compiled_with_cuda()
        or not core.is_bfloat16_supported(core.CUDAPlace(0)),
        "core is not compiled with CUDA or does not support bfloat16",
    )
    class TestActBF16(parent):
        def setUp(self):
            super().setUp()
            for k, v in kwargs.items():
                setattr(self, k, v)

        def init_dtype(self):
            self.dtype = np.float32

        def if_enable_cinn(self):
            self.enable_cinn = enable_cinn

        def convert_input_output(self):
            self.inputs = {'X': convert_float_to_uint16(self.inputs['X'])}
            self.outputs = {'Out': convert_float_to_uint16(self.outputs['Out'])}
            self.dtype = np.uint16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            self.check_output_with_place(
                place, atol=atol, check_prim=check_prim
            )

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            if grad_check:
                self.check_grad_with_place(
                    place,
                    ['X'],
                    'Out',
                    max_relative_error=grad_atol,
                    check_prim=check_prim,
                )

    cls_name = "{}_{}".format(parent.__name__, "BF16OP")
    TestActBF16.__name__ = cls_name
    globals()[cls_name] = TestActBF16


create_test_act_bf16_class(TestActivation)
create_test_act_bf16_class(TestExpFp32_Prim, check_prim=True)
create_test_act_bf16_class(TestExpm1)
create_test_act_bf16_class(TestSigmoid, check_prim=True)
create_test_act_bf16_class(TestSilu, check_prim=True)
create_test_act_bf16_class(TestLogSigmoid)
create_test_act_bf16_class(TestTanh, check_prim=True)
create_test_act_bf16_class(TestTanhshrink)
create_test_act_bf16_class(TestHardShrink)
create_test_act_bf16_class(TestSoftshrink)
create_test_act_bf16_class(TestSqrt, check_prim=True)
create_test_act_bf16_class(TestSqrtComp, check_prim=True)
create_test_act_bf16_class(TestAbs, check_prim=True)
create_test_act_bf16_class(TestCeil, grad_check=False)
create_test_act_bf16_class(TestFloor, grad_check=False, check_prim=True)
create_test_act_bf16_class(TestCos)
create_test_act_bf16_class(TestTan)
create_test_act_bf16_class(TestCosh)
create_test_act_bf16_class(TestAcos)
create_test_act_bf16_class(TestSin)
create_test_act_bf16_class(TestSinh)
create_test_act_bf16_class(TestAsin)
create_test_act_bf16_class(TestAtan)
create_test_act_bf16_class(TestAcosh)
create_test_act_bf16_class(TestAsinh)
create_test_act_bf16_class(TestAtanh)
create_test_act_bf16_class(TestRound, grad_check=False)
create_test_act_bf16_class(TestRelu, check_prim=True)
create_test_act_bf16_class(
    TestGelu,
    check_prim=True,
    rev_comp_rtol=1e-2,
    rev_comp_atol=1e-2,
    cinn_rtol=1e-2,
    cinn_atol=1e-2,
)
create_test_act_bf16_class(TestBRelu)
create_test_act_bf16_class(TestRelu6)
create_test_act_bf16_class(TestSoftRelu, check_dygraph=False)
create_test_act_bf16_class(TestELU)
create_test_act_bf16_class(TestCELU)
create_test_act_bf16_class(TestReciprocal)
create_test_act_bf16_class(TestLog, check_prim=True)
create_test_act_bf16_class(TestLog2)
create_test_act_bf16_class(TestLog10)
create_test_act_bf16_class(TestLog1p)
create_test_act_bf16_class(TestSquare)
create_test_act_bf16_class(TestPow, check_prim=True)
create_test_act_bf16_class(TestPow_factor_tensor)
create_test_act_bf16_class(TestSTanh)
create_test_act_bf16_class(TestSoftplus)
create_test_act_bf16_class(TestSoftsign)
create_test_act_bf16_class(TestThresholdedRelu)
create_test_act_bf16_class(TestHardSigmoid)
create_test_act_bf16_class(TestSwish)
create_test_act_bf16_class(TestHardSwish, check_prim=True)
create_test_act_bf16_class(TestMish)
create_test_act_bf16_class(TestLeakyRelu, check_prim=True)
create_test_act_bf16_class(TestLeakyReluAlpha1, check_prim=True)
create_test_act_bf16_class(TestLeakyReluAlpha2, check_prim=True)
create_test_act_bf16_class(TestLeakyReluAlpha3, check_prim=True)
create_test_act_bf16_class(TestLeakyRelu_ZeroDim, check_prim=True)
create_test_act_bf16_class(TestRsqrt, check_prim=True)


if __name__ == "__main__":
    unittest.main()