#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest
import warnings

import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16
from scipy.special import erf, expit

import paddle
import paddle.nn.functional as F
from paddle import fluid, static
from paddle.fluid import Program, core, program_guard
from paddle.fluid.layer_helper import LayerHelper


class TestSqrtOpError(unittest.TestCase):
    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with program_guard(Program(), Program()):
                # The input type of sqrt op must be Variable or numpy.ndarray.
                in1 = 1
                self.assertRaises(TypeError, paddle.sqrt, in1)
                # The input dtype of sqrt op must be float16, float32, float64.
                in2 = paddle.static.data(
                    name='input2', shape=[-1, 12, 10], dtype="int32"
                )
                self.assertRaises(TypeError, paddle.sqrt, in2)

                in3 = paddle.static.data(
                    name='input3', shape=[-1, 12, 10], dtype="float16"
                )
                paddle.sqrt(x=in3)


class TestActivation(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.init_dtype()
        self.init_shape()
        self.init_kernel_type()
        self.if_enable_cinn()
        self.python_api = paddle.exp
        self.public_python_api = paddle.exp

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.convert_input_output()

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'],
            'Out',
        )

    def init_dtype(self):
        self.dtype = np.float64

    def init_shape(self):
        self.shape = [11, 17]

    def init_kernel_type(self):
        pass

    def convert_input_output(self):
        pass

    def if_enable_cinn(self):
        pass
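    # Note: TestActivation doubles as a template for the other operator tests
    # in this file. Subclasses typically override setUp plus the hooks above
    # (init_dtype, init_shape, init_kernel_type, convert_input_output,
    # if_enable_cinn) rather than the check methods; for example,
    # TestActivation_ZeroDim below only overrides init_shape to run the same
    # exp-based checks on a zero-dimensional tensor.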


class TestActivation_ZeroDim(TestActivation):
    def init_shape(self):
        self.shape = []


class TestExpFp32_Prim(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.prim_op_type = "prim"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.exp
        self.public_python_api = paddle.exp

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.if_enable_cinn()
        self.convert_input_output()

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)

    def init_dtype(self):
        self.dtype = np.float32

    def init_shape(self):
        self.shape = [12, 17]

    def if_enable_cinn(self):
        pass

    def convert_input_output(self):
        pass
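    # prim_op_type = "prim" together with check_prim=True in test_check_grad
    # additionally exercises the primitive (decomposed) implementation of the
    # operator in OpTest; CINN lowering is controlled per test through the
    # if_enable_cinn hook (the zero-dim variant below disables it).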


class TestExpFp64_Prim(TestExpFp32_Prim):
    def init_dtype(self):
        self.dtype = np.float64


class TestExpPrim_ZeroDim(TestExpFp32_Prim):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestExpm1(TestActivation):
    def setUp(self):
        self.op_type = "expm1"
        self.python_api = paddle.expm1
        self.init_dtype()
        self.init_shape()

        np.random.seed(2049)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.expm1(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')

    def test_check_output(self):
        self.check_output()


class TestExpm1_ZeroDim(TestExpm1):
    def init_shape(self):
        self.shape = []


class TestExpm1API(unittest.TestCase):
    def init_dtype(self):
        self.dtype = 'float64'
        self.shape = [11, 17]

    def setUp(self):
        self.init_dtype()
        self.x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        self.out_ref = np.expm1(self.x)

        self.place = [paddle.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.place.append(paddle.CUDAPlace(0))

    def test_static_api(self):
        def run(place):
            with paddle.fluid.framework._static_guard():
                with paddle.static.program_guard(paddle.static.Program()):
                    X = paddle.static.data('X', self.shape, dtype=self.dtype)
                    out = paddle.expm1(X)
                    exe = paddle.static.Executor(place)
                    res = exe.run(feed={'X': self.x}, fetch_list=[out])
            for r in res:
                np.testing.assert_allclose(self.out_ref, r, rtol=1e-05)

        for place in self.place:
            run(place)

    def test_dygraph_api(self):
        def run(place):
            X = paddle.to_tensor(self.x)
            out = paddle.expm1(X)
            np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)

        for place in self.place:
            run(place)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                X = paddle.static.data('X', self.shape, dtype='int32')
                self.assertRaises(TypeError, paddle.expm1, X)
        # The input dtype must be float16, float32, float64.


class TestParameter:
    def test_out_name(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
                data = paddle.static.data(
                    name="X", shape=[-1, 1], dtype="float32"
                )
                out = eval("paddle.%s(data, name='Y')" % self.op_type)
                place = fluid.CPUPlace()
                exe = fluid.Executor(place)
                (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
                expected = eval("np.%s(np_x)" % self.op_type)
                np.testing.assert_allclose(result, expected, rtol=1e-05)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = eval("paddle.%s(x).numpy()" % self.op_type)
            z_expected = eval("np.%s(np_x)" % self.op_type)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)
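    # TestParameter is used as a mixin: classes such as TestTanh, TestAtan and
    # TestSqrt inherit it alongside TestActivation, so the eval() calls above
    # resolve "paddle.<op_type>" and "np.<op_type>" for whatever op_type the
    # concrete subclass sets in setUp.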


class TestSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.sigmoid
        self.public_python_api = paddle.nn.functional.sigmoid
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.convert_input_output()

    def init_dtype(self):
        self.dtype = np.float32

    def if_enable_cinn(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01, check_prim=True)


class TestSigmoid_ZeroDim(TestSigmoid):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False


@unittest.skipIf(
    not core.is_compiled_with_cuda() or core.is_compiled_with_rocm(),
    "core is not compiled with CUDA",
)
class TestSigmoidBF16(OpTest):
    def setUp(self):
        self.op_type = "sigmoid"
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.sigmoid
        self.public_python_api = paddle.nn.functional.sigmoid
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(np.float32)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def init_shape(self):
        self.shape = [11, 17]

    def if_enable_cinn(self):
        self.enable_cinn = False

    def test_check_output(self):
        place = core.CUDAPlace(0)
        # elementwise_pow doesn't support bfloat16, skip check_prim here.
        self.check_output_with_place(place, check_prim=True)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)
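    # Note: numpy has no bfloat16 dtype, so this test keeps self.dtype as
    # np.uint16 and packs the float32 reference values with
    # convert_float_to_uint16; the checks then run on a CUDA place, which is
    # why the class is skipped without CUDA (and on ROCm builds).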


'''
class TestSigmoidBF16_ZeroDim(TestSigmoidBF16):

    def init_shape(self):
        self.shape = []
'''


class TestSilu(TestActivation):
    def setUp(self):
        self.op_type = "silu"
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.silu
        self.public_python_api = paddle.nn.functional.silu
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = x / (np.exp(-x) + 1)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.convert_input_output()

    def init_dtype(self):
        self.dtype = np.float32

    def if_enable_cinn(self):
        pass

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_prim=True)


class TestSilu_ZeroDim(TestSilu):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestSiluAPI(unittest.TestCase):
    # test paddle.nn.Silu, paddle.nn.functional.silu
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [11, 17])
                out1 = F.silu(x)
                m = paddle.nn.Silu()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = self.x_np / (1 + np.exp(-self.x_np))
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.silu(x)
        m = paddle.nn.Silu()
        out2 = m(x)
        out_ref = self.x_np / (1 + np.exp(-self.x_np))
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.silu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[11, 17], dtype='int32'
                )
                self.assertRaises(TypeError, F.silu, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[11, 17], dtype='float16'
                )
                F.silu(x_fp16)


class TestLogSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.python_api = paddle.nn.functional.log_sigmoid
        self.init_dtype()
        self.init_shape()

        np.random.seed(2048)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestLogSigmoid_ZeroDim(TestLogSigmoid):
    def init_shape(self):
        self.shape = []


class TestLogSigmoidAPI(unittest.TestCase):
    # test paddle.nn.LogSigmoid, paddle.nn.functional.log_sigmoid
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [11, 17])
                out1 = F.log_sigmoid(x)
                m = paddle.nn.LogSigmoid()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.log_sigmoid(x)
        m = paddle.nn.LogSigmoid()
        out2 = m(x)
        out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.log_sigmoid, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[11, 17], dtype='int32'
                )
                self.assertRaises(TypeError, F.log_sigmoid, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[11, 17], dtype='float16'
                )
                F.log_sigmoid(x_fp16)


class TestTanh(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "tanh"
        self.prim_op_type = "prim"
        self.python_api = paddle.tanh
        self.public_python_api = paddle.tanh
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.tanh(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)

    def init_dtype(self):
        # TODO If dtype is float64, the output (Out) has diff at CPUPlace
        # when using and not using inplace. Therefore, set dtype as float32
        # for now.
        self.dtype = np.float32

    def if_enable_cinn(self):
        pass


class TestTanh_ZeroDim(TestTanh):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestTanhAPI(unittest.TestCase):
    # test paddle.tanh, paddle.nn.tanh, paddle.nn.functional.tanh
    def setUp(self):
        self.dtype = 'float32'
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.tanh = F.tanh

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], self.dtype)
                out1 = self.tanh(x)
                th = paddle.nn.Tanh()
                out2 = th(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = np.tanh(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanh(x)
        out2 = paddle.tanh(x)
        th = paddle.nn.Tanh()
        out3 = th(x)
        out_ref = np.tanh(self.x_np)
        for r in [out1, out2, out3]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, self.tanh, 1)
                # The input dtype must be float16, float32.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, self.tanh, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                self.tanh(x_fp16)


class TestTanhInplaceAPI(TestTanhAPI):
    # test paddle.tanh_
    def executed_api(self):
        self.tanh = paddle.tanh_


class TestAtan(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "atan"
        self.python_api = paddle.atan
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_out_name(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
                data = paddle.static.data(
                    name="X", shape=[-1, 1], dtype="float32"
                )
                out = paddle.atan(data, name='Y')
                place = fluid.CPUPlace()
                exe = fluid.Executor(place)
                (result,) = exe.run(feed={"X": np_x}, fetch_list=[out])
                expected = np.arctan(np_x)
                self.assertEqual(result, expected)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.atan(x).numpy()
            z_expected = np.arctan(np_x)
            self.assertEqual(z, z_expected)


class TestAtan_ZeroDim(TestAtan):
    def init_shape(self):
        self.shape = []


class TestSinh(TestActivation):
    def setUp(self):
        self.op_type = "sinh"
        self.python_api = paddle.sinh
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sinh(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSinh_ZeroDim(TestSinh):
    def init_shape(self):
        self.shape = []


class TestSinhAPI(unittest.TestCase):
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.sinh(x).numpy()
            z_expected = np.sinh(np_x)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

    def test_api(self):
        with paddle.fluid.framework._static_guard():
            test_data_shape = [11, 17]
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                    "float32"
                )
                data_x = paddle.static.data(
                    name="data_x",
                    shape=test_data_shape,
                    dtype="float32",
                )

                pd_sinh_out = paddle.sinh(data_x)
                exe = fluid.Executor(place=fluid.CPUPlace())
                exe.run(fluid.default_startup_program())
                (np_sinh_res,) = exe.run(
                    fluid.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[pd_sinh_out],
                )

            expected_res = np.sinh(input_x)
            np.testing.assert_allclose(np_sinh_res, expected_res, rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = paddle.sinh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestSinhOpError(unittest.TestCase):
    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with program_guard(Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, paddle.sinh, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, paddle.sinh, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                paddle.sinh(x_fp16)


class TestCosh(TestActivation):
    def setUp(self):
        self.op_type = "cosh"
        self.python_api = paddle.cosh
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.cosh(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCosh_ZeroDim(TestCosh):
    def init_shape(self):
        self.shape = []


class TestCoshAPI(unittest.TestCase):
    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.cosh(x).numpy()
            z_expected = np.cosh(np_x)
            np.testing.assert_allclose(z, z_expected, rtol=1e-05)

    def test_api(self):
        with paddle.fluid.framework._static_guard():
            test_data_shape = [11, 17]
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                    "float32"
                )
                data_x = paddle.static.data(
                    name="data_x",
                    shape=test_data_shape,
                    dtype="float32",
                )

                pd_cosh_out = paddle.cosh(data_x)
                exe = fluid.Executor(place=fluid.CPUPlace())
                exe.run(fluid.default_startup_program())
                (np_cosh_res,) = exe.run(
                    fluid.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[pd_cosh_out],
                )

            expected_res = np.cosh(input_x)
            np.testing.assert_allclose(np_cosh_res, expected_res, rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = fluid.dygraph.to_variable(input_x)
            var.stop_gradient = False
            loss = paddle.cosh(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestCoshOpError(unittest.TestCase):
    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with program_guard(Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, paddle.cosh, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, paddle.cosh, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                paddle.cosh(x_fp16)


def ref_tanhshrink(x):
    out = x - np.tanh(x)
    return out
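# tanhshrink(x) = x - tanh(x); tanh saturates at +/-1, so for the inputs
# sampled from [10, 20] below the expected output is roughly x - 1.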


class TestTanhshrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.python_api = paddle.nn.functional.tanhshrink
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(10, 20, self.shape).astype(self.dtype)
        out = ref_tanhshrink(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTanhshrink_ZeroDim(TestTanhshrink):
    def init_shape(self):
        self.shape = []


class TestTanhshrinkAPI(unittest.TestCase):
    # test paddle.nn.Tanhshrink, paddle.nn.functional.tanhshrink
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(10, 20, [10, 17]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.tanhshrink(x)
                tanhshrink = paddle.nn.Tanhshrink()
                out2 = tanhshrink(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_tanhshrink(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.tanhshrink(x)
        tanhshrink = paddle.nn.Tanhshrink()
        out2 = tanhshrink(x)
        out_ref = ref_tanhshrink(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.tanhshrink, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.tanhshrink, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.tanhshrink(x_fp16)


def ref_hardshrink(x, threshold):
    out = np.copy(x)
    out[(out >= -threshold) & (out <= threshold)] = 0
    return out
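# i.e. hardshrink zeroes every element with |x| <= threshold and passes the
# rest through unchanged.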


class TestHardShrink(TestActivation):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.python_api = paddle.nn.functional.hardshrink
        self.init_dtype()
        self.init_shape()

        self.threshold = 0.5
        self.set_attrs()
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) * 10
        out = ref_hardshrink(x, self.threshold)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.attrs = {'threshold': self.threshold}

        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def set_attrs(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardShrink_threshold_negative(TestHardShrink):
    def set_attrs(self):
        self.threshold = -0.1


'''
class TestHardShrink_ZeroDim(TestHardShrink):

    def init_shape(self):
        self.shape = []
'''


class TestHardShrinkAPI(unittest.TestCase):
    # test paddle.nn.Hardshrink, paddle.nn.functional.hardshrink
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out1 = F.hardshrink(x)
                hd = paddle.nn.Hardshrink()
                out2 = hd(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_hardshrink(self.x_np, 0.5)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardshrink(x)
        hd = paddle.nn.Hardshrink()
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.5)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.hardshrink(x, 0.6)
        hd = paddle.nn.Hardshrink(0.6)
        out2 = hd(x)
        out_ref = ref_hardshrink(self.x_np, 0.6)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.hardshrink, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.hardshrink, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.hardshrink(x_fp16)


def ref_hardtanh(x, min=-1.0, max=1.0):
    out = np.copy(x)
    out[np.abs(x - min) < 0.005] = min + 0.02
    out[np.abs(x - max) < 0.005] = max + 0.02
    out = np.minimum(np.maximum(x, min), max)
    return out
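# Net effect: the reference value is x clipped elementwise to [min, max]
# (the final assignment overrides the two boundary adjustments above).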


class TestHardtanhAPI(unittest.TestCase):
    # test paddle.nn.Hardtanh, paddle.nn.functional.hardtanh
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out1 = F.hardtanh(x)
                m = paddle.nn.Hardtanh()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_hardtanh(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardtanh(x)
        m = paddle.nn.Hardtanh()
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.hardtanh(x, -2.0, 2.0)
        m = paddle.nn.Hardtanh(-2.0, 2.0)
        out2 = m(x)
        out_ref = ref_hardtanh(self.x_np, -2.0, 2.0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.hardtanh, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.hardtanh, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.hardtanh(x_fp16)


def ref_softshrink(x, threshold=0.5):
    out = np.copy(x)
    out = (out < -threshold) * (out + threshold) + (out > threshold) * (
        out - threshold
    )
    return out
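# i.e. softshrink(x) = x + threshold for x < -threshold,
#      softshrink(x) = x - threshold for x >  threshold,
#      and 0 in between.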


class TestSoftshrink(TestActivation):
    def setUp(self):
        self.op_type = "softshrink"
        self.python_api = paddle.nn.functional.softshrink
        self.init_dtype()
        self.init_shape()

        threshold = 0.8

        np.random.seed(1023)
        x = np.random.uniform(0.25, 10, self.shape).astype(self.dtype)
        out = ref_softshrink(x, threshold)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

        self.attrs = {"lambda": threshold}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftshrink_ZeroDim(TestSoftshrink):
    def init_shape(self):
        self.shape = []


class TestSoftshrinkAPI(unittest.TestCase):
    # test paddle.nn.Softshrink, paddle.nn.functional.softshrink
    def setUp(self):
        self.threshold = 0.8
        np.random.seed(1024)
        self.x_np = np.random.uniform(0.25, 10, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.softshrink(x, self.threshold)
                softshrink = paddle.nn.Softshrink(self.threshold)
                out2 = softshrink(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_softshrink(self.x_np, self.threshold)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.softshrink(x, self.threshold)
        softshrink = paddle.nn.Softshrink(self.threshold)
        out2 = softshrink(x)
        out_ref = ref_softshrink(self.x_np, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.softshrink, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.softshrink, x_int32)
                # The threshold must be no less than zero
                x_fp32 = paddle.static.data(
                    name='x_fp32', shape=[12, 10], dtype='float32'
                )
                self.assertRaises(ValueError, F.softshrink, x_fp32, -1.0)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.softshrink(x_fp16)


class TestSqrt(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "prim"
        self.python_api = paddle.sqrt
        self.public_python_api = paddle.sqrt

        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def if_enable_cinn(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)

    def test_check_output(self):
        self.check_output()


class TestSqrtPrimFp32(TestActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "prim"
        self.python_api = paddle.sqrt
        self.public_python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()
        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)

    def test_check_output(self):
        self.check_output()

    def init_dtype(self):
        self.dtype = np.float32

    def if_enable_cinn(self):
        pass


class TestSqrt_ZeroDim(TestSqrt):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False


@unittest.skipIf(
    not core.is_compiled_with_cuda() or core.is_compiled_with_rocm(),
    "core is not compiled with CUDA",
)
class TestSqrtBF16(OpTest):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "prim"
        self.python_api = paddle.sqrt
        self.public_python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(np.float32)
        out = np.sqrt(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def init_shape(self):
        self.shape = [11, 17]

    def if_enable_cinn(self):
        self.enable_cinn = False

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)


class TestSqrtComp(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "comp"
        self.python_api = paddle.sqrt
        self.public_python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def if_enable_cinn(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_dygraph=True, check_prim=True)

    def test_check_output(self):
        self.check_output(check_dygraph=True, check_prim=True)


class TestSqrtCompFp32(TestActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.prim_op_type = "comp"
        self.python_api = paddle.sqrt
        self.public_python_api = paddle.sqrt
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()
        np.random.seed(1023)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def if_enable_cinn(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_dygraph=True, check_prim=True)

    def test_check_output(self):
        self.check_output(check_dygraph=True, check_prim=True)

    def init_dtype(self):
        self.dtype = np.float32


class TestRsqrt(TestActivation):
    def setUp(self):
        self.op_type = "rsqrt"
        self.prim_op_type = "comp"
        self.python_api = paddle.rsqrt
        self.public_python_api = paddle.rsqrt
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        self.check_output(check_prim=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'],
            'Out',
            max_relative_error=0.0005,
            check_prim=True,
        )


class TestRsqrt_ZeroDim(TestRsqrt):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestAbs(TestActivation):
    def setUp(self):
        self.op_type = "abs"
        self.prim_op_type = "prim"
        self.python_api = paddle.abs
        self.public_python_api = paddle.abs
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # Because we set delta = 0.005 in calculating numeric gradient,
        # if x is too small, such as 0.002, x_neg will be -0.003
        # x_pos will be 0.007, so the numeric gradient is inaccurate.
        # we should avoid this
        x[np.abs(x) < 0.005] = 0.02
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [4, 25]

    def if_enable_cinn(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestAbs_ZeroDim(TestAbs):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestCeil(TestActivation):
    def setUp(self):
        self.op_type = "ceil"
        self.python_api = paddle.ceil
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    # The same reason with TestFloor
    def test_check_grad(self):
        pass


class TestCeil_ZeroDim(TestCeil):
    def init_shape(self):
        self.shape = []


class TestFloor(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.prim_op_type = "prim"
        self.python_api = paddle.floor
        self.public_python_api = paddle.floor
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def if_enable_cinn(self):
        pass

    # the gradient on floor, ceil, round is undefined.
    # we return zero as gradient, but the numpy return nan
    # The same reason with TestFloor
    def test_check_grad(self):
        pass

1493
    def test_check_grad_for_prim(self):
1494 1495 1496 1497
        # the gradient on floor, ceil, round is undefined.
        # we return zero as gradient, but the numpy return nan.
        # for prim, we compare result with eager python api,
        # so, we use only_prim flag to express we only test prim.
1498 1499 1500 1501 1502 1503 1504 1505
        if core.is_compiled_with_cuda():
            self.check_grad_with_place(
                paddle.CUDAPlace(0),
                ['X'],
                'Out',
                check_prim=True,
                only_check_prim=True,
            )
1506 1507


1508
class TestFloor_ZeroDim(TestFloor):
1509 1510 1511
    def init_shape(self):
        self.shape = []

1512 1513
    def if_enable_cinn(self):
        self.enable_cinn = False
1514 1515


C
chengduo 已提交
1516
class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.python_api = paddle.cos
        self.public_python_api = paddle.cos
        self.prim_op_type = "prim"
        self.init_dtype()
        self.init_shape()
        # prim not support now
        self.enable_cinn = False

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.cos(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestCos_ZeroDim(TestCos):
    def init_shape(self):
        self.shape = []


class TestTan(TestActivation):
    def setUp(self):
        np.random.seed(1024)
        self.op_type = "tan"
        self.python_api = paddle.tan
        self.init_dtype()
        self.init_shape()

        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

        out = np.tan(self.x_np)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x_np)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTan_ZeroDim(TestTan):
    def init_shape(self):
        self.shape = []


class TestTanAPI(unittest.TestCase):
    def setUp(self):
        np.random.seed(1024)
        self.dtype = 'float32'
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out_test = paddle.tan(x)
        out_ref = np.tan(self.x_np)
        np.testing.assert_allclose(out_ref, out_test.numpy(), rtol=1e-05)

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [11, 17], self.dtype)
                out = paddle.tan(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = np.tan(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_backward(self):
        test_data_shape = [11, 17]
        with fluid.dygraph.guard():
            input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
                "float32"
            )
            var = paddle.to_tensor(input_x)
            var.stop_gradient = False
            loss = paddle.tan(var)
            loss.backward()
            grad_var = var.gradient()
            self.assertEqual(grad_var.shape, input_x.shape)


class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
        self.python_api = paddle.acos
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcos_ZeroDim(TestAcos):
    def init_shape(self):
        self.shape = []


class TestSin(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sin"
        self.python_api = paddle.sin
        self.public_python_api = paddle.sin
        self.prim_op_type = "prim"
        self.init_dtype()
        self.init_shape()
        # prim not support now
        self.enable_cinn = False

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.sin(x)
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestSin_ZeroDim(TestSin):
    def init_shape(self):
        self.shape = []


class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
        self.python_api = paddle.asin
        self.init_dtype()
        self.init_shape()

        np.random.seed(2048)
        x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsin_ZeroDim(TestAsin):
    def init_shape(self):
        self.shape = []


class TestAcosh(TestActivation):
    def setUp(self):
        self.op_type = "acosh"
        self.python_api = paddle.acosh
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(2, 3, self.shape).astype(self.dtype)
        out = np.arccosh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcosh_ZeroDim(TestAcosh):
    def init_shape(self):
        self.shape = []


class TestAsinh(TestActivation):
    def setUp(self):
        self.op_type = "asinh"
        self.python_api = paddle.asinh
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.arcsinh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsinh_ZeroDim(TestAsinh):
    def init_shape(self):
        self.shape = []


class TestAtanh(TestActivation):
    def setUp(self):
        self.op_type = "atanh"
        self.python_api = paddle.atanh
        self.init_dtype()
        self.init_shape()

        np.random.seed(400)
        x = np.random.uniform(-0.9, 0.9, self.shape).astype(self.dtype)
        out = np.arctanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAtanh_ZeroDim(TestAtanh):
    def init_shape(self):
        self.shape = []


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.python_api = paddle.round
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        pass


class TestRound_ZeroDim(TestRound):
    def init_shape(self):
        self.shape = []


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.python_api = paddle.nn.functional.relu
        self.prim_op_type = "comp"
        self.public_python_api = paddle.nn.functional.relu
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # The same reason with TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)
        self.inputs = {'X': x}

        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)

    def test_check_output(self):
        self.check_output(check_prim=True)

    def if_enable_cinn(self):
        pass


class TestRelu_ZeroDim(TestRelu):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestReluAPI(unittest.TestCase):
    # test paddle.nn.ReLU, paddle.nn.functional.relu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.relu = F.relu

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out1 = self.relu(x)
                m = paddle.nn.ReLU()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = np.maximum(self.x_np, 0)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ReLU()
        out1 = m(x)
        out2 = self.relu(x)
        out_ref = np.maximum(self.x_np, 0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, self.relu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[10, 12], dtype='int32'
                )
                self.assertRaises(TypeError, self.relu, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[10, 12], dtype='float16'
                )
                self.relu(x_fp16)


class TestReluInplaceAPI(TestReluAPI):
    # test paddle.nn.functional.relu_
    def executed_api(self):
        self.relu = F.relu_


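# Reference implementation used by the leaky_relu tests below:
# leaky_relu(x) = x for x >= 0 and alpha * x otherwise.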
def ref_leaky_relu(x, alpha=0.01):
    out = np.copy(x)
    out[out < 0] *= alpha
    return out


class TestLeakyRelu(TestActivation):
    def get_alpha(self):
        return 0.02

    def setUp(self):
        self.op_type = "leaky_relu"
        self.python_api = paddle.nn.functional.leaky_relu
        self.public_python_api = paddle.nn.functional.leaky_relu
        self.prim_op_type = "comp"
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()
        alpha = self.get_alpha()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        # The same reason with TestAbs
        x[np.abs(x) < 0.005] = 0.05
        out = ref_leaky_relu(x, alpha)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {'alpha': alpha}
        self.convert_input_output()

    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        self.check_output(check_prim=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestLeakyReluAlpha1(TestLeakyRelu):
    def get_alpha(self):
        return 2


class TestLeakyReluAlpha2(TestLeakyRelu):
    def get_alpha(self):
        return -0.01


class TestLeakyReluAlpha3(TestLeakyRelu):
    def get_alpha(self):
        return -2.0


class TestLeakyRelu_ZeroDim(TestLeakyRelu):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestLeakyReluAPI(unittest.TestCase):
    # test paddle.nn.LeakyReLU, paddle.nn.functional.leaky_relu,
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out1 = F.leaky_relu(x)
                m = paddle.nn.LeakyReLU()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_leaky_relu(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.leaky_relu(x)
        m = paddle.nn.LeakyReLU()
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.leaky_relu(x, 0.6)
        m = paddle.nn.LeakyReLU(0.6)
        out2 = m(x)
        out_ref = ref_leaky_relu(self.x_np, 0.6)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.leaky_relu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.leaky_relu, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.leaky_relu(x_fp16)


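# Reference GELU used by the tests below. The exact form is
# 0.5 * x * (1 + erf(x / sqrt(2))); the approximate form uses the tanh-based
# approximation 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).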
def gelu(x, approximate):
    if approximate:
        y_ref = (
            0.5
            * x
            * (
                1.0
                + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3)))
            )
        )
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)


class TestGeluApproximate(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.gelu
        self.public_python_api = paddle.nn.functional.gelu
        self.init_dtype()
        self.init_shape()
        approximate = True
        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

        # The backward decomposition of gelu is inconsistent with the raw
        # kernel on the CPU device, so the tolerance is lowered to 1e-8 to
        # pass the unit test.
        self.rev_comp_rtol = 1e-8
        self.rev_comp_atol = 1e-8
        # Cumulative error occurs between comp and cinn, so cinn_rtol is also
        # set to 1e-8, matching rev_comp_rtol.
        self.cinn_rtol = 1e-8
        self.cinn_atol = 1e-8

    def test_check_output(self):
        self.check_output(check_prim=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.gelu
        self.public_python_api = paddle.nn.functional.gelu
        self.init_dtype()
        self.init_shape()
        approximate = False
        np.random.seed(2048)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = gelu(x, approximate)
        self.if_enable_cinn()

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
        self.attrs = {"approximate": approximate}
        # The backward decomposition of gelu is inconsistent with the raw
        # kernel on the CPU device, so the tolerance is lowered to 1e-8 to
        # pass the unit test.
        self.rev_comp_rtol = 1e-8
        self.rev_comp_atol = 1e-8
        # Cumulative error occurs between comp and cinn, so cinn_rtol is also
        # set to 1e-8, matching rev_comp_rtol.
        self.cinn_rtol = 1e-8
        self.cinn_atol = 1e-8

    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        self.check_output(check_prim=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestGelu_ZeroDim(TestGelu):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestGELUAPI(unittest.TestCase):
    # test paddle.nn.GELU, paddle.nn.functional.gelu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [11, 17]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.enable_cinn = False

        # The backward decomposition of gelu is inconsistent with the raw
        # kernel on the CPU device, so the tolerance is lowered to 1e-8 to
        # pass the unit test.
        self.rev_comp_rtol = 1e-8
        self.rev_comp_atol = 1e-8

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [11, 17], dtype="float32")
                out1 = F.gelu(x)
                m = paddle.nn.GELU()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = gelu(self.x_np, False)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.gelu(x)
        m = paddle.nn.GELU()
        out2 = m(x)
        out_ref = gelu(self.x_np, False)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = F.gelu(x, True)
        m = paddle.nn.GELU(True)
        out2 = m(x)
        out_ref = gelu(self.x_np, True)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.gelu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[11, 17], dtype='int32'
                )
                self.assertRaises(TypeError, F.gelu, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[11, 17], dtype='float16'
                )
                F.gelu(x_fp16)


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.python_api = paddle.nn.functional.hardtanh
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same with TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': t}
        self.convert_input_output()
        self.attrs = {'t_min': t_min, 't_max': t_max}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


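# Reference relu6: clamp to [0, threshold], i.e. min(max(x, 0), threshold),
# e.g. ref_relu6(np.array([-1.0, 3.0, 8.0])) -> [0.0, 3.0, 6.0]. Note the
# boundary shift on the second line is overwritten by the final clamp.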
def ref_relu6(x, threshold=6.0):
    out = np.copy(x)
    out[np.abs(x - threshold) < 0.005] = threshold + 0.02
    out = np.minimum(np.maximum(x, 0), threshold)
    return out


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.relu6

        np.random.seed(1024)
        x = np.random.uniform(-1, 10, self.shape).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_relu6(x)

        self.attrs = {'threshold': 6.0}

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRelu6_ZeroDim(TestRelu6):
    def init_shape(self):
        self.shape = []


class TestRelu6API(unittest.TestCase):
    # test paddle.nn.ReLU6, paddle.nn.functional.relu6
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 10, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.relu6(x)
                relu6 = paddle.nn.ReLU6()
                out2 = relu6(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_relu6(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.relu6(x)
        relu6 = paddle.nn.ReLU6()
        out2 = relu6(x)
        out_ref = ref_relu6(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out = paddle.nn.functional.relu6(x)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_relu6(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.relu6, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.relu6, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.relu6(x_fp16)


class TestRelu6APIWarnings(unittest.TestCase):
    def test_warnings(self):
        with paddle.fluid.framework._static_guard():
            with warnings.catch_warnings(record=True) as context:
                warnings.simplefilter("always")

                helper = LayerHelper("relu6")
                data = paddle.static.data(
                    name='data', shape=[None, 3, 32, 32], dtype='float32'
                )
                out = helper.create_variable_for_type_inference(
                    dtype=data.dtype
                )
                os.environ['FLAGS_print_extra_attrs'] = "1"
                helper.append_op(
                    type="relu6",
                    inputs={'X': data},
                    outputs={'Out': out},
                    attrs={'threshold': 6.0},
                )
                self.assertTrue(
                    "op relu6 use extra_attr: threshold"
                    in str(context[-1].message)
                )
                os.environ['FLAGS_print_extra_attrs'] = "0"


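# Reference hardswish: with the default threshold=6, scale=6 and offset=3
# this equals x * relu6(x + 3) / 6, e.g. x = [-3, 0, 3] maps to [0, 0, 3].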
def ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0):
    x_dtype = x.dtype
    if x_dtype == 'float16':
        x_dtype = 'float16'
        x = x.astype('float32')
    return (
        x * np.minimum(np.maximum(x + offset, 0.0), threshold) / scale
    ).astype(x_dtype)


class TestHardSwish(TestActivation):
    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()
        self.init_shape()
        self.prim_op_type = "comp"
        self.python_api = paddle.nn.functional.hardswish
        self.public_python_api = paddle.nn.functional.hardswish

        np.random.seed(1024)
        x = np.random.uniform(-6, 6, self.shape).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        # the same with TestAbs
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
        out = ref_hardswish(x, threshold, scale, offset)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.enable_cinn = False

    def init_shape(self):
        self.shape = [10, 12]

    def if_only_check_prim(self):
        return False

    def test_check_grad(self):
        self.check_grad(
            ['X'],
            'Out',
            check_prim=True,
            only_check_prim=self.if_only_check_prim(),
        )

    def test_check_output(self):
        self.check_output(check_prim=True)


class TestHardSwish_ZeroDim(TestHardSwish):
    def setUp(self):
        super().setUp()
        self.enable_cinn = False

    def init_shape(self):
        self.shape = []


class TestHardswishAPI(unittest.TestCase):
    # test paddle.nn.Hardswish, paddle.nn.functional.hardswish
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.hardswish(x)
                m = paddle.nn.Hardswish()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_hardswish(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor([11648.0, 11448.0])
        out1 = F.hardswish(x)
        m = paddle.nn.Hardswish()
        out2 = m(x)
        out_ref = [11648.0, 11448.0]
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out = paddle.nn.functional.hardswish(x)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_hardswish(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        x = paddle.to_tensor(self.x_np)
        out = paddle.nn.functional.hardswish(x)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.hardswish, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.hardswish, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.hardswish(x_fp16)


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        np.random.seed(4096)
        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason with TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log(np.exp(t) + 1)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
        self.attrs = {'threshold': threshold}

    def test_check_output(self):
        self.check_output(check_dygraph=False)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.02, check_dygraph=False
        )


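# Reference ELU: elu(x) = x for x > 0 and alpha * (exp(x) - 1) otherwise.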
def elu(x, alpha):
    out_ref = np.where(x > 0, x, alpha * (np.exp(x) - 1))
    return out_ref.astype(x.dtype)


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.elu

        np.random.seed(1024)
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
        alpha = self.get_alpha()
        out = elu(x, alpha)
        # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1)
        # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
        self.attrs = {'alpha': alpha}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def get_alpha(self):
        return 1.0


class TestELUAlpha(TestELU):
    def get_alpha(self):
        return -0.2


class TestELU_ZeroDim(TestELU):
    def init_shape(self):
        self.shape = []


class TestELUAPI(unittest.TestCase):
    # test paddle.nn.ELU, paddle.nn.functional.elu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.elu = F.elu

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out1 = self.elu(x)
                m = paddle.nn.ELU()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = elu(self.x_np, 1.0)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = self.elu(x)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ELU()
        out2 = m(x)
        out_ref = elu(self.x_np, 1.0)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.elu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.ELU(0.2)
        out2 = m(x)
        out_ref = elu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, self.elu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[10, 12], dtype='int32'
                )
                self.assertRaises(TypeError, self.elu, x_int32)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[10, 12], dtype='float16'
                )
                self.elu(x_fp16)


class TestELUInplaceAPI(TestELUAPI):
    # test paddle.nn.functional.elu_
    def executed_api(self):
        self.elu = F.elu_

    def test_alpha_error(self):
        x = paddle.to_tensor(self.x_np)
        self.assertRaises(Exception, F.elu_, x, -0.2)


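# Reference CELU: celu(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1)).
# alpha must be nonzero; TestCELUAPI.test_errors checks the ZeroDivisionError.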
def celu(x, alpha):
    out_ref = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x / alpha) - 1))
    return out_ref.astype(x.dtype)


class TestCELU(TestActivation):
    def setUp(self):
        self.op_type = "celu"
        self.init_dtype()
        self.init_shape()

        self.python_api = paddle.nn.functional.celu
        np.random.seed(1024)
        x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
        alpha = 1.5
        out = celu(x, alpha)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()
        self.attrs = {'alpha': alpha}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCELU_ZeroDim(TestCELU):
    def init_shape(self):
        self.shape = []


class TestCELUAPI(unittest.TestCase):
    # test paddle.nn.CELU, paddle.nn.functional.celu
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-3, 3, [10, 12]).astype('float32')
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )
        self.executed_api()

    def executed_api(self):
        self.celu = F.celu

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out1 = self.celu(x, 1.5)
                m = paddle.nn.CELU(1.5)
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = celu(self.x_np, 1.5)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = self.celu(x, 1.5)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(1.5)
        out2 = m(x)
        out_ref = celu(self.x_np, 1.5)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

        out1 = self.celu(x, 0.2)
        x = paddle.to_tensor(self.x_np)
        m = paddle.nn.CELU(0.2)
        out2 = m(x)
        out_ref = celu(self.x_np, 0.2)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, self.celu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[10, 12], dtype='int32'
                )
                self.assertRaises(TypeError, self.celu, x_int32)
                # The alpha must not be equal to 0.
                x_fp32 = paddle.static.data(
                    name='x_fp32', shape=[10, 12], dtype='float32'
                )
                self.assertRaises(ZeroDivisionError, F.celu, x_fp32, 0)
                # support the input dtype is float16
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[10, 12], dtype='float16'
                )
                self.celu(x_fp16)


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.python_api = paddle.reciprocal
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)

    def test_check_output(self):
        self.check_output()


class TestReciprocal_ZeroDim(TestReciprocal):
    def init_shape(self):
        self.shape = []


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.prim_op_type = "prim"
        self.python_api = paddle.log
        self.public_python_api = paddle.log
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def if_enable_cinn(self):
        pass

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)

    def test_error(self):
        with paddle.fluid.framework._static_guard():
            in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
            in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

            self.assertRaises(TypeError, paddle.log, in1)
            self.assertRaises(TypeError, paddle.log, in2)


class Test_Log_Op_Fp16(unittest.TestCase):
    def test_api_fp16(self):
        with paddle.fluid.framework._static_guard():
            with static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                x = [[2, 3, 4], [7, 8, 9]]
                x = paddle.to_tensor(x, dtype='float16')
                out = paddle.log(x)
                if core.is_compiled_with_cuda():
                    place = paddle.CUDAPlace(0)
                    exe = paddle.static.Executor(place)
                    (res,) = exe.run(fetch_list=[out])


class TestLog_ZeroDim(TestLog):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestLog2(TestActivation):
    def setUp(self):
        self.op_type = "log2"
        self.python_api = paddle.log2
        self.init_dtype()
        self.init_shape()

        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log2(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_error(self):
        with paddle.fluid.framework._static_guard():
            in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
            in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")

            self.assertRaises(TypeError, paddle.log2, in1)
            self.assertRaises(TypeError, paddle.log2, in2)

    def test_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
                data_x = paddle.static.data(
                    name="data_x", shape=[11, 17], dtype="float64"
                )

                out1 = paddle.log2(data_x)
                exe = paddle.static.Executor(place=fluid.CPUPlace())
                exe.run(paddle.static.default_startup_program())
                (res1,) = exe.run(
                    paddle.static.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[out1],
                )
            expected_res = np.log2(input_x)
            np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log2(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log2(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestLog2_ZeroDim(TestLog2):
    def init_shape(self):
        self.shape = []


class TestLog10(TestActivation):
    def setUp(self):
        self.op_type = "log10"
2904
        self.python_api = paddle.log10
J
joejiong 已提交
2905
        self.init_dtype()
2906
        self.init_shape()
J
joejiong 已提交
2907

2908
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
J
joejiong 已提交
2909 2910 2911 2912
        out = np.log10(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
2913
        self.convert_input_output()
J
joejiong 已提交
2914 2915 2916 2917

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
W
wanghuancoder 已提交
2918
        self.check_grad(['X'], 'Out')
J
joejiong 已提交
2919

2920 2921 2922 2923 2924 2925 2926

class TestLog10_ZeroDim(TestLog10):
    def init_shape(self):
        self.shape = []


class TestLog10API(unittest.TestCase):
J
joejiong 已提交
2927
    def test_error(self):
2928
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2929 2930
            in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
            in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")
J
joejiong 已提交
2931

W
wanghuancoder 已提交
2932 2933
            self.assertRaises(TypeError, paddle.log10, in1)
            self.assertRaises(TypeError, paddle.log10, in2)
J
joejiong 已提交
2934 2935

    def test_api(self):
2936
        with paddle.fluid.framework._static_guard():
W
wanghuancoder 已提交
2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954
            with paddle.static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
                data_x = paddle.static.data(
                    name="data_x", shape=[11, 17], dtype="float64"
                )

                out1 = paddle.log10(data_x)
                exe = paddle.static.Executor(place=paddle.CPUPlace())
                exe.run(paddle.static.default_startup_program())
                (res1,) = exe.run(
                    paddle.static.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[out1],
                )
            expected_res = np.log10(input_x)
            np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = paddle.to_tensor(np_x)
            z = paddle.log10(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log10(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestLog1p(TestActivation):
    def setUp(self):
        self.op_type = "log1p"
        self.python_api = paddle.log1p
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.log1p(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class Test_Log1p_Op_Fp16(unittest.TestCase):
    def test_api_fp16(self):
        with paddle.fluid.framework._static_guard():
            with static.program_guard(
                paddle.static.Program(), paddle.static.Program()
            ):
                x = [[2, 3, 4], [7, 8, 9]]
                x = paddle.to_tensor(x, dtype='float16')
                out = paddle.log1p(x)
                if core.is_compiled_with_cuda():
                    place = paddle.CUDAPlace(0)
                    exe = paddle.static.Executor(place)
                    (res,) = exe.run(fetch_list=[out])


class TestLog1p_ZeroDim(TestLog1p):
    def init_shape(self):
        self.shape = []


class TestLog1pAPI(unittest.TestCase):
    def test_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
                data_x = paddle.static.data(
                    name="data_x",
                    shape=[11, 17],
                    dtype="float64",
                )

                out1 = paddle.log1p(data_x)
                exe = fluid.Executor(place=fluid.CPUPlace())
                exe.run(fluid.default_startup_program())
                (res1,) = exe.run(
                    fluid.default_main_program(),
                    feed={"data_x": input_x},
                    fetch_list=[out1],
                )
            expected_res = np.log1p(input_x)
            np.testing.assert_allclose(res1, expected_res, rtol=1e-05)

        # dygraph
        with fluid.dygraph.guard():
            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            data_x = fluid.dygraph.to_variable(np_x)
            z = paddle.log1p(data_x)
            np_z = z.numpy()
            z_expected = np.array(np.log1p(np_x))
        np.testing.assert_allclose(np_z, z_expected, rtol=1e-05)


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def test_check_output(self):
        self.check_output()


class TestSquare_ZeroDim(TestSquare):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda() or core.is_compiled_with_rocm(),
    "core is not compiled with CUDA",
)
class TestSquareBF16(OpTest):
    def setUp(self):
        self.op_type = "square"
        self.python_api = paddle.square
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, [11, 17]).astype(np.float32)
        out = np.square(x)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
        }
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', numeric_grad_delta=0.5)


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.prim_op_type = "comp"
        self.python_api = paddle.pow
        self.public_python_api = paddle.pow
        self.init_dtype()
        self.init_shape()
        self.if_enable_cinn()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, self.shape).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {'factor': 3.0}
        self.convert_input_output()

    def if_enable_cinn(self):
        pass

    def test_check_output(self):
        self.check_output(check_prim=True)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_prim=True)


class TestPow_ZeroDim(TestPow):
    def init_shape(self):
        self.shape = []

    def if_enable_cinn(self):
        self.enable_cinn = False


class TestPow_factor_tensor(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.python_api = paddle.pow
        self.enable_cinn = False
        self.init_dtype()

        np.random.seed(1024)
        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype(self.dtype),
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_api(self):
        with paddle.fluid.framework._static_guard():
            input = np.random.uniform(1, 2, [11, 17]).astype("float32")
            x = paddle.static.data(name="x", shape=[11, 17], dtype="float32")
            res = paddle.static.data(
                name="res", shape=[11, 17], dtype="float32"
            )

            factor_1 = 2.0
            factor_2 = paddle.tensor.fill_constant([1], "float32", 3.0)
            out_1 = paddle.pow(x, factor_1)
            out_2 = paddle.pow(x, factor_2)
            out_4 = paddle.pow(x, factor_1, name='pow_res')
            out_6 = paddle.pow(x, factor_2)
            self.assertEqual(('pow_res' in out_4.name), True)

            exe = fluid.Executor(place=fluid.CPUPlace())
            res_1, res_2, res, res_6 = exe.run(
                fluid.default_main_program(),
                feed={"x": input},
                fetch_list=[out_1, out_2, res, out_6],
            )

            assert np.allclose(res_1, np.power(input, 2))
            assert np.allclose(res_2, np.power(input, 3))
            assert np.allclose(res_6, np.power(input, 3))


def ref_stanh(x, scale_a=0.67, scale_b=1.7159):
    out = scale_b * np.tanh(x * scale_a)
    return out


class TestSTanh(TestActivation):
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        self.op_type = "stanh"
        self.python_api = paddle.stanh
        self.init_dtype()
        self.init_shape()

        scale_a = self.get_scale_a()
        scale_b = self.get_scale_b()

        np.random.seed(1024)
        x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
        # The same reason as in TestAbs
        out = ref_stanh(x, scale_a, scale_b)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.convert_input_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSTanhScaleA(TestSTanh):
    def get_scale_a(self):
        return 2.0


class TestSTanhScaleB(TestSTanh):
    def get_scale_b(self):
        return 0.5


3240 3241 3242 3243 3244
class TestSTanh_ZeroDim(TestSTanh):
    def init_shape(self):
        self.shape = []


class TestSTanhAPI(unittest.TestCase):
    # test paddle.nn.stanh
    def get_scale_a(self):
        return 0.67

    def get_scale_b(self):
        return 1.7159

    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype('float32')
        self.scale_a = self.get_scale_a()
        self.scale_b = self.get_scale_b()
        self.place = (
            paddle.CUDAPlace(0)
            if core.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', [10, 12])
                out = paddle.stanh(x, self.scale_a, self.scale_b)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out = paddle.stanh(x, self.scale_a, self.scale_b)
        out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
        for r in [out]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', [10, 12], dtype="float32")
                out = paddle.stanh(x, self.scale_a, self.scale_b)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, paddle.stanh, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, paddle.stanh, x_int32)
                # float16 input is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                paddle.stanh(x_fp16)


class TestSTanhAPIScaleA(TestSTanhAPI):
    def get_scale_a(self):
        return 2.0


class TestSTanhAPIScaleB(TestSTanhAPI):
    def get_scale_b(self):
        return 0.5


def ref_softplus(x, beta=1, threshold=20):
    x_beta = beta * x
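    # for numerical stability, x is returned unchanged once beta * x exceeds threshold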
    out = np.select(
        [x_beta <= threshold, x_beta > threshold],
        [np.log(1 + np.exp(x_beta)) / beta, x],
    )
    return out


class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.python_api = paddle.nn.functional.softplus
        self.init_dtype()
        self.init_shape()

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': x}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': out}

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftplus_ZeroDim(TestSoftplus):
    def init_shape(self):
        self.shape = []


@unittest.skipIf(
    not core.is_compiled_with_cuda() or core.is_compiled_with_rocm(),
    "core is not compiled with CUDA",
)
class TestSoftplusBF16(OpTest):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()
        self.python_api = paddle.nn.functional.softplus

        beta = 2
        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, [10, 12]).astype(np.float32)
        out = ref_softplus(x, beta, threshold)
        self.inputs = {'X': convert_float_to_uint16(x)}
        self.attrs = {'beta': beta, "threshold": threshold}
        self.outputs = {'Out': convert_float_to_uint16(out)}

    def init_dtype(self):
        self.dtype = np.uint16

    def test_check_output(self):
        place = core.CUDAPlace(0)
        self.check_output_with_place(place)

    def test_check_grad(self):
        place = core.CUDAPlace(0)
        self.check_grad_with_place(place, ['X'], 'Out', numeric_grad_delta=0.05)


class TestSoftplusAPI(unittest.TestCase):
    # test paddle.nn.Softplus, paddle.nn.functional.softplus
    def setUp(self):
        self.beta = 2
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.softplus(x, self.beta, self.threshold)
                softplus = paddle.nn.Softplus(self.beta, self.threshold)
                out2 = softplus(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.softplus(x, self.beta, self.threshold)
        softplus = paddle.nn.Softplus(self.beta, self.threshold)
        out2 = softplus(x)
        out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.softplus, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.softplus, x_int32)
                # float16 input is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.softplus(x_fp16)


def ref_softsign(x):
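    # softsign(x) = x / (1 + |x|)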
    out = np.divide(x, 1 + np.abs(x))
    return out


class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()
        self.init_shape()

        self.python_api = paddle.nn.functional.softsign

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_softsign(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftsign_ZeroDim(TestSoftsign):
    def init_shape(self):
        self.shape = []


class TestSoftsignAPI(unittest.TestCase):
    # test paddle.nn.Softsign, paddle.nn.functional.softsign
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.softsign(x)
                softsign = paddle.nn.Softsign()
                out2 = softsign(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_softsign(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.softsign(x)
        softsign = paddle.nn.Softsign()
        out2 = softsign(x)
        out_ref = ref_softsign(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.softsign, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.softsign, x_int32)
                # float16 input is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.softsign(x_fp16)


def ref_thresholded_relu(x, threshold=1.0):
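    # values at or below the threshold are zeroed out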
    out = (x > threshold) * x
    return out


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()
        self.init_shape()
        self.python_api = paddle.nn.functional.thresholded_relu

        threshold = 15

        np.random.seed(1024)
        x = np.random.uniform(-20, 20, self.shape).astype(self.dtype)
        x[np.abs(x) < 0.005] = 0.02
        out = ref_thresholded_relu(x, threshold)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"threshold": threshold}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestThresholdedRelu_ZeroDim(TestThresholdedRelu):
    def init_shape(self):
        self.shape = []


class TestThresholdedReluAPI(unittest.TestCase):
    # test paddle.nn.ThresholdedReLU, paddle.nn.functional.thresholded_relu
    def setUp(self):
        self.threshold = 15
        np.random.seed(1024)
        self.x_np = np.random.uniform(-20, 20, [10, 12]).astype(np.float64)
        self.x_np[np.abs(self.x_np) < 0.005] = 0.02
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.thresholded_relu(x, self.threshold)
                thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
                out2 = thresholded_relu(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_thresholded_relu(self.x_np, self.threshold)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        paddle.disable_static()
        x = paddle.to_tensor(self.x_np)
        out1 = F.thresholded_relu(x, self.threshold)
        thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
        out2 = thresholded_relu(x)
        out_ref = ref_thresholded_relu(self.x_np, self.threshold)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.thresholded_relu, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.thresholded_relu, x_int32)
                # float16 input is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.thresholded_relu(x_fp16)


def ref_hardsigmoid(x, slope=0.166666666666667, offset=0.5):
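    # piecewise-linear sigmoid approximation: clip(slope * x + offset, 0, 1)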
    return np.maximum(np.minimum(x * slope + offset, 1.0), 0.0).astype(x.dtype)


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.dtype = 'float64'
        self.slope = 0.166666666666667
        self.offset = 0.5
        self.set_attrs()
        self.init_shape()
        self.python_api = paddle.nn.functional.hardsigmoid

        x = np.random.uniform(-5, 5, self.shape).astype(self.dtype)
        lower_threshold = -self.offset / self.slope
        upper_threshold = (1.0 - self.offset) / self.slope

        # Same reason as TestAbs
        delta = 0.005
        x[np.abs(x - lower_threshold) < delta] = lower_threshold - 0.02
        x[np.abs(x - upper_threshold) < delta] = upper_threshold - 0.02

        out = ref_hardsigmoid(x, self.slope, self.offset)

        self.attrs = {'slope': self.slope, 'offset': self.offset}

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def set_attrs(self):
        pass


class TestHardSigmoidFP32(TestHardSigmoid):
    def set_attrs(self):
        self.dtype = 'float32'


class TestHardSigmoidSlopeOffset(TestHardSigmoid):
    def set_attrs(self):
        self.slope = 0.2
        self.offset = 0.4


class TestHardSigmoid_ZeroDim(TestHardSigmoid):
    def init_shape(self):
        self.shape = []


class TestHardsigmoidAPI(unittest.TestCase):
    # test paddle.nn.Hardsigmoid, paddle.nn.functional.hardsigmoid
    def setUp(self):
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.hardsigmoid(x)
                m = paddle.nn.Hardsigmoid()
                out2 = m(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_hardsigmoid(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.hardsigmoid(x)
        m = paddle.nn.Hardsigmoid()
        out2 = m(x)
        out_ref = ref_hardsigmoid(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
        np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.hardsigmoid, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.hardsigmoid, x_int32)
                # float16 input is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.hardsigmoid(x_fp16)


def ref_swish(x):
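    # swish(x) = x * sigmoid(x); expit is SciPy's logistic sigmoid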
    out = x * expit(x)
    return out


class TestSwish(TestActivation):
    def setUp(self):
        self.op_type = "swish"
        self.python_api = paddle.nn.functional.swish
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_swish(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {'beta': 1.0}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(
            ['X'],
            'Out',
        )


class TestSwish_ZeroDim(TestSwish):
    def init_shape(self):
        self.shape = []


class TestSwishAPI(unittest.TestCase):
    # test paddle.nn.Swish, paddle.nn.functional.swish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.swish(x)
                swish = paddle.nn.Swish()
                out2 = swish(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_swish(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.swish(x)
        swish = paddle.nn.Swish()
        out2 = swish(x)
        out_ref = ref_swish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out = paddle.nn.functional.swish(x)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_swish(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.swish, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.swish, x_int32)
                # float16 input is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.swish(x_fp16)


def ref_mish(x, threshold=20.0):
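    # mish(x) = x * tanh(softplus(x)), using the same large-input cutoff as the op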
    softplus = np.select(
        [x <= threshold, x > threshold], [np.log(1 + np.exp(x)), x]
    )
    return x * np.tanh(softplus)


class TestMish(TestActivation):
    def setUp(self):
        self.op_type = "mish"
        self.python_api = paddle.nn.functional.mish
        self.init_dtype()
        self.init_shape()

        np.random.seed(1024)
        x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
        out = ref_mish(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.convert_input_output()

    def init_shape(self):
        self.shape = [10, 12]

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestMish_ZeroDim(TestMish):
    def init_shape(self):
        self.shape = []


class TestMishAPI(unittest.TestCase):
    # test paddle.nn.Mish, paddle.nn.functional.mish
    def setUp(self):
        np.random.seed(1024)
        self.x_np = np.random.uniform(-1, 1, [10, 12]).astype(np.float64)
        self.place = (
            paddle.CUDAPlace(0)
            if paddle.is_compiled_with_cuda()
            else paddle.CPUPlace()
        )

    def test_static_api(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out1 = F.mish(x)
                mish = paddle.nn.Mish()
                out2 = mish(x)
                exe = paddle.static.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
            out_ref = ref_mish(self.x_np)
            for r in res:
                np.testing.assert_allclose(out_ref, r, rtol=1e-05)

    def test_dygraph_api(self):
        x = paddle.to_tensor(self.x_np)
        out1 = F.mish(x)
        mish = paddle.nn.Mish()
        out2 = mish(x)
        out_ref = ref_mish(self.x_np)
        for r in [out1, out2]:
            np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)

    def test_fluid_api(self):
        with paddle.fluid.framework._static_guard():
            with fluid.program_guard(fluid.Program()):
                x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
                out = paddle.nn.functional.mish(x)
                exe = fluid.Executor(self.place)
                res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
            out_ref = ref_mish(self.x_np)
            np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)

    def test_errors(self):
        with paddle.fluid.framework._static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                # The input type must be Variable.
                self.assertRaises(TypeError, F.mish, 1)
                # The input dtype must be float16, float32, float64.
                x_int32 = paddle.static.data(
                    name='x_int32', shape=[12, 10], dtype='int32'
                )
                self.assertRaises(TypeError, F.mish, x_int32)
                # float16 input is supported
                x_fp16 = paddle.static.data(
                    name='x_fp16', shape=[12, 10], dtype='float16'
                )
                F.mish(x_fp16)


# ------------------ Test Cudnn Activation ----------------------
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
    @unittest.skipIf(
        not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{}_{}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


# ------------------ Test Fp16 ----------------------
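# Each factory below builds a reduced-precision variant of an existing activation
# test class, renames it, and registers it in globals() so unittest can discover it.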
def create_test_act_fp16_class(
    parent,
    atol=1e-3,
    grad_check=True,
    check_dygraph=True,
    check_prim=False,
    enable_cinn=False,
    grad_atol=1e-2,
    **kwargs
):
    @unittest.skipIf(
        not paddle.is_compiled_with_cuda(), "core is not compiled with CUDA"
    )
    class TestActFp16(parent):
        def setUp(self):
            super().setUp()
            for k, v in kwargs.items():
                setattr(self, k, v)

        def init_dtype(self):
            self.dtype = np.float16

        def if_enable_cinn(self):
            self.enable_cinn = enable_cinn

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(
                    place,
                    atol=atol,
                    check_dygraph=check_dygraph,
                    check_prim=check_prim,
                )

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place,
                    ['X'],
                    'Out',
                    check_dygraph=check_dygraph,
                    check_prim=check_prim,
                    max_relative_error=grad_atol,
                )

    cls_name = "{}_{}".format(parent.__name__, "FP16OP")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestExpFp32_Prim, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestExpm1)
create_test_act_fp16_class(TestSigmoid, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestSilu, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestTanhshrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftshrink)
create_test_act_fp16_class(TestSqrt, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestSqrtComp, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestAbs, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(
    TestFloor, check_prim=True, grad_check=False, enable_cinn=True
)
create_test_act_fp16_class(TestCos)
create_test_act_fp16_class(TestTan)
create_test_act_fp16_class(TestCosh)
create_test_act_fp16_class(TestAcos)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestSinh)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestAcosh)
create_test_act_fp16_class(TestAsinh)
create_test_act_fp16_class(TestAtanh)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(
    TestGelu,
    check_prim=True,
    enable_cinn=True,
    rev_comp_rtol=1e-3,
    rev_comp_atol=1e-3,
    cinn_rtol=1e-3,
    cinn_atol=1e-3,
)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu, check_dygraph=False)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestCELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog, check_prim=True)
if core.is_compiled_with_rocm():
    create_test_act_fp16_class(TestLog2)
else:
    create_test_act_fp16_class(TestLog2)
create_test_act_fp16_class(TestLog10)
create_test_act_fp16_class(TestLog1p)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, check_prim=True)
create_test_act_fp16_class(TestPow_factor_tensor)
create_test_act_fp16_class(TestSTanh)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish)
create_test_act_fp16_class(TestHardSwish, check_prim=True)
create_test_act_fp16_class(TestMish)
create_test_act_fp16_class(TestLeakyRelu, check_prim=True, enable_cinn=True)
create_test_act_fp16_class(
    TestLeakyReluAlpha1, check_prim=True, enable_cinn=True
)
create_test_act_fp16_class(
    TestLeakyReluAlpha2, check_prim=True, enable_cinn=True
)
create_test_act_fp16_class(
    TestLeakyReluAlpha3, check_prim=True, enable_cinn=True
)
create_test_act_fp16_class(TestLeakyRelu_ZeroDim, check_prim=True)
create_test_act_fp16_class(TestRsqrt, check_prim=True, enable_cinn=True)


def create_test_act_bf16_class(
    parent,
    atol=1e-2,
    grad_check=True,
    check_dygraph=True,
    check_prim=False,
    enable_cinn=False,
    grad_atol=1e-2,
    **kwargs
):
    @unittest.skipIf(
        not core.is_compiled_with_cuda()
        or not core.is_bfloat16_supported(core.CUDAPlace(0)),
        "core is not compiled with CUDA or does not support bfloat16",
    )
    class TestActBF16(parent):
        def setUp(self):
            super().setUp()
            for k, v in kwargs.items():
                setattr(self, k, v)

        def init_dtype(self):
            self.dtype = np.float32

        def if_enable_cinn(self):
            self.enable_cinn = enable_cinn

        def convert_input_output(self):
            self.inputs = {'X': convert_float_to_uint16(self.inputs['X'])}
            self.outputs = {'Out': convert_float_to_uint16(self.outputs['Out'])}
            self.dtype = np.uint16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            self.check_output_with_place(
                place, atol=atol, check_prim=check_prim
            )

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            if grad_check:
                self.check_grad_with_place(
                    place,
                    ['X'],
                    'Out',
                    max_relative_error=grad_atol,
                    check_prim=check_prim,
                )

    cls_name = "{}_{}".format(parent.__name__, "BF16OP")
    TestActBF16.__name__ = cls_name
    globals()[cls_name] = TestActBF16


create_test_act_bf16_class(TestActivation)
create_test_act_bf16_class(TestExpFp32_Prim, check_prim=True)
create_test_act_bf16_class(TestExpm1)
create_test_act_bf16_class(TestSigmoid, check_prim=True)
create_test_act_bf16_class(TestSilu, check_prim=True)
create_test_act_bf16_class(TestLogSigmoid)
create_test_act_bf16_class(TestTanh, check_prim=True)
create_test_act_bf16_class(TestTanhshrink)
create_test_act_bf16_class(TestHardShrink)
create_test_act_bf16_class(TestSoftshrink)
create_test_act_bf16_class(TestSqrt, check_prim=True)
create_test_act_bf16_class(TestSqrtComp, check_prim=True)
create_test_act_bf16_class(TestAbs, check_prim=True)
create_test_act_bf16_class(TestCeil, grad_check=False)
create_test_act_bf16_class(TestFloor, grad_check=False, check_prim=True)
create_test_act_bf16_class(TestCos)
create_test_act_bf16_class(TestTan)
create_test_act_bf16_class(TestCosh)
create_test_act_bf16_class(TestAcos)
create_test_act_bf16_class(TestSin)
create_test_act_bf16_class(TestSinh)
create_test_act_bf16_class(TestAsin)
create_test_act_bf16_class(TestAtan)
create_test_act_bf16_class(TestAcosh)
create_test_act_bf16_class(TestAsinh)
create_test_act_bf16_class(TestAtanh)
create_test_act_bf16_class(TestRound, grad_check=False)
create_test_act_bf16_class(TestRelu, check_prim=True)
create_test_act_bf16_class(
    TestGelu,
    check_prim=True,
    rev_comp_rtol=1e-2,
    rev_comp_atol=1e-2,
    cinn_rtol=1e-2,
    cinn_atol=1e-2,
)
create_test_act_bf16_class(TestBRelu)
create_test_act_bf16_class(TestRelu6)
create_test_act_bf16_class(TestSoftRelu, check_dygraph=False)
create_test_act_bf16_class(TestELU)
create_test_act_bf16_class(TestCELU)
create_test_act_bf16_class(TestReciprocal)
create_test_act_bf16_class(TestLog, check_prim=True)
if core.is_compiled_with_rocm():
    create_test_act_bf16_class(TestLog2)
else:
    create_test_act_bf16_class(TestLog2)
create_test_act_bf16_class(TestLog10)
create_test_act_bf16_class(TestLog1p)
create_test_act_bf16_class(TestSquare)
create_test_act_bf16_class(TestPow, check_prim=True)
create_test_act_bf16_class(TestPow_factor_tensor)
create_test_act_bf16_class(TestSTanh)
create_test_act_bf16_class(TestSoftplus)
create_test_act_bf16_class(TestSoftsign)
create_test_act_bf16_class(TestThresholdedRelu)
create_test_act_bf16_class(TestHardSigmoid)
create_test_act_bf16_class(TestSwish)
create_test_act_bf16_class(TestHardSwish, check_prim=True)
create_test_act_bf16_class(TestMish)
create_test_act_bf16_class(TestLeakyRelu, check_prim=True)
create_test_act_bf16_class(TestLeakyReluAlpha1, check_prim=True)
create_test_act_bf16_class(TestLeakyReluAlpha2, check_prim=True)
create_test_act_bf16_class(TestLeakyReluAlpha3, check_prim=True)
create_test_act_bf16_class(TestLeakyRelu_ZeroDim, check_prim=True)
create_test_act_bf16_class(TestRsqrt, check_prim=True)

if __name__ == "__main__":
    unittest.main()