#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from scipy.special import expit, erf
import paddle
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard


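# TestSqrtOpError checks that fluid.layers.sqrt rejects non-Variable inputs
# and integer dtypes, while a float16 input is accepted.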
class TestSqrtOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of sqrt op must be Variable or numpy.ndarray.
            in1 = 1
            self.assertRaises(TypeError, fluid.layers.sqrt, in1)
            # The input dtype of sqrt op must be float16, float32, float64.
            in2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.sqrt, in2)

            in3 = fluid.layers.data(
                name='input3', shape=[12, 10], dtype="float16")
            fluid.layers.sqrt(x=in3)


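# Base class for the activation op tests below: it exercises the "exp" op by
# default, and subclasses override setUp/init_dtype/init_kernel_type to test
# their own op, dtype, and kernel attributes.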
class TestActivation(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.init_dtype()
        self.init_kernel_type()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        self.dtype = np.float64

    def init_kernel_type(self):
        pass


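# Mixin that checks the corresponding paddle.* API (selected via self.op_type):
# the `out=` and `name=` arguments in static graph mode, and the dygraph result
# against the matching numpy function.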
class TestParameter(object):
    def test_out(self):
        with fluid.program_guard(fluid.Program()):
            data = fluid.layers.data(name="X", shape=[1])
            out = eval("paddle.%s(data, out=data)" % self.op_type)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result = exe.run(feed={"X": np.array([0.1])},
                             fetch_list=[data, out])
            self.assertEqual(result[0], result[1])

    def test_out_name(self):
        with fluid.program_guard(fluid.Program()):
            data = fluid.layers.data(name="X", shape=[1])
            out = eval("paddle.%s(data, name='Y', out=data)" % self.op_type)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            result = exe.run(feed={"X": np.array([0.1])},
                             fetch_list=[data, out])
            self.assertEqual(result[0], result[1])

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = eval("paddle.%s(x).numpy()" % self.op_type)
            z_expected = eval("np.%s(np_x)" % self.op_type)
            self.assertEqual(z, z_expected)


class TestSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLogSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestTanh(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()
        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        # TODO: if dtype is float64, the output (Out) differs at CPUPlace
        # between using and not using inplace, so set dtype to float32 for now.
        self.dtype = np.float32


class TestAtan(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "atan"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([0.1])
            x = fluid.dygraph.to_variable(np_x)
            z = paddle.atan(x).numpy()
            z_expected = np.arctan(np_x)
            self.assertEqual(z, z_expected)


class TestTanhShrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [10, 17]).astype(self.dtype)
        out = x - np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardShrink(TestActivation):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.init_dtype()

        threshold = 0.5
        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype) * 10
        out = np.copy(x)
        out[(out >= -threshold) & (out <= threshold)] = 0

        self.attrs = {'lambda': threshold}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftShrink(TestActivation):
    def setUp(self):
        self.op_type = "softshrink"
        self.init_dtype()

        lambda_val = 0.1
        x = np.random.uniform(0.25, 10, [10, 12]).astype(self.dtype)
        out = np.copy(x)
        out = (out < -lambda_val) * (out + lambda_val) + (out > lambda_val) * (
            out - lambda_val)

        self.attrs = {'lambda': lambda_val}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSqrt(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRsqrt(TestActivation):
    def setUp(self):
        self.op_type = "rsqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [10, 12]).astype(self.dtype) * 10
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.0005)


class TestAbs(TestActivation):
    def setUp(self):
        self.op_type = "abs"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 25]).astype(self.dtype)
        # Because we set delta = 0.005 when computing the numeric gradient,
        # if x is too small (e.g. 0.002), x_neg will be -0.003 and x_pos will
        # be 0.007, so the numeric gradient is inaccurate. Avoid this case.
        x[np.abs(x) < 0.005] = 0.02
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCeil(TestActivation):
    def setUp(self):
        self.op_type = "ceil"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The same reason as in TestFloor below
    def test_check_grad(self):
        pass


class TestFloor(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The gradient of floor, ceil and round is undefined;
    # the op returns zero as the gradient while numpy returns nan,
    # so the gradient check is skipped.
    def test_check_grad(self):
        pass


class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
        self.init_dtype()

        x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSin(TestActivation, TestParameter):
    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
        self.init_dtype()

        x = np.random.uniform(-0.95, 0.95, [10, 12]).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        pass


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestLeakyRelu(TestActivation):
    def setUp(self):
        self.op_type = "leaky_relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0.02 * x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


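# NumPy reference implementation of GELU used by the tests below: the exact
# erf-based form and the tanh approximation, selected by `approximate`.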
def gelu(x, approximate):
    if approximate:
        y_ref = 0.5 * x * (1.0 + np.tanh(
            np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
    else:
        y_ref = 0.5 * x * (1 + erf(x / np.sqrt(2)))
    return y_ref.astype(x.dtype)


class TestGeluApproximate(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        approximate = True
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()
        approximate = False
        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = gelu(x, approximate)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"approximate": approximate}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.init_dtype()

        x = np.random.uniform(-5, 10, [10, 12]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same reason as in TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()

        x = np.random.uniform(-1, 10, [10, 12]).astype(self.dtype)
        threshold = 6.0
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        out = np.minimum(np.maximum(x, 0), threshold)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardSwish(TestActivation):
    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()

        x = np.random.uniform(-6, 6, [10, 12]).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
        # The same reason as in TestAbs
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
        out = x * np.minimum(np.maximum(x + offset, 0), threshold) / scale

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason as in TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold - 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [10, 12]).astype(self.dtype)
        alpha = 1.
        out = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
        # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1)
        # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestELUOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of elu_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.elu, x1)
            # The input dtype of elu_op must be float16 float32 or float64.
            x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.elu, x2)


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestPow_factor_tensor(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype("float32")
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_api(self):
        import paddle
        import paddle.fluid as fluid

        input = np.random.uniform(1, 2, [11, 17]).astype("float32")
        x = fluid.layers.data(
            name="x", shape=[11, 17], append_batch_size=False, dtype="float32")
        res = fluid.layers.data(
            name="res",
            shape=[11, 17],
            append_batch_size=False,
            dtype="float32")

        factor_1 = 2.0
        factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
        out_1 = fluid.layers.pow(x, factor=factor_1)
        out_2 = fluid.layers.pow(x, factor=factor_2)
        out_3 = paddle.pow(x, factor_1, out=res)
        out_4 = paddle.pow(x, factor_1, name='pow_res')
        out_5 = paddle.pow(x, factor_1, out=res, name='pow_res')
        out_6 = paddle.pow(x, factor_2)
        self.assertEqual(('pow_res' in out_4.name), True)

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2, res_3, res, res_6 = exe.run(
            fluid.default_main_program(),
            feed={"x": input},
            fetch_list=[out_1, out_2, out_3, res, out_6])

        assert np.array_equal(res_1, np.power(input, 2))
        assert np.array_equal(res_2, np.power(input, 3))
        assert np.array_equal(res_3, res)
        assert np.array_equal(res_6, np.power(input, 3))


class TestSTanh(TestActivation):
    def setUp(self):
        self.op_type = "stanh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        scale_a = 2.0 / 3.0
        scale_b = 1.7159
        out = scale_b * np.tanh(x * scale_a)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()
        self.dtype = np.float64

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 + np.exp(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.divide(x, 1 + np.abs(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()

        threshold = 0.25
        self.delta = 0.005
        X = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)

        # Same reason as TestAbs
        X[np.abs(X - threshold) < self.delta] = threshold + 0.2
        out = (X > threshold) * X

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.init_dtype()

        X = np.random.uniform(-5, 5, [10, 12]).astype("float32")
        slope = 0.2
        offset = 0.5
        lower_threshold = -offset / slope
        upper_threshold = (1 - offset) / slope

        self.delta = 0.005

        # Same reason as TestAbs
        X[(X - lower_threshold) < self.delta] = lower_threshold - 0.02
        X[(X - upper_threshold) < self.delta] = upper_threshold + 0.02

        temp = X * slope + offset
        out = np.maximum(0.0, np.minimum(1.0, temp))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSwish(TestActivation):
    def setUp(self):
        self.op_type = "swish"
        self.init_dtype()

        X = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        beta = 2.3
        out = X * expit(beta * X)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'beta': beta}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


#------------------ Test Cudnn Activation----------------------
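# Creates a cuDNN variant of the given activation test class by overriding
# init_kernel_type to set the "use_cudnn" attribute, and registers the new
# class in globals() so unittest can discover it.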
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


#------------------ Test Fp16 ----------------------
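# Creates an fp16 variant of the given activation test class. The tests only
# run when CUDA is compiled in and the device supports float16; the gradient
# check can be skipped (grad_check=False) or given looser tolerances.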
def create_test_act_fp16_class(parent,
                               atol=1e-3,
                               grad_check=True,
                               grad_atol=0.80):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place, ['X'], 'Out', max_relative_error=grad_atol)

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhShrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftShrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestPow_factor_tensor, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish)
create_test_act_fp16_class(TestHardSwish)

if __name__ == "__main__":
    unittest.main()