#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from scipy.special import expit, erf
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard


class TestSqrtOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of sqrt op must be Variable or numpy.ndarray.
            in1 = 1
            self.assertRaises(TypeError, fluid.layers.sqrt, in1)
            # The input dtype of sqrt op must be float16, float32, float64.
            in2 = fluid.layers.data(
                name='input2', shape=[12, 10], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.sqrt, in2)

            in3 = fluid.layers.data(
                name='input3', shape=[12, 10], dtype="float16")
            fluid.layers.sqrt(x=in3)


class TestActivation(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.dtype = np.float32
        self.init_dtype()
        self.init_kernel_type()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def init_dtype(self):
        self.dtype = np.float32

    def init_kernel_type(self):
        pass


class TestSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "sigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLogSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


class TestTanh(TestActivation):
    def setUp(self):
        self.op_type = "tanh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAtan(TestActivation):
    def setUp(self):
        self.op_type = "atan"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.arctan(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestTanhShrink(TestActivation):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [10, 17]).astype(self.dtype)
        out = x - np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardShrink(TestActivation):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.init_dtype()

        threshold = 0.5
        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.copy(x)
        out[(out >= -threshold) & (out <= threshold)] = 0

        self.attrs = {'lambda': threshold}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftShrink(TestActivation):
    def setUp(self):
        self.op_type = "softshrink"
        self.init_dtype()

        lambda_val = 0.1
        x = np.random.uniform(0.25, 10, [4, 4]).astype(self.dtype)
        out = np.copy(x)
        out = (out < -lambda_val) * (out + lambda_val) + (out > lambda_val) * (
            out - lambda_val)

        self.attrs = {'lambda': lambda_val}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSqrt(TestActivation):
    def setUp(self):
        self.op_type = "sqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRsqrt(TestActivation):
    def setUp(self):
        self.op_type = "rsqrt"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [2, 3]).astype(self.dtype)
        out = 1.0 / np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.0005)


class TestAbs(TestActivation):
    def setUp(self):
        self.op_type = "abs"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 25]).astype(self.dtype)
        # Because delta = 0.005 is used when computing the numeric gradient,
        # if x is too small (e.g. 0.002), x_neg will be -0.003 and x_pos
        # will be 0.007, so the numeric gradient is inaccurate near zero.
        # We avoid this by pushing such values away from zero.
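        # For example, at x = 0.002 the central difference gives
        # (|0.002 + 0.005| - |0.002 - 0.005|) / (2 * 0.005) = 0.4,
        # while the analytic gradient of abs(x) at x = 0.002 is 1.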
        x[np.abs(x) < 0.005] = 0.02
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestCeil(TestActivation):
    def setUp(self):
        self.op_type = "ceil"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The same reason as in TestFloor below.
    def test_check_grad(self):
        pass


class TestFloor(TestActivation):
    def setUp(self):
        self.op_type = "floor"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    # The gradient of floor, ceil and round is undefined, so the gradient
    # check is skipped: the op returns zero as the gradient while numpy
    # would give nan.
    def test_check_grad(self):
        pass


class TestCos(TestActivation):
    def setUp(self):
        self.op_type = "cos"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAcos(TestActivation):
    def setUp(self):
        self.op_type = "acos"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.arccos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSin(TestActivation):
    def setUp(self):
        self.op_type = "sin"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestAsin(TestActivation):
    def setUp(self):
        self.op_type = "asin"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.arcsin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        pass


class TestRelu(TestActivation):
    def setUp(self):
        self.op_type = "relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestLeakyRelu(TestActivation):
    def setUp(self):
        self.op_type = "leaky_relu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0.02 * x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestGelu(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
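        # Reference value from the exact, erf-based form of GELU:
        # gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))).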
        out = 0.5 * x * (1.0 + erf(x / np.sqrt(2.0)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestBRelu(TestActivation):
    def setUp(self):
        self.op_type = "brelu"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
        # The same reason as in TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestRelu6(TestActivation):
    def setUp(self):
        self.op_type = "relu6"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 10]).astype(self.dtype)
        threshold = 6.0
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        out = np.minimum(np.maximum(x, 0), threshold)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardSwish(TestActivation):
    def setUp(self):
        self.op_type = 'hard_swish'
        self.init_dtype()

        x = np.random.uniform(-6, 6, [4, 4]).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
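        # hard_swish reference: x * relu6(x + offset) / scale, i.e.
        # x * min(max(x + offset, 0), threshold) / scale.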
        # The same reason as in TestAbs
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02
        out = x * np.minimum(np.maximum(x + offset, 0), threshold) / scale

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftRelu(TestActivation):
    def setUp(self):
        self.op_type = "soft_relu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason as in TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold + 0.02
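        # soft_relu reference: clip x into [-threshold, threshold], then
        # apply softplus, i.e. log(1 + exp(clip(x, -threshold, threshold))).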
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)


class TestELU(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        alpha = 1.
        out = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
        # Note: unlike other ReLU extensions, the standard ELU (i.e. alpha = 1)
        # is differentiable at 0, so modifications such as
        # x[np.abs(x) < 0.005] = 0.02 are not needed here.
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestELUOpError(unittest.TestCase):
    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of elu_op must be Variable.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.elu, x1)
            # The input dtype of elu_op must be float16 float32 or float64.
            x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
            self.assertRaises(TypeError, fluid.layers.elu, x2)


class TestReciprocal(TestActivation):
    def setUp(self):
        self.op_type = "reciprocal"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)


class TestLog(TestActivation):
    def setUp(self):
        self.op_type = "log"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSquare(TestActivation):
    def setUp(self):
        self.op_type = "square"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)


class TestPow(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestPow_factor_tensor(TestActivation):
    def setUp(self):
        self.op_type = "pow"
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {
            'X': OpTest.np_dtype_to_fluid_dtype(x),
            'FactorTensor': np.array([3.0]).astype("float32")
        }

        self.attrs = {}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')

    def test_api(self):
        import paddle.fluid as fluid

        input = np.random.uniform(1, 2, [11, 17]).astype("float32")
        x = fluid.layers.data(
            name="x", shape=[11, 17], append_batch_size=False, dtype="float32")

        factor_1 = 2.0
        factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
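        # The factor can be passed either as a Python float (stored as an op
        # attribute) or as a 1-element tensor (cf. the FactorTensor input
        # used in setUp above).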
        out_1 = fluid.layers.pow(x, factor=factor_1)
        out_2 = fluid.layers.pow(x, factor=factor_2)

        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2 = exe.run(fluid.default_main_program(),
                               feed={"x": input},
                               fetch_list=[out_1, out_2])

        assert np.array_equal(res_1, np.power(input, 2))
        assert np.array_equal(res_2, np.power(input, 3))


class TestSTanh(TestActivation):
    def setUp(self):
        self.op_type = "stanh"
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        scale_a = 2.0 / 3.0
        scale_b = 1.7159
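        # stanh reference: scale_b * tanh(scale_a * x); these defaults give
        # the commonly used scaled tanh 1.7159 * tanh(2x / 3).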
        out = scale_b * np.tanh(x * scale_a)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftplus(TestActivation):
    def setUp(self):
        self.op_type = "softplus"
        self.init_dtype()
        self.dtype = np.float64
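        # Note: the dtype is overridden to float64 after init_dtype(), so
        # even the generated fp16 subclass runs this case in float64.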

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 + np.exp(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestSoftsign(TestActivation):
    def setUp(self):
        self.op_type = "softsign"
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.divide(x, 1 + np.abs(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestThresholdedRelu(TestActivation):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.init_dtype()

        threshold = 0.25
        self.relative_error = 0.005
        X = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)

        # Same reason as TestAbs
        X[np.abs(X - threshold) < self.relative_error] = threshold + 0.2
        out = (X > threshold) * X

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out')


class TestHardSigmoid(TestActivation):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.init_dtype()

        self.relative_error = 0.002

        X = np.random.uniform(-5, 5, [2, 2]).astype("float32")
        slope = 0.2
        offset = 0.5
        lower_threshold = -offset / slope
        upper_threshold = (1 - offset) / slope
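        # hard_sigmoid computes clip(slope * X + offset, 0, 1); its two kink
        # points lie at X = -offset / slope and X = (1 - offset) / slope.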

        # Same reason as TestAbs
        X[np.abs(X - lower_threshold) < self.relative_error] = \
            lower_threshold + 0.2
        X[np.abs(X - upper_threshold) < self.relative_error] = \
            upper_threshold - 0.2

        temp = X * slope + offset
        out = np.maximum(0.0, np.minimum(1.0, temp))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.002)


class TestSwish(TestActivation):
    def setUp(self):
        self.op_type = "swish"
        self.init_dtype()

        X = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        beta = 2.3
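        # swish(x) = x * sigmoid(beta * x); scipy's expit is the logistic
        # sigmoid 1 / (1 + exp(-x)).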
        out = X * expit(beta * X)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'beta': beta}
        self.outputs = {'Out': out}

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)


#------------------ Test Cudnn Activation----------------------
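# Dynamically derive a cuDNN variant of an activation test: the generated
# subclass only sets the use_cudnn attribute and is registered in globals()
# as "<Parent>_cudnn" so that unittest discovers it. The atol and grad_atol
# arguments are accepted but not used by this factory.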
def create_test_act_cudnn_class(parent, atol=1e-3, grad_atol=1e-3):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActCudnn(parent):
        def init_kernel_type(self):
            self.attrs = {"use_cudnn": True}

    cls_name = "{0}_{1}".format(parent.__name__, "cudnn")
    TestActCudnn.__name__ = cls_name
    globals()[cls_name] = TestActCudnn


create_test_act_cudnn_class(TestRelu)
create_test_act_cudnn_class(TestRelu6)
create_test_act_cudnn_class(TestSigmoid)
create_test_act_cudnn_class(TestTanh)


#------------------ Test Fp16 ----------------------
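# Same pattern for float16: each generated subclass overrides init_dtype()
# to np.float16, runs only when built with CUDA on a device that supports
# fp16, and (unless grad_check is False) checks gradients with the looser
# grad_atol tolerance.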
def create_test_act_fp16_class(parent,
                               atol=1e-3,
                               grad_check=True,
                               grad_atol=0.80):
    @unittest.skipIf(not core.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestActFp16(parent):
        def init_dtype(self):
            self.dtype = np.float16

        def test_check_output(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16:
                self.check_output_with_place(place, atol=atol)

        def test_check_grad(self):
            place = core.CUDAPlace(0)
            support_fp16 = core.is_float16_supported(place)
            if support_fp16 and grad_check:
                self.check_grad_with_place(
                    place, ['X'], 'Out', max_relative_error=grad_atol)

    cls_name = "{0}_{1}".format(parent.__name__, "fp16")
    TestActFp16.__name__ = cls_name
    globals()[cls_name] = TestActFp16


create_test_act_fp16_class(TestActivation)
create_test_act_fp16_class(TestSigmoid)
create_test_act_fp16_class(TestLogSigmoid)
create_test_act_fp16_class(TestTanh)
create_test_act_fp16_class(TestTanhShrink)
create_test_act_fp16_class(TestHardShrink)
create_test_act_fp16_class(TestSoftShrink)
create_test_act_fp16_class(TestSqrt)
create_test_act_fp16_class(TestAbs)
create_test_act_fp16_class(TestCeil, grad_check=False)
create_test_act_fp16_class(TestFloor, grad_check=False)
create_test_act_fp16_class(TestCos, grad_atol=0.85)
create_test_act_fp16_class(TestAcos, grad_atol=0.85)
create_test_act_fp16_class(TestSin)
create_test_act_fp16_class(TestAsin)
create_test_act_fp16_class(TestAtan)
create_test_act_fp16_class(TestRound, grad_check=False)
create_test_act_fp16_class(TestRelu)
create_test_act_fp16_class(TestGelu)
create_test_act_fp16_class(TestBRelu)
create_test_act_fp16_class(TestRelu6)
create_test_act_fp16_class(TestSoftRelu)
create_test_act_fp16_class(TestELU)
create_test_act_fp16_class(TestReciprocal)
create_test_act_fp16_class(TestLog)
create_test_act_fp16_class(TestSquare)
create_test_act_fp16_class(TestPow, atol=5e-2)
create_test_act_fp16_class(TestPow_factor_tensor, atol=5e-2)
create_test_act_fp16_class(TestSTanh, grad_atol=0.9)
create_test_act_fp16_class(TestSoftplus)
create_test_act_fp16_class(TestSoftsign)
create_test_act_fp16_class(TestThresholdedRelu)
create_test_act_fp16_class(TestHardSigmoid)
create_test_act_fp16_class(TestSwish)
create_test_act_fp16_class(TestHardSwish)

if __name__ == "__main__":
    unittest.main()