#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
import paddle.fluid.core as core
from op_test import OpTest
from scipy.special import expit


class TestExp(OpTest):
    def setUp(self):
        self.op_type = "exp"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.exp(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Exp(TestExp):
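    # The FP16 variants in this file reuse the float32 reference outputs of
    # their parent classes: they only check the forward result, with a looser
    # atol, on a CUDA device that supports float16, and the gradient check is
    # skipped via the early return in the parent's test_check_grad.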
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestSigmoid(OpTest):
    def setUp(self):
        self.op_type = "sigmoid"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)

    def init_dtype(self):
        pass


class TestFP16Sigmoid(TestSigmoid):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestLogSigmoid(OpTest):
    def setUp(self):
        self.op_type = "logsigmoid"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
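        # Reference: logsigmoid(x) = log(sigmoid(x)) = log(1 / (1 + exp(-x)))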
        out = np.log(1 / (1 + np.exp(-x)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)

    def init_dtype(self):
        pass


class TestFP16LogSigmoid(TestLogSigmoid):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestTanh(OpTest):
    def setUp(self):
        self.op_type = "tanh"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Tanh(TestTanh):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestTanhShrink(OpTest):
    def setUp(self):
        self.op_type = "tanh_shrink"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [10, 17]).astype(self.dtype)
        out = x - np.tanh(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)

    def init_dtype(self):
        pass


class TestFP16TanhShrink(TestTanhShrink):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestHardShrink(OpTest):
    def setUp(self):
        self.op_type = "hard_shrink"
        self.dtype = np.float32
        self.init_dtype()

        threshold = 0.5
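        # Reference: hard_shrink zeroes values inside [-threshold, threshold]
        # and keeps everything outside that band unchanged.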
        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.copy(x)
        out[(out >= -threshold) & (out <= threshold)] = 0

        self.attrs = {'lambda': threshold}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.005)

    def init_dtype(self):
        pass


class TestFP16HardShrink(TestHardShrink):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestSoftShrink(OpTest):
    def setUp(self):
        self.op_type = "softshrink"
        self.dtype = np.float32
        self.init_dtype()

        lambda_val = 0.1
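        # Reference: softshrink(x) = x - lambda for x > lambda,
        # x + lambda for x < -lambda, and 0 otherwise.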
        x = np.random.uniform(0.25, 10, [4, 4]).astype(self.dtype)
        out = np.copy(x)
        out = (out < -lambda_val) * (out + lambda_val) + (out > lambda_val) * (
            out - lambda_val)

        self.attrs = {'lambda': lambda_val}
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16SoftShrink(TestSoftShrink):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestSqrt(OpTest):
    def setUp(self):
        self.op_type = "sqrt"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.sqrt(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Sqrt(TestSqrt):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestAbs(OpTest):
    def setUp(self):
        self.op_type = "abs"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        # Because delta = 0.005 is used when calculating the numeric gradient,
        # an x too close to zero (e.g. 0.002) gives x_neg = -0.003 and
        # x_pos = 0.007, which straddle the kink of abs() and make the numeric
        # gradient inaccurate, so such values are avoided here.
        x[np.abs(x) < 0.005] = 0.02
        out = np.abs(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Abs(TestAbs):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestCeil(OpTest):
    def setUp(self):
        self.op_type = "ceil"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.ceil(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Ceil(TestCeil):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestFloor(OpTest):
    def setUp(self):
        self.op_type = "floor"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.floor(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Floor(TestFloor):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestCos(OpTest):
    def setUp(self):
        self.op_type = "cos"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.cos(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Cos(TestCos):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestSin(OpTest):
    def setUp(self):
        self.op_type = "sin"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.sin(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Sin(TestSin):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestRound(OpTest):
    def setUp(self):
        self.op_type = "round"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        out = np.round(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Round(TestRound):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestRelu(OpTest):
    def setUp(self):
        self.op_type = "relu"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Relu(TestRelu):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestBRelu(OpTest):
    def setUp(self):
        self.op_type = "brelu"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
        t_min = 1.0
        t_max = 4.0
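        # Reference: brelu clips x element-wise to the range [t_min, t_max].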
        # The same reason as in TestAbs
        x[np.abs(x - t_min) < 0.005] = t_min + 0.02
        x[np.abs(x - t_max) < 0.005] = t_max + 0.02
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'t_min': t_min, 't_max': t_max}
        self.outputs = {'Out': t}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)

    def init_dtype(self):
        pass


class TestFP16BRelu(TestBRelu):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestRelu6(OpTest):
    def setUp(self):
        self.op_type = "relu6"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [4, 10]).astype(self.dtype)
        threshold = 6.0
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
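        # Reference: relu6(x) = min(max(x, 0), threshold) with threshold = 6.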
        out = np.minimum(np.maximum(x, 0), threshold)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)

    def init_dtype(self):
        pass


class TestFP16Relu6(TestRelu6):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestSoftRelu(OpTest):
    def setUp(self):
        self.op_type = "soft_relu"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        threshold = 2.0
        # The same reason as in TestAbs
        x[np.abs(x - threshold) < 0.005] = threshold + 0.02
        x[np.abs(x + threshold) < 0.005] = -threshold + 0.02
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
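        # Reference: soft_relu(x) = log(1 + exp(clip(x, -threshold, threshold)))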
        out = np.log((np.exp(t) + 1))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)

    def init_dtype(self):
        pass


class TestFP16SoftRelu(TestSoftRelu):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestELU(OpTest):
    def setUp(self):
        self.op_type = "elu"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-3, 3, [4, 4]).astype(self.dtype)
        alpha = 1.
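        # Reference: elu(x) = x for x > 0 and alpha * (exp(x) - 1) for x <= 0.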
        out = np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
        # Note: unlike the other ReLU extensions, the standard ELU function
        # (i.e. alpha = 1) is differentiable at 0, so modifications like
        # x[np.abs(x) < 0.005] = 0.02 are not needed here.
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)

    def init_dtype(self):
        pass


class TestFP16ELU(TestELU):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestReciprocal(OpTest):
    def setUp(self):
        self.op_type = "reciprocal"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.reciprocal(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.01)

    def init_dtype(self):
        pass


class TestFP16Reciprocal(TestReciprocal):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestLog(OpTest):
    def setUp(self):
        self.op_type = "log"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.log(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Log(TestLog):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestSquare(OpTest):
    def setUp(self):
        self.op_type = "square"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        out = np.square(x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Square(TestSquare):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestPow(OpTest):
    def setUp(self):
        self.op_type = "pow"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(1, 2, [11, 17]).astype(self.dtype)
        out = np.power(x, 3)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'factor': 3.0}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.02)

    def init_dtype(self):
        pass


class TestFP16Pow(TestPow):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=5e-2)


class TestSTanh(OpTest):
    def setUp(self):
        self.op_type = "stanh"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        scale_a = 2.0 / 3.0
        scale_b = 1.7159
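        # scale_a = 2/3 and scale_b = 1.7159 are the classic scaled-tanh
        # constants; the reference is stanh(x) = scale_b * tanh(scale_a * x).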
        out = scale_b * np.tanh(x * scale_a)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16STanh(TestSTanh):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestSoftplus(OpTest):
    def setUp(self):
        self.op_type = "softplus"
        self.dtype = np.float64
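        # Note: softplus uses float64 here, unlike the float32 default in the
        # other tests; the reference is softplus(x) = log(1 + exp(x)).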
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.log(1 + np.exp(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Softplus(TestSoftplus):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestSoftsign(OpTest):
    def setUp(self):
        self.op_type = "softsign"
        self.dtype = np.float32
        self.init_dtype()

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = np.divide(x, 1 + np.abs(x))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.007)

    def init_dtype(self):
        pass


class TestFP16Softsign(TestSoftsign):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestThresholdedRelu(OpTest):
    def setUp(self):
        self.op_type = "thresholded_relu"
        self.dtype = np.float32
        self.init_dtype()

        threshold = 0.25
        self.relative_error = 0.005
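        # Reference: thresholded_relu(x) = x for x > threshold, 0 otherwise.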
        X = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)

        # Same reason as TestAbs
        X[np.abs(X - threshold) < self.relative_error] = threshold + 0.2
        out = (X > threshold) * X

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'threshold': threshold}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=self.relative_error)

    def init_dtype(self):
        pass


class TestFP16ThresholdedRelu(TestThresholdedRelu):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestHardSigmoid(OpTest):
    def setUp(self):
        self.op_type = "hard_sigmoid"
        self.dtype = np.float32
        self.init_dtype()

        self.relative_error = 0.002

        X = np.random.uniform(-5, 5, [2, 2]).astype(self.dtype)
        slope = 0.2
        offset = 0.5
        lower_threshold = -offset / slope
        upper_threshold = (1 - offset) / slope
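        # Reference: hard_sigmoid(x) = clip(slope * x + offset, 0, 1); the two
        # thresholds mark where the clipping saturates at 0 and 1.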

        # Same reason as TestAbs
        X[np.abs(X - lower_threshold) < self.relative_error] = \
            lower_threshold + 0.2
        X[np.abs(X - upper_threshold) < self.relative_error] = \
            upper_threshold - 0.2

        temp = X * slope + offset
        out = np.maximum(0.0, np.minimum(1.0, temp))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.002)

    def init_dtype(self):
        pass


class TestFP16HardSigmoid(TestHardSigmoid):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


class TestSwish(OpTest):
    def setUp(self):
        self.op_type = "swish"
        self.dtype = np.float32
        self.init_dtype()

        X = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
        beta = 2.3
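        # Reference: swish(x) = x * sigmoid(beta * x); expit is SciPy's
        # numerically stable logistic sigmoid.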
        out = X * expit(beta * X)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(X)}
        self.attrs = {'beta': beta}
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', max_relative_error=0.008)

    def init_dtype(self):
        pass


class TestFP16Swish(TestSwish):
    def init_dtype(self):
        self.dtype = np.float16

    def test_check_output(self):
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
            if core.is_float16_supported(place):
                self.check_output_with_place(place, atol=1e-3)


#--------------------test MKLDNN--------------------
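# The MKLDNN test classes below subclass the reference tests above and set the
# "use_mkldnn" attribute (the Dim4 variants also switch to a 4-D input), so the
# MKL-DNN kernels are checked against the same NumPy reference outputs.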
class TestMKLDNNReluDim2(TestRelu):
    def setUp(self):
        super(TestMKLDNNReluDim2, self).setUp()

        self.attrs = {"use_mkldnn": True}


class TestMKLDNNTanhDim2(TestTanh):
    def setUp(self):
        super(TestMKLDNNTanhDim2, self).setUp()

        self.attrs = {"use_mkldnn": True}


class TestMKLDNNSqrtDim2(TestSqrt):
    def setUp(self):
        super(TestMKLDNNSqrtDim2, self).setUp()

        self.attrs = {"use_mkldnn": True}


class TestMKLDNNAbsDim2(TestAbs):
    def setUp(self):
        super(TestMKLDNNAbsDim2, self).setUp()

        self.attrs = {"use_mkldnn": True}


class TestMKLDNNReluDim4(TestRelu):
    def setUp(self):
        super(TestMKLDNNReluDim4, self).setUp()

        x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype("float32")
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"use_mkldnn": True}


class TestMKLDNNTanhDim4(TestTanh):
    def setUp(self):
        super(TestMKLDNNTanhDim4, self).setUp()

        self.inputs = {
            'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype("float32")
        }
        self.outputs = {'Out': np.tanh(self.inputs['X'])}
        self.attrs = {"use_mkldnn": True}


class TestMKLDNNSqrtDim4(TestSqrt):
    def setUp(self):
        super(TestMKLDNNSqrtDim4, self).setUp()

        self.inputs = {
            'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype("float32")
        }
        self.outputs = {'Out': np.sqrt(self.inputs['X'])}
        self.attrs = {"use_mkldnn": True}


class TestMKLDNNAbsDim4(TestAbs):
    def setUp(self):
        super(TestMKLDNNAbsDim4, self).setUp()

        x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype("float32")
        # The same reason as in TestAbs
        x[np.abs(x) < 0.005] = 0.02
        self.inputs = {'X': x}
        self.outputs = {'Out': np.abs(self.inputs['X'])}
        self.attrs = {"use_mkldnn": True}


if __name__ == "__main__":
    unittest.main()