#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
from mkldnn_op_test import check_if_mkldnn_primitives_exist_in_bwd
from scipy.special import expit

import paddle
import paddle.nn.functional as F
from paddle.fluid import core
from paddle.fluid.tests.unittests.eager_op_test import (
    OpTest,
    convert_float_to_uint16,
)
from paddle.fluid.tests.unittests.test_activation_op import (
    TestAbs,
    TestAbs_ZeroDim,
    TestActivation,
    TestActivation_ZeroDim,
    TestHardSwish,
    TestHardSwish_ZeroDim,
    TestLeakyRelu,
    TestLeakyRelu_ZeroDim,
    TestRelu,
    TestRelu6,
    TestRelu6_ZeroDim,
    TestRelu_ZeroDim,
    TestSigmoid,
    TestSigmoid_ZeroDim,
    TestSoftplus,
    TestSoftplus_ZeroDim,
    TestSqrt,
    TestSqrt_ZeroDim,
    TestSwish,
    TestSwish_ZeroDim,
    TestTanh,
    TestTanh_ZeroDim,
)
from paddle.fluid.tests.unittests.test_gelu_op import gelu
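# The tests below exercise the oneDNN (MKL-DNN) kernels of the activation
# operators, mostly by reusing the generic activation tests with the
# "use_mkldnn" attribute enabled.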


class TestMKLDNNReluDim2(TestRelu):
    def setUp(self):
        super().setUp()

        self.attrs = {"use_mkldnn": True}

    def init_dtype(self):
        self.dtype = np.float32


class TestMKLDNNRelu_ZeroDim(TestRelu_ZeroDim):
    def setUp(self):
        super().setUp()

        self.attrs = {"use_mkldnn": True}

    def init_dtype(self):
        self.dtype = np.float32


class TestMKLDNNRelu6Dim2(TestRelu6):
    def setUp(self):
        super().setUp()
        self.attrs.update({"use_mkldnn": True})

    def init_dtype(self):
        self.dtype = np.float32


class TestMKLDNNRelu6_ZeroDim(TestRelu6_ZeroDim):
    def setUp(self):
        super().setUp()
        self.attrs.update({"use_mkldnn": True})

    def init_dtype(self):
        self.dtype = np.float32


class TestMKLDNNLeakyReluDim2(TestLeakyRelu):
    def setUp(self):
        super().setUp()

        self.attrs = {"use_mkldnn": True}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_output(self):
        self.check_output(check_dygraph=False)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_dygraph=False)


class TestMKLDNNLeakyRelu_ZeroDim(TestLeakyRelu_ZeroDim):
    def setUp(self):
        super().setUp()

        self.attrs = {"use_mkldnn": True}

    def init_dtype(self):
        self.dtype = np.float32


class TestMKLDNNGeluDim2(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.python_api = F.gelu
        self.dtype = np.float32

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = gelu(x, False)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"use_mkldnn": True}


class TestMKLDNNGelu_ZeroDim(TestActivation_ZeroDim):
    def setUp(self):
        self.op_type = "gelu"
        self.python_api = F.gelu
        self.dtype = np.float32

        x = np.random.uniform(-1, 1, []).astype(self.dtype)
        out = gelu(x, False)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"use_mkldnn": True}


class TestMKLDNNGeluDim2Approx(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.python_api = F.gelu
        self.dtype = np.float32

        x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype)
        out = gelu(x, True)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"use_mkldnn": True, "approximate": True}


class TestMKLDNNTanhDim2(TestTanh):
    def setUp(self):
        super().setUp()

        self.attrs = {"use_mkldnn": True}

    def init_dtype(self):
        self.dtype = np.float32


class TestMKLDNNTanh_ZeroDim(TestTanh_ZeroDim):
    def setUp(self):
        super().setUp()

        self.attrs = {"use_mkldnn": True}

    def init_dtype(self):
        self.dtype = np.float32


class TestMKLDNNSqrtDim2(TestSqrt):
    def setUp(self):
        super().setUp()

        self.attrs = {"use_mkldnn": True}

    def init_dtype(self):
        self.dtype = np.float32


class TestMKLDNNSqrt_ZeroDim(TestSqrt_ZeroDim):
    def setUp(self):
        super().setUp()

        self.attrs = {"use_mkldnn": True}

    def init_dtype(self):
        self.dtype = np.float32


class TestMKLDNNAbsDim2(TestAbs):
    def setUp(self):
        super().setUp()
        self.attrs = {"use_mkldnn": True}

    def init_dtype(self):
        self.dtype = np.float32


class TestMKLDNNAbs_ZeroDim(TestAbs_ZeroDim):
    def setUp(self):
        super().setUp()
        self.attrs = {"use_mkldnn": True}

    def init_dtype(self):
        self.dtype = np.float32


class TestMKLDNNSwishDim2(TestSwish):
    def setUp(self):
        super().setUp()

        self.attrs["use_mkldnn"] = True

    def init_dtype(self):
        self.dtype = np.float32


class TestMKLDNNSwish_ZeroDim(TestSwish_ZeroDim):
    def setUp(self):
        super().setUp()

        self.attrs["use_mkldnn"] = True
        self.check_eager = False

    def init_dtype(self):
        self.dtype = np.float32


class TestMKLDNNHardSwishDim2(TestHardSwish):
    def setUp(self):
        super().setUp()
        self.attrs = {"use_mkldnn": True}


class TestMKLDNNHardSwish_ZeroDim(TestHardSwish_ZeroDim):
    def setUp(self):
        super().setUp()
        self.attrs = {"use_mkldnn": True}


class TestMKLDNNSigmoidDim2(TestSigmoid):
    def setUp(self):
        super().setUp()
        self.attrs = {"use_mkldnn": True}


class TestMKLDNNSigmoid_ZeroDim(TestSigmoid_ZeroDim):
    def setUp(self):
        super().setUp()
        self.attrs = {"use_mkldnn": True}


class TestMKLDNNReluDim4(TestRelu):
    def setUp(self):
        super().setUp()

        x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype("float32")
        # Same reason as in TestAbs: keep inputs away from zero so the
        # numeric gradient check stays accurate.
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"use_mkldnn": True}

    def init_dtype(self):
        self.dtype = np.float32


class TestMKLDNNLeakyReluDim4(TestLeakyRelu):
    def setUp(self):
        super().setUp()

        x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype("float32")
        # Same reason as in TestAbs: keep inputs away from zero so the
        # numeric gradient check stays accurate.
        x[np.abs(x) < 0.005] = 0.02
        out = np.maximum(x, 0.02 * x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"use_mkldnn": True}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_output(self):
        self.check_output(check_dygraph=False)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_dygraph=False)


class TestMKLDNNGeluDim4(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.python_api = F.gelu
        self.dtype = np.float32

        x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(self.dtype)
        out = gelu(x, False)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"use_mkldnn": True}


class TestMKLDNNGeluDim4Approx(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.python_api = F.gelu
        self.dtype = np.float32

        x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(self.dtype)
        out = gelu(x, True)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"use_mkldnn": True, "approximate": True}


@unittest.skipIf(
    not core.supports_bfloat16(), "place does not support BF16 evaluation"
)
class TestMKLDNNGeluBf16Dim4(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.python_api = F.gelu
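        # bfloat16 tensors are passed to OpTest as uint16 numpy arrays,
        # hence the conversion helpers below.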
        self.dtype = np.uint16

        x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(np.float32)
        out = convert_float_to_uint16(gelu(x, False))

        self.inputs = {'X': convert_float_to_uint16(x)}
        self.outputs = {'Out': out}
        self.attrs = {"use_mkldnn": True}

    def test_check_output(self):
        self.check_output_with_place(core.CPUPlace())

    def test_check_grad(self):
        pass


@unittest.skipIf(
    not core.supports_bfloat16(), "place does not support BF16 evaluation"
)
class TestMKLDNNGeluBf16Dim4Approx(TestActivation):
    def setUp(self):
        self.op_type = "gelu"
        self.python_api = F.gelu
        self.dtype = np.uint16

        x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(np.float32)
        out = convert_float_to_uint16(gelu(x, True))

        self.inputs = {'X': convert_float_to_uint16(x)}
        self.outputs = {'Out': out}
        self.attrs = {"use_mkldnn": True, "approximate": True}

    def test_check_output(self):
        self.check_output_with_place(core.CPUPlace())

    def test_check_grad(self):
        pass


class TestMKLDNNTanhDim4(TestTanh):
    def setUp(self):
        super().setUp()

        self.inputs = {
            'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype("float32")
        }
        self.outputs = {'Out': np.tanh(self.inputs['X'])}
        self.attrs = {"use_mkldnn": True}


class TestMKLDNNSqrtDim4(TestSqrt):
    def setUp(self):
        super().setUp()

        self.inputs = {
            'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype("float32")
        }
        self.outputs = {'Out': np.sqrt(self.inputs['X'])}
        self.attrs = {"use_mkldnn": True}


class TestMKLDNNAbsDim4(TestAbs):
    def setUp(self):
        super().setUp()

        x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype("float32")
        # Same reason as in TestAbs: keep inputs away from zero so the
        # numeric gradient check stays accurate.
        x[np.abs(x) < 0.005] = 0.02
        self.inputs = {'X': x}
        self.outputs = {'Out': np.abs(self.inputs['X'])}
        self.attrs = {"use_mkldnn": True}

    def init_dtype(self):
        self.dtype = np.float32


class TestMKLDNNSwishDim4(TestSwish):
    def setUp(self):
        super().setUp()

        x = np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(self.dtype)
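        # Reference swish: swish(x) = x * sigmoid(beta * x); expit is the
        # logistic sigmoid from scipy.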
        beta = 2.3
        out = x * expit(beta * x)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"use_mkldnn": True, "beta": beta}

    def init_dtype(self):
        self.dtype = np.float32

    def test_check_output(self):
        self.check_output(check_dygraph=False)

    def test_check_grad(self):
        if self.dtype == np.float16:
            return
        self.check_grad(['X'], 'Out', check_dygraph=False)


def ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0):
    # Compute in float32 when the input is float16, then cast the result
    # back to the original dtype.
    x_dtype = x.dtype
    if x_dtype == 'float16':
        x = x.astype('float32')
    return (
        x * np.minimum(np.maximum(x + offset, 0.0), threshold) / scale
    ).astype(x_dtype)


class TestMKLDNNHardSwishDim4(TestHardSwish):
    def setUp(self):
        super().setUp()

        x = np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(self.dtype)
        threshold = 6.0
        scale = 6.0
        offset = 3.0
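        # Nudge inputs away from the kink points of hard_swish so the
        # numeric gradient check stays stable.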
        x[np.abs(x + offset) < 0.005] = 0.02
        x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02

        out = ref_hardswish(x, threshold, scale, offset)

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"use_mkldnn": True}

    def init_dtype(self):
        self.dtype = np.float32


class TestMKLDNNMish(TestActivation):
    def setUp(self):
        self.op_type = "mish"
        self.python_api = F.mish
        self.dtype = np.float32

        x = np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(self.dtype)
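        # Reference mish: mish(x) = x * tanh(softplus(x)).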
        out = x * np.tanh(np.log(1 + np.exp(x)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"use_mkldnn": True}


class TestMKLDNNMish_ZeroDim(TestActivation_ZeroDim):
    def setUp(self):
        self.op_type = "mish"
        self.python_api = F.mish
        self.dtype = np.float32

        x = np.random.uniform(0.1, 1, []).astype(self.dtype)
        out = x * np.tanh(np.log(1 + np.exp(x)))

        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"use_mkldnn": True}


class TestMKLDNNRound(TestActivation):
    def setUp(self):
        self.op_type = "round"
        self.python_api = paddle.round
        x = np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(np.float32)
        out = np.round(x)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"use_mkldnn": True}


class TestMKLDNNRound_ZeroDim(TestActivation_ZeroDim):
    def setUp(self):
        self.op_type = "round"
        self.python_api = paddle.round
        x = np.random.uniform(0.1, 1, []).astype(np.float32)
        out = np.round(x)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {"use_mkldnn": True}


class TestMKLDNNSigmoidDim4(TestSigmoid):
    def setUp(self):
        super().setUp()

        x = np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(self.dtype)
        out = 1 / (1 + np.exp(-x))
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.outputs = {'Out': out}
        self.attrs = {"use_mkldnn": True}


class TestMKLDNNEluDefaultAlpha(TestActivation):
    def setUp(self):
        self.op_type = "elu"
        self.python_api = F.elu
        self.set_alpha()

        x = np.random.random((5, 5, 4)).astype("float32")

        self.inputs = {'X': x}
        self.attrs = {'use_mkldnn': True, 'alpha': self.alpha}
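        # Reference ELU: x for x > 0, alpha * (exp(x) - 1) otherwise.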
        self.outputs = {
            'Out': np.maximum(0, x)
            + np.minimum(0, self.alpha * (np.exp(x) - 1))
        }

    def set_alpha(self):
        self.alpha = 1.0


class TestMKLDNNEluDefaultAlpha_ZeroDim(TestActivation_ZeroDim):
    def setUp(self):
        self.op_type = "elu"
        self.python_api = F.elu
        self.set_alpha()

        x = np.random.random(()).astype("float32")

        self.inputs = {'X': x}
        self.attrs = {'use_mkldnn': True, 'alpha': self.alpha}
        self.outputs = {
            'Out': np.maximum(0, x)
            + np.minimum(0, self.alpha * (np.exp(x) - 1))
        }

    def set_alpha(self):
        self.alpha = 1.0


class TestMKLDNNEluCustomAlpha(TestMKLDNNEluDefaultAlpha):
    def set_alpha(self):
        self.alpha = 2.5


class TestMKLDNNExpOp(TestActivation):
    def setUp(self):
        self.op_type = "exp"
        self.python_api = paddle.exp
        x = np.random.random((5, 5, 4)).astype("float32")

        self.inputs = {'X': x}
        self.attrs = {'use_mkldnn': True}
        self.outputs = {'Out': np.exp(x)}


class TestMKLDNNExpOp_ZeroDim(TestActivation_ZeroDim):
    def setUp(self):
        self.op_type = "exp"
        self.python_api = paddle.exp
        x = np.random.random(()).astype("float32")

        self.inputs = {'X': x}
        self.attrs = {'use_mkldnn': True}
        self.outputs = {'Out': np.exp(x)}


# Check if primitives already exist in backward
class TestMKLDNNAbsPrimitivesAlreadyExist(unittest.TestCase):
    def setUp(self):
        paddle.enable_static()
        super().setUp()

        np.random.seed(123)
        self.op_type = 'abs'
        self.python_api = paddle.abs
        self.x = np.random.uniform(-1, 1, [2, 2]).astype(np.float32)
        self.out = np.abs(self.x)
        self.out_grad = np.random.random_sample(self.x.shape).astype(np.float32)
        self.x_grad = self.__abs_bwd(self.x, self.out_grad)

    # Abs grad calculation
    def __abs_bwd(self, x, out_grad):
        return out_grad * np.sign(x)

    def test_check(self):
        check_if_mkldnn_primitives_exist_in_bwd(
            self, self.op_type, self.x, self.out, self.out_grad, self.x_grad
        )


class TestMKLDNNSoftplusDim2(TestSoftplus):
    def setUp(self):
        super().setUp()
        self.attrs.update({"use_mkldnn": True})

    def init_dtype(self):
        self.dtype = np.float32


class TestMKLDNNSoftplus_ZeroDim(TestSoftplus_ZeroDim):
    def setUp(self):
        super().setUp()
        self.attrs.update({"use_mkldnn": True})

    def init_dtype(self):
        self.dtype = np.float32


if __name__ == '__main__':
    unittest.main()