diff --git a/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc index 9e437fb15e917701e8ca410d0bbf3beca22aa620..d86bab9d3a42fde1b150ce432ae4151ccb5ccc7b 100644 --- a/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc @@ -387,4 +387,5 @@ REGISTER_OP_KERNEL(pool2d, MKLDNN, ::paddle::platform::CPUPlace, ops::PoolMKLDNNOpKernel<paddle::platform::bfloat16>); REGISTER_OP_KERNEL(pool2d_grad, MKLDNN, ::paddle::platform::CPUPlace, - ops::PoolMKLDNNGradOpKernel<float>); + ops::PoolMKLDNNGradOpKernel<float>, + ops::PoolMKLDNNGradOpKernel<paddle::platform::bfloat16>); diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_bf16_mkldnn_op.py index da37b33d30d5de56ffc95fc38f2bc3f6877a7d5b..5430c1598f84da4b7eac9522f3c227cf766f1cbe 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_bf16_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_bf16_mkldnn_op.py @@ -15,22 +15,63 @@ from __future__ import print_function import unittest -import os import numpy as np import paddle.fluid.core as core -from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16 -from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op, avg_pool2D_forward_naive, max_pool2D_forward_naive +from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, convert_float_to_uint16 +from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op_Mixin, max_pool2D_forward_naive +from paddle.fluid.tests.unittests.npu.test_pool2d_op_npu import pool2d_backward_navie as pool2d_backward_naive from paddle import enable_static -@unittest.skipIf(not core.supports_bfloat16(), - "place does not support BF16 evaluation") -class TestPoolBf16MklDNNOp(TestPool2D_Op): +@OpTestTool.skip_if_not_cpu_bf16() +class TestPoolBf16MklDNNOpGrad(TestPool2D_Op_Mixin, OpTest): + def init_kernel_type(self): + self.use_mkldnn = True 
+ + def init_data_type(self): + self.dtype = np.uint16 + + def setUp(self): + super(TestPoolBf16MklDNNOpGrad, self).setUp() + self.attrs['mkldnn_data_type'] = "bfloat16" + self.x_fp32 = np.random.random(self.shape).astype(np.float32) + + output = self.pool2D_forward_naive( + self.x_fp32, self.ksize, self.strides, self.paddings, + self.global_pool, self.ceil_mode, self.exclusive, self.adaptive, + "float32").astype(np.float32) + + self.inputs = {'X': convert_float_to_uint16(self.x_fp32)} + self.outputs = {'Out': convert_float_to_uint16(output)} + + def test_check_output(self): + self.check_output_with_place(core.CPUPlace()) + + def test_check_grad(self): + x_grad = pool2d_backward_naive( + self.x_fp32, + ksize=self.ksize, + strides=self.strides, + paddings=self.paddings, + global_pool=self.global_pool, + ceil_mode=False, + exclusive=self.exclusive, + adaptive=self.adaptive, + data_format=self.data_format, + pool_type=self.pool_type, + padding_algorithm=self.padding_algorithm) + x_grad = x_grad / np.prod(self.outputs['Out'].shape) + self.check_grad_with_place( + core.CPUPlace(), set(['X']), 'Out', user_defined_grads=[x_grad]) + + +@OpTestTool.skip_if_not_cpu_bf16() +class TestPoolBf16MklDNNOp(TestPool2D_Op_Mixin, OpTest): def init_kernel_type(self): self.use_mkldnn = True def setUp(self): - TestPool2D_Op.setUp(self) + TestPool2D_Op_Mixin.setUp(self) self.dtype = np.uint16 input = np.random.random(self.shape).astype(np.float32) @@ -95,6 +136,47 @@ class TestCase2Max(TestCase2Avg): self.pool2D_forward_naive = max_pool2D_forward_naive +class TestCase1PadZeroExclusiveAvgGrad(TestPoolBf16MklDNNOpGrad): + def init_test_case(self): + self.ksize = [3, 3] + self.strides = [1, 1] + + def init_shape(self): + self.shape = [2, 3, 7, 7] + + def init_paddings(self): + self.paddings = [0, 0] + + def init_global_pool(self): + self.global_pool = False + + def init_exclusive(self): + self.exclusive = True + + +class TestCase2PadOneNonExclusiveAvgGrad(TestCase1PadZeroExclusiveAvgGrad): + 
def init_exclusive(self): + self.exclusive = False + + +class TestCase0InitialMaxGrad(TestPoolBf16MklDNNOpGrad): + def init_pool_type(self): + self.pool_type = "max" + self.pool2D_forward_naive = max_pool2D_forward_naive + + +class TestCase1PadZeroExclusiveMaxGrad(TestCase1PadZeroExclusiveAvgGrad): + def init_pool_type(self): + self.pool_type = "max" + self.pool2D_forward_naive = max_pool2D_forward_naive + + +class TestCase2PadOneNonExclusiveMaxGrad(TestCase2PadOneNonExclusiveAvgGrad): + def init_pool_type(self): + self.pool_type = "max" + self.pool2D_forward_naive = max_pool2D_forward_naive + + if __name__ == "__main__": enable_static() unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_op.py b/python/paddle/fluid/tests/unittests/test_pool2d_op.py index d66bdd2948d464038336b2dd197710a579f6949b..582ec9501068c0e33ba3f8a0272e6c868092786c 100644 --- a/python/paddle/fluid/tests/unittests/test_pool2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool2d_op.py @@ -19,7 +19,7 @@ import unittest import numpy as np import paddle.fluid.core as core -from op_test import OpTest +from paddle.fluid.tests.unittests.op_test import OpTest import paddle.fluid as fluid from paddle.fluid import Program, program_guard @@ -252,7 +252,7 @@ def pool2D_forward_naive(x, return out -class TestPool2D_Op(OpTest): +class TestPool2D_Op_Mixin(object): def setUp(self): self.op_type = "pool2d" self.use_cudnn = False @@ -363,6 +363,10 @@ class TestPool2D_Op(OpTest): self.adaptive = False +class TestPool2D_Op(TestPool2D_Op_Mixin, OpTest): + pass + + class TestCase1(TestPool2D_Op): def init_test_case(self): self.ksize = [3, 3]