Unverified commit f95d44a2, authored by arlesniak, committed via GitHub.

Added BF16 Pool2d grad (#37081)

* Added BF16 Pool2d grad
* upstream pulled
* fix for CI
* fixes after review

Parent commit: 62ec644f
...@@ -387,4 +387,5 @@ REGISTER_OP_KERNEL(pool2d, MKLDNN, ::paddle::platform::CPUPlace, ...@@ -387,4 +387,5 @@ REGISTER_OP_KERNEL(pool2d, MKLDNN, ::paddle::platform::CPUPlace,
ops::PoolMKLDNNOpKernel<paddle::platform::bfloat16>); ops::PoolMKLDNNOpKernel<paddle::platform::bfloat16>);
REGISTER_OP_KERNEL(pool2d_grad, MKLDNN, ::paddle::platform::CPUPlace,
                   ops::PoolMKLDNNGradOpKernel<float>,
                   ops::PoolMKLDNNGradOpKernel<paddle::platform::bfloat16>);
...@@ -15,22 +15,63 @@ ...@@ -15,22 +15,63 @@
from __future__ import print_function

import unittest
import os

import numpy as np

import paddle.fluid.core as core
from paddle.fluid.tests.unittests.op_test import OpTest, OpTestTool, convert_float_to_uint16
from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op_Mixin, max_pool2D_forward_naive
from paddle.fluid.tests.unittests.npu.test_pool2d_op_npu import pool2d_backward_navie as pool2d_backward_naive
from paddle import enable_static
@OpTestTool.skip_if_not_cpu_bf16()
class TestPoolBf16MklDNNOpGrad(TestPool2D_Op_Mixin, OpTest):
    """Forward and backward test of bfloat16 pool2d through the oneDNN kernel.

    The expected gradient is computed by the naive NumPy reference
    (``pool2d_backward_naive``) and handed to ``check_grad_with_place`` as a
    user-defined gradient, since bf16 numeric differentiation is too noisy.
    """

    def init_kernel_type(self):
        # Route the operator through the oneDNN (MKL-DNN) kernel.
        self.use_mkldnn = True

    def init_data_type(self):
        # bf16 tensors are represented as uint16 arrays on the NumPy side.
        self.dtype = np.uint16

    def setUp(self):
        super(TestPoolBf16MklDNNOpGrad, self).setUp()
        self.attrs['mkldnn_data_type'] = "bfloat16"
        # Keep the fp32 input around: the reference gradient in
        # test_check_grad is computed from it.
        self.x_fp32 = np.random.random(self.shape).astype(np.float32)
        # Reference output computed in fp32 by the naive implementation
        # selected in init_pool_type (avg by default, max in subclasses).
        output = self.pool2D_forward_naive(
            self.x_fp32, self.ksize, self.strides, self.paddings,
            self.global_pool, self.ceil_mode, self.exclusive, self.adaptive,
            "float32").astype(np.float32)
        # Inputs/outputs are fed to the op as bf16 (uint16 bit pattern).
        self.inputs = {'X': convert_float_to_uint16(self.x_fp32)}
        self.outputs = {'Out': convert_float_to_uint16(output)}

    def test_check_output(self):
        self.check_output_with_place(core.CPUPlace())

    def test_check_grad(self):
        # NOTE(review): ceil_mode is hard-coded to False here rather than
        # using self.ceil_mode — presumably because the naive backward does
        # not support ceil mode; confirm before enabling ceil-mode cases.
        x_grad = pool2d_backward_naive(
            self.x_fp32,
            ksize=self.ksize,
            strides=self.strides,
            paddings=self.paddings,
            global_pool=self.global_pool,
            ceil_mode=False,
            exclusive=self.exclusive,
            adaptive=self.adaptive,
            data_format=self.data_format,
            pool_type=self.pool_type,
            padding_algorithm=self.padding_algorithm)
        # Scale by the output element count, matching the mean of the loss
        # used by the gradient checker.
        x_grad = x_grad / np.prod(self.outputs['Out'].shape)
        self.check_grad_with_place(
            core.CPUPlace(), set(['X']), 'Out', user_defined_grads=[x_grad])
@OpTestTool.skip_if_not_cpu_bf16()
class TestPoolBf16MklDNNOp(TestPool2D_Op_Mixin, OpTest):
def init_kernel_type(self): def init_kernel_type(self):
self.use_mkldnn = True self.use_mkldnn = True
def setUp(self): def setUp(self):
TestPool2D_Op.setUp(self) TestPool2D_Op_Mixin.setUp(self)
self.dtype = np.uint16 self.dtype = np.uint16
input = np.random.random(self.shape).astype(np.float32) input = np.random.random(self.shape).astype(np.float32)
...@@ -95,6 +136,47 @@ class TestCase2Max(TestCase2Avg): ...@@ -95,6 +136,47 @@ class TestCase2Max(TestCase2Avg):
self.pool2D_forward_naive = max_pool2D_forward_naive self.pool2D_forward_naive = max_pool2D_forward_naive
class TestCase1PadZeroExclusiveAvgGrad(TestPoolBf16MklDNNOpGrad):
    """Avg-pool grad: 3x3 kernel, stride 1, zero padding, exclusive averaging."""

    def init_test_case(self):
        self.ksize = [3, 3]
        self.strides = [1, 1]

    def init_shape(self):
        self.shape = [2, 3, 7, 7]

    def init_paddings(self):
        self.paddings = [0, 0]

    def init_global_pool(self):
        # Pool over windows, not over the whole spatial extent.
        self.global_pool = False

    def init_exclusive(self):
        # Exclusive averaging: padded zeros are not counted in the divisor.
        self.exclusive = True
class TestCase2PadOneNonExclusiveAvgGrad(TestCase1PadZeroExclusiveAvgGrad):
    """Same configuration as the parent but with non-exclusive averaging.

    NOTE(review): the class name mentions "PadOne" but only ``exclusive`` is
    overridden here — paddings stay [0, 0] from the parent; confirm whether a
    padding override was intended.
    """

    def init_exclusive(self):
        self.exclusive = False
class TestCase0InitialMaxGrad(TestPoolBf16MklDNNOpGrad):
    """Max-pool grad on the base (default) pooling configuration."""

    def init_pool_type(self):
        self.pool_type = "max"
        # Use the max-pool naive reference for the expected forward output.
        self.pool2D_forward_naive = max_pool2D_forward_naive
class TestCase1PadZeroExclusiveMaxGrad(TestCase1PadZeroExclusiveAvgGrad):
    """Max-pool variant of the zero-padding case (3x3 kernel, stride 1)."""

    def init_pool_type(self):
        self.pool_type = "max"
        # Use the max-pool naive reference for the expected forward output.
        self.pool2D_forward_naive = max_pool2D_forward_naive
class TestCase2PadOneNonExclusiveMaxGrad(TestCase2PadOneNonExclusiveAvgGrad):
    """Max-pool variant of the non-exclusive case."""

    def init_pool_type(self):
        self.pool_type = "max"
        # Use the max-pool naive reference for the expected forward output.
        self.pool2D_forward_naive = max_pool2D_forward_naive
if __name__ == "__main__":
    # Pool2d tests run in static-graph mode.
    enable_static()
    unittest.main()
...@@ -19,7 +19,7 @@ import unittest ...@@ -19,7 +19,7 @@ import unittest
import numpy as np import numpy as np
import paddle.fluid.core as core import paddle.fluid.core as core
from op_test import OpTest from paddle.fluid.tests.unittests.op_test import OpTest
import paddle.fluid as fluid import paddle.fluid as fluid
from paddle.fluid import Program, program_guard from paddle.fluid import Program, program_guard
...@@ -252,7 +252,7 @@ def pool2D_forward_naive(x, ...@@ -252,7 +252,7 @@ def pool2D_forward_naive(x,
return out return out
class TestPool2D_Op(OpTest): class TestPool2D_Op_Mixin(object):
def setUp(self): def setUp(self):
self.op_type = "pool2d" self.op_type = "pool2d"
self.use_cudnn = False self.use_cudnn = False
...@@ -363,6 +363,10 @@ class TestPool2D_Op(OpTest): ...@@ -363,6 +363,10 @@ class TestPool2D_Op(OpTest):
self.adaptive = False self.adaptive = False
class TestPool2D_Op(TestPool2D_Op_Mixin, OpTest):
    # Backward-compatible concrete class: the test body was moved into
    # TestPool2D_Op_Mixin so other files can mix it into their own OpTest
    # subclasses; this shim keeps existing imports of TestPool2D_Op working.
    pass
class TestCase1(TestPool2D_Op): class TestCase1(TestPool2D_Op):
def init_test_case(self): def init_test_case(self):
self.ksize = [3, 3] self.ksize = [3, 3]
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register to comment.