diff --git a/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc
index 83e9cfd90a8515c8fd15842c114e6b2c59f45d18..9c0893456abf8bbf56fc52c7444034d12cbe197b 100644
--- a/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc
@@ -74,7 +74,7 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
         src_tz, dst_tz, ksize, strides, paddings, pooling_type,
         ctx.Attr<bool>("ceil_mode"), input->format(),
         paddle::framework::ToMKLDNNDataType(input->type()), is_test, dev_ctx,
-        ctx.GetPlace(), ctx.op().Output("Out"));
+        ctx.GetPlace(), ctx.op().Output("Out"), ctx.Attr<bool>("exclusive"));
 
     auto src_memory = handler.AcquireSrcMemory(input);
     auto dst_memory = handler.AcquireDstMemory(output);
@@ -158,7 +158,7 @@ class PoolMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
         diff_dst_tz, diff_src_tz, ksize, strides, paddings, pooling_type,
         ctx.Attr<bool>("ceil_mode"), in_x->format(), out_grad->format(),
         paddle::framework::ToMKLDNNDataType(out_grad->type()), dev_ctx,
-        ctx.GetPlace(), ctx.op().Input("Out"));
+        ctx.GetPlace(), ctx.op().Input("Out"), ctx.Attr<bool>("exclusive"));
 
     auto diff_dst_memory = handler.AcquireDiffDstMemory(out_grad);
     auto diff_src_memory = handler.AcquireDiffSrcMemory(in_x_grad);
diff --git a/paddle/fluid/platform/mkldnn_reuse.h b/paddle/fluid/platform/mkldnn_reuse.h
index 7396b90ea3d0728a6c63069e8cb3089cc3c47f98..50c629e53ef280975346fac9a3912c0b6ea6c924 100644
--- a/paddle/fluid/platform/mkldnn_reuse.h
+++ b/paddle/fluid/platform/mkldnn_reuse.h
@@ -529,7 +529,7 @@ class PoolingMKLDNNHandler : public MKLDNNHandlerT<T, mkldnn::pooling_forward,
       const platform::MKLDNNDeviceContext& dev_ctx, platform::Place cpu_place,
-      const std::string& unique_name)
+      const std::string& unique_name, bool exclude_padding)
       : platform::MKLDNNHandlerT<T, mkldnn::pooling_forward,
                                  mkldnn::pooling_backward>(
             dev_ctx, dev_ctx.GetEngine(), cpu_place,
@@ -553,8 +553,11 @@ class PoolingMKLDNNHandler : public MKLDNNHandlerT<T, mkldnn::pooling_forward,
     this->AcquireForwardPrimitiveDescriptor(
         is_test ? mkldnn::prop_kind::forward_inference
                 : mkldnn::prop_kind::forward_training,
-        pooling_type == "max" ? mkldnn::algorithm::pooling_max
-                              : mkldnn::algorithm::pooling_avg,
+        pooling_type == "max"
+            ? mkldnn::algorithm::pooling_max
+            : (exclude_padding
+                   ? mkldnn::algorithm::pooling_avg_exclude_padding
+                   : mkldnn::algorithm::pooling_avg_include_padding),
         src_md, dst_md, strides, ksize, padding_left_top,
         padding_right_bottom, mkldnn::padding_kind::zero);
   }
@@ -567,7 +570,7 @@ class PoolingMKLDNNHandler : public MKLDNNHandlerT<T, mkldnn::pooling_forward,
       const platform::MKLDNNDeviceContext& dev_ctx, platform::Place cpu_place,
-      const std::string& unique_name)
+      const std::string& unique_name, bool exclude_padding)
       : platform::MKLDNNHandlerT<T, mkldnn::pooling_forward,
                                  mkldnn::pooling_backward>(
             dev_ctx, dev_ctx.GetEngine(), cpu_place,
@@ -580,8 +583,11 @@ class PoolingMKLDNNHandler : public MKLDNNHandlerT<T, mkldnn::pooling_forward,
     this->AcquireBackwardPrimitiveDescriptor(
-        pooling_type == "max" ? mkldnn::algorithm::pooling_max
-                              : mkldnn::algorithm::pooling_avg,
+        pooling_type == "max"
+            ? mkldnn::algorithm::pooling_max
+            : (exclude_padding
+                   ? mkldnn::algorithm::pooling_avg_exclude_padding
+                   : mkldnn::algorithm::pooling_avg_include_padding),
         diff_src_md, diff_dst_md, strides, ksize, paddings, paddings,
         mkldnn::padding_kind::zero);
   }
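
The C++ side in brief: the pool2d `exclusive` attribute is now threaded through to `PoolingMKLDNNHandler`, which selects `mkldnn::algorithm::pooling_avg_exclude_padding` or `pooling_avg_include_padding` instead of the old unconditional `pooling_avg`. A minimal NumPy sketch of the difference between the two averaging modes (a hypothetical stand-alone helper for one 2-D map with symmetric zero padding, not code from this patch):

    import numpy as np

    def avg_window(x, i, j, ksize, padding, exclusive):
        # One pooling window with its top-left corner at (i - padding,
        # j - padding), clipped to the input; padded cells contribute zeros.
        kh, kw = ksize
        r0, r1 = max(i - padding, 0), min(i - padding + kh, x.shape[0])
        c0, c1 = max(j - padding, 0), min(j - padding + kw, x.shape[1])
        window_sum = x[r0:r1, c0:c1].sum()
        # exclusive divides by the in-bounds count, inclusive by kh * kw.
        field = (r1 - r0) * (c1 - c0) if exclusive else kh * kw
        return window_sum / field

    x = np.full((2, 2), 100.0)
    # The corner window of a 2x2 pool with padding=1 covers one real cell:
    print(avg_window(x, 0, 0, (2, 2), 1, exclusive=True))   # 100.0
    print(avg_window(x, 0, 0, (2, 2), 1, exclusive=False))  # 25.0

Only the divisor changes between the two algorithms: the in-bounds element count versus the full kernel size, mirroring the `field_size` expression in `avg_pool2D_forward_naive` below.
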
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_int8_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_int8_mkldnn_op.py
index fca906fecc5fe8d25b9251c886398f8df778043f..b65cdcf3d3489dbd5c6c4c9a22c3245a662904f8 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_int8_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_int8_mkldnn_op.py
@@ -34,6 +34,13 @@ class TestPool2dMKLDNNInt8_Op(TestPool2D_Op):
         TestPool2D_Op.setUp(self)
         assert self.dtype in [np.int8, np.uint8
                               ], 'Dtype should be int8 or uint8'
+        input = np.random.randint(0, 100, self.shape).astype(self.dtype)
+        output = (self.pool2D_forward_naive(
+            input, self.ksize, self.strides, self.paddings, self.global_pool,
+            self.ceil_mode, self.exclusive, self.adaptive,
+            self.dtype)).astype(self.dtype)
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)}
+        self.outputs = {'Out': output}
 
     def test_check_output(self):
         self.check_output_with_place(core.CPUPlace(), atol=1e-5)
@@ -52,6 +59,9 @@ class TestCase1Avg(TestPool2dMKLDNNInt8_Op):
     def init_global_pool(self):
         self.global_pool = False
 
+    def init_exclusive(self):
+        self.exclusive = True
+
 
 class TestCase2Avg(TestPool2dMKLDNNInt8_Op):
     def init_test_case(self):
@@ -63,6 +73,9 @@ class TestCase2Avg(TestPool2dMKLDNNInt8_Op):
     def init_global_pool(self):
         self.global_pool = False
 
+    def init_exclusive(self):
+        self.exclusive = False
+
 
 class TestCase0Max(TestPool2dMKLDNNInt8_Op):
     def init_pool_type(self):
diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_op.py b/python/paddle/fluid/tests/unittests/test_pool2d_op.py
index 84f6526b8052d77a32130487e1bc80c6439db7b7..9d501b709425e9f993503ea3eb5ed7488973ca7f 100644
--- a/python/paddle/fluid/tests/unittests/test_pool2d_op.py
+++ b/python/paddle/fluid/tests/unittests/test_pool2d_op.py
@@ -37,7 +37,8 @@ def max_pool2D_forward_naive(x,
                              global_pool=0,
                              ceil_mode=False,
                              exclusive=True,
-                             adaptive=False):
+                             adaptive=False,
+                             data_type=np.float32):
     N, C, H, W = x.shape
     if global_pool == 1:
         ksize = [H, W]
@@ -76,7 +77,8 @@ def avg_pool2D_forward_naive(x,
                              global_pool=0,
                              ceil_mode=False,
                              exclusive=True,
-                             adaptive=False):
+                             adaptive=False,
+                             data_type=np.float32):
     N, C, H, W = x.shape
     if global_pool == 1:
         ksize = [H, W]
@@ -106,7 +108,13 @@ def avg_pool2D_forward_naive(x,
 
             field_size = ((r_end - r_start) * (c_end - c_start)) \
                 if (exclusive or adaptive) else (ksize[0] * ksize[1])
-            out[:, :, i, j] = np.sum(x_masked, axis=(2, 3)) / field_size
+            if data_type == np.int8 or data_type == np.uint8:
+                out[:, :, i, j] = (np.rint(
+                    np.sum(x_masked, axis=(2, 3)) /
+                    field_size)).astype(data_type)
+            else:
+                out[:, :, i, j] = (np.sum(x_masked, axis=(2, 3)) /
+                                   field_size).astype(data_type)
 
     return out
 
@@ -126,9 +134,10 @@ class TestPool2D_Op(OpTest):
         if self.global_pool:
             self.paddings = [0 for _ in range(len(self.paddings))]
         input = np.random.random(self.shape).astype(self.dtype)
-        output = self.pool2D_forward_naive(
+        output = (self.pool2D_forward_naive(
             input, self.ksize, self.strides, self.paddings, self.global_pool,
-            self.ceil_mode, self.exclusive, self.adaptive).astype(self.dtype)
+            self.ceil_mode, self.exclusive, self.adaptive,
+            self.dtype)).astype(self.dtype)
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)}
         self.attrs = {
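
On the test side, the naive reference now receives the data type so that the int8/uint8 path can round to the nearest integer before casting; a bare `astype` truncates toward zero and would disagree with the quantized MKL-DNN kernel. A small illustration with made-up window averages (assumes only NumPy):

    import numpy as np

    window_avgs = np.array([24.6, 25.5, 26.4])
    print(window_avgs.astype(np.int8))           # truncates: [24 25 26]
    print(np.rint(window_avgs).astype(np.int8))  # nearest (half to even): [25 26 26]
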