Commit 1d32897c authored by joanna.wozna.intel, committed by Tao Luo

Fix test pool2d int8 mkldnn (#19976)

* Fix conv2d+dequantize squash for residual fusion

test=develop

* Correct int8 input

test=develop

* Add option to exclude or include padding in pool2d mkldnn

test=develop
Parent 2450d15b
@@ -74,7 +74,7 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
src_tz, dst_tz, ksize, strides, paddings, pooling_type,
ctx.Attr<bool>("ceil_mode"), input->format(),
paddle::framework::ToMKLDNNDataType(input->type()), is_test, dev_ctx,
-        ctx.GetPlace(), ctx.op().Output("Out"));
+        ctx.GetPlace(), ctx.op().Output("Out"), ctx.Attr<bool>("exclusive"));
auto src_memory = handler.AcquireSrcMemory(input);
auto dst_memory = handler.AcquireDstMemory(output);
@@ -158,7 +158,7 @@ class PoolMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
diff_dst_tz, diff_src_tz, ksize, strides, paddings, pooling_type,
ctx.Attr<bool>("ceil_mode"), in_x->format(), out_grad->format(),
paddle::framework::ToMKLDNNDataType(out_grad->type()), dev_ctx,
-        ctx.GetPlace(), ctx.op().Input("Out"));
+        ctx.GetPlace(), ctx.op().Input("Out"), ctx.Attr<bool>("exclusive"));
auto diff_dst_memory = handler.AcquireDiffDstMemory(out_grad);
auto diff_src_memory = handler.AcquireDiffSrcMemory(in_x_grad);
@@ -529,7 +529,7 @@ class PoolingMKLDNNHandler : public MKLDNNHandlerT<T, mkldnn::pooling_forward,
bool ceil_mode, const MKLDNNMemoryFormat fmt,
mkldnn::memory::data_type dt, bool is_test,
const platform::MKLDNNDeviceContext& dev_ctx, platform::Place cpu_place,
-      const std::string& unique_name)
+      const std::string& unique_name, bool exclude_padding)
: platform::MKLDNNHandlerT<T, mkldnn::pooling_forward,
mkldnn::pooling_backward>(
dev_ctx, dev_ctx.GetEngine(), cpu_place,
@@ -553,8 +553,11 @@ class PoolingMKLDNNHandler : public MKLDNNHandlerT<T, mkldnn::pooling_forward,
this->AcquireForwardPrimitiveDescriptor(
is_test ? mkldnn::prop_kind::forward_inference
: mkldnn::prop_kind::forward_training,
-        pooling_type == "max" ? mkldnn::algorithm::pooling_max
-                              : mkldnn::algorithm::pooling_avg,
+        pooling_type == "max"
+            ? mkldnn::algorithm::pooling_max
+            : (exclude_padding
+                   ? mkldnn::algorithm::pooling_avg_exclude_padding
+                   : mkldnn::algorithm::pooling_avg_include_padding),
src_md, dst_md, strides, ksize, padding_left_top, padding_right_bottom,
mkldnn::padding_kind::zero);
}
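Side note: the forward descriptor above now picks between mkldnn's two average-pooling flavors. The difference only shows up in windows that overlap the zero padding: pooling_avg_exclude_padding divides by the number of real input elements under the window, while pooling_avg_include_padding divides by the full kernel size. A minimal NumPy sketch of that divisor choice (illustrative helper, not PaddlePaddle code):

```python
import numpy as np

def avg_pool2d_1ch(x, k, pad, exclude_padding):
    # Average-pool one 2-D map with zero padding and stride == k.
    # Illustrative reference only; mirrors the algorithm selection above.
    xp = np.pad(x, pad, mode="constant")
    H, W = xp.shape
    out = np.empty((H // k, W // k))
    for i in range(H // k):
        for j in range(W // k):
            win = xp[i * k:(i + 1) * k, j * k:(j + 1) * k]
            if exclude_padding:
                # Count only window positions that map to real input.
                r0, r1 = max(i * k, pad), min((i + 1) * k, pad + x.shape[0])
                c0, c1 = max(j * k, pad), min((j + 1) * k, pad + x.shape[1])
                field = (r1 - r0) * (c1 - c0)
            else:
                field = k * k  # padded zeros count toward the divisor
            out[i, j] = win.sum() / field
    return out

x = np.ones((2, 2))
# The corner window sees one real element and three padded zeros.
print(avg_pool2d_1ch(x, k=2, pad=1, exclude_padding=True)[0, 0])   # 1.0
print(avg_pool2d_1ch(x, k=2, pad=1, exclude_padding=False)[0, 0])  # 0.25
```

Paddle's `exclusive=True` attribute corresponds to the exclude-padding variant, which is why both kernels above now forward ctx.Attr<bool>("exclusive") into the handler.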
@@ -567,7 +570,7 @@ class PoolingMKLDNNHandler : public MKLDNNHandlerT<T, mkldnn::pooling_forward,
const MKLDNNMemoryFormat fmt, const MKLDNNMemoryFormat diff_dst_fmt,
mkldnn::memory::data_type dt,
const platform::MKLDNNDeviceContext& dev_ctx, platform::Place cpu_place,
-      const std::string& unique_name)
+      const std::string& unique_name, bool exclude_padding)
: platform::MKLDNNHandlerT<T, mkldnn::pooling_forward,
mkldnn::pooling_backward>(
dev_ctx, dev_ctx.GetEngine(), cpu_place,
@@ -580,8 +583,11 @@ class PoolingMKLDNNHandler : public MKLDNNHandlerT<T, mkldnn::pooling_forward,
MKLDNNMemoryFormat::any);
this->AcquireBackwardPrimitiveDescriptor(
-        pooling_type == "max" ? mkldnn::algorithm::pooling_max
-                              : mkldnn::algorithm::pooling_avg,
+        pooling_type == "max"
+            ? mkldnn::algorithm::pooling_max
+            : (exclude_padding
+                   ? mkldnn::algorithm::pooling_avg_exclude_padding
+                   : mkldnn::algorithm::pooling_avg_include_padding),
diff_src_md, diff_dst_md, strides, ksize, paddings, paddings,
mkldnn::padding_kind::zero);
}
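The backward descriptor makes the same choice because the divisor also determines how each output gradient is redistributed over its window. A rough 1-D sketch of the gradient rule under both modes (assumed stride equal to the kernel size, illustrative only; not the mkldnn kernel):

```python
import numpy as np

def avg_pool1d_grad(grad_out, in_len, k, pad, exclude_padding):
    # Scatter each output gradient evenly over its pooling window,
    # dividing by the valid-element count or by the full kernel size.
    grad_in = np.zeros(in_len)
    for i, g in enumerate(grad_out):
        start = i * k - pad
        lo, hi = max(start, 0), min(start + k, in_len)
        field = (hi - lo) if exclude_padding else k
        grad_in[lo:hi] += g / field
    return grad_in

g = np.array([1.0, 1.0])
# With pad=1 and k=2, the first window holds a single real element.
print(avg_pool1d_grad(g, in_len=3, k=2, pad=1, exclude_padding=True))   # [1.  0.5 0.5]
print(avg_pool1d_grad(g, in_len=3, k=2, pad=1, exclude_padding=False))  # [0.5 0.5 0.5]
```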
@@ -34,6 +34,13 @@ class TestPool2dMKLDNNInt8_Op(TestPool2D_Op):
TestPool2D_Op.setUp(self)
assert self.dtype in [np.int8, np.uint8
], 'Dtype should be int8 or uint8'
+        input = np.random.randint(0, 100, self.shape).astype(self.dtype)
+        output = (self.pool2D_forward_naive(
+            input, self.ksize, self.strides, self.paddings, self.global_pool,
+            self.ceil_mode, self.exclusive, self.adaptive,
+            self.dtype)).astype(self.dtype)
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)}
+        self.outputs = {'Out': output}
def test_check_output(self):
self.check_output_with_place(core.CPUPlace(), atol=1e-5)
@@ -52,6 +59,9 @@ class TestCase1Avg(TestPool2dMKLDNNInt8_Op):
def init_global_pool(self):
self.global_pool = False
+    def init_exclusive(self):
+        self.exclusive = True
class TestCase2Avg(TestPool2dMKLDNNInt8_Op):
def init_test_case(self):
@@ -63,6 +73,9 @@ class TestCase2Avg(TestPool2dMKLDNNInt8_Op):
def init_global_pool(self):
self.global_pool = False
+    def init_exclusive(self):
+        self.exclusive = False
class TestCase0Max(TestPool2dMKLDNNInt8_Op):
def init_pool_type(self):
@@ -37,7 +37,8 @@ def max_pool2D_forward_naive(x,
global_pool=0,
ceil_mode=False,
exclusive=True,
-                            adaptive=False):
+                            adaptive=False,
+                            data_type=np.float32):
N, C, H, W = x.shape
if global_pool == 1:
ksize = [H, W]
@@ -76,7 +77,8 @@ def avg_pool2D_forward_naive(x,
global_pool=0,
ceil_mode=False,
exclusive=True,
-                            adaptive=False):
+                            adaptive=False,
+                            data_type=np.float32):
N, C, H, W = x.shape
if global_pool == 1:
ksize = [H, W]
@@ -106,7 +108,13 @@ def avg_pool2D_forward_naive(x,
field_size = ((r_end - r_start) * (c_end - c_start)) \
if (exclusive or adaptive) else (ksize[0] * ksize[1])
-                out[:, :, i, j] = np.sum(x_masked, axis=(2, 3)) / field_size
+                if data_type == np.int8 or data_type == np.uint8:
+                    out[:, :, i, j] = (np.rint(
+                        np.sum(x_masked, axis=(2, 3)) /
+                        field_size)).astype(data_type)
+                else:
+                    out[:, :, i, j] = (np.sum(x_masked, axis=(2, 3)) /
+                                       field_size).astype(data_type)
return out
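The new int8/uint8 branch rounds to the nearest integer before casting because a plain astype truncates, and integer averages rarely divide evenly; rounding keeps this Python reference in step with the quantized mkldnn output. A quick check with made-up values:

```python
import numpy as np

window = np.array([10, 11, 11], dtype=np.uint8)  # one 1x3 pooling window
mean = window.sum() / 3                          # 10.666...

print(np.uint8(mean))                  # plain cast truncates -> 10
print(np.rint(mean).astype(np.uint8))  # round to nearest     -> 11
```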
@@ -126,9 +134,10 @@ class TestPool2D_Op(OpTest):
if self.global_pool:
self.paddings = [0 for _ in range(len(self.paddings))]
input = np.random.random(self.shape).astype(self.dtype)
-        output = self.pool2D_forward_naive(
+        output = (self.pool2D_forward_naive(
            input, self.ksize, self.strides, self.paddings, self.global_pool,
-            self.ceil_mode, self.exclusive, self.adaptive).astype(self.dtype)
+            self.ceil_mode, self.exclusive, self.adaptive,
+            self.dtype)).astype(self.dtype)
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)}
self.attrs = {