From 7cb4a8b8f21b3432ceeacdc23ddf8b6841cf0e50 Mon Sep 17 00:00:00 2001
From: lidanqing
Date: Fri, 16 Oct 2020 04:23:57 +0200
Subject: [PATCH] [oneDNN] Conv dilation support (#27914)

* conv dilated mkldnn support: forward and backward pass

* add mkldnn conv_transpose dilation UT
test=develop

* remove unnecessary PADDLE_ENFORCE

* add int8 and bf16 dilated conv UT

* update according to reviews
---
 .../ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc   | 13 ------
 .../fluid/operators/mkldnn/conv_mkldnn_op.cc  | 46 ++++++++-----------
 .../mkldnn/conv_transpose_mkldnn_op.cc        | 18 +++++---
 paddle/fluid/platform/mkldnn_reuse.h          |  9 ++--
 .../mkldnn/test_conv2d_bf16_mkldnn_op.py      | 21 ++++++---
 .../mkldnn/test_conv2d_int8_mkldnn_op.py      | 17 +++++++
 .../unittests/mkldnn/test_conv2d_mkldnn_op.py | 17 +++++++
 .../mkldnn/test_conv2d_transpose_mkldnn_op.py | 14 ++++++
 8 files changed, 99 insertions(+), 56 deletions(-)

diff --git a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc
index 63524294b6..dfb030a7cc 100644
--- a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc
@@ -84,19 +84,6 @@ void ConvBiasFusePass::ApplyImpl(ir::Graph* graph) const {
       VLOG(3) << "do not perform " + type() + "+bias fuse";
       return;
     }
-    if (conv->Op()->HasAttr("dilations")) {
-      auto dilations =
-          BOOST_GET_CONST(std::vector<int>, conv->Op()->GetAttr("dilations"));
-      for (const auto& d : dilations) {
-        if (d != 1) {
-          LOG(WARNING)
-              << "dilation conv not supported in MKLDNN, fuse not apply "
-              << "and set conv attribute use_mkldnn = false";
-          conv->Op()->SetAttr("use_mkldnn", false);
-          return;
-        }
-      }
-    }
 
     auto* eltwise_bias_tensor =
         scope->FindVar(eltwise_bias->Name())->GetMutable<LoDTensor>();
diff --git a/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc
index 7a4e11091f..f44ce8c567 100644
--- a/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc
@@ -193,13 +193,8 @@ class ConvMKLDNNHandlerT
                                data_dims, strides, ksize);
 
       const bool is_conv3d = strides.size() == 3U;
-      PADDLE_ENFORCE_EQ(
-          is_conv3d
-              ? dilations.size() == 3 && dilations[0] == 1 &&
-                    dilations[1] == 1 && dilations[2] == 1
-              : dilations.size() == 2 && dilations[0] == 1 && dilations[1] == 1,
-          true, platform::errors::Unimplemented(
-                    "Dilation in oneDNN convolution is not implemented yet"));
+      std::transform(dilations.begin(), dilations.end(), dilations.begin(),
+                     [](int64_t i) { return i - 1; });
 
       const auto src_tz = paddle::framework::vectorize(input->dims());
 
@@ -210,6 +205,7 @@ class ConvMKLDNNHandlerT
 
       const mkldnn::memory::dims stride_dims = strides;
       const auto mkldnn_paddings = platform::ToMkldnnPadding(paddings);
+      const mkldnn::memory::dims dilations_dims = dilations;
 
       /* create memory descriptor for convolution without specified format
        * ('any') which lets a primitive (convolution in this case) choose
@@ -256,13 +252,13 @@ class ConvMKLDNNHandlerT
 
         this->AcquireForwardPrimitiveDescriptor(
             conv_attr, fwd_prop_kind, dnnl::algorithm::convolution_direct,
-            src_md, weights_md, bias_md, dst_md, stride_dims,
+            src_md, weights_md, bias_md, dst_md, stride_dims, dilations_dims,
             mkldnn_paddings[0], mkldnn_paddings[1]);
       } else {
         this->AcquireForwardPrimitiveDescriptor(
             conv_attr, fwd_prop_kind, dnnl::algorithm::convolution_direct,
-            src_md, weights_md, dst_md, stride_dims, mkldnn_paddings[0],
-            mkldnn_paddings[1]);
+            src_md, weights_md, dst_md, stride_dims, dilations_dims,
+            mkldnn_paddings[0], mkldnn_paddings[1]);
       }
     }
   }
@@ -619,9 +615,8 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     bool is_conv3d = strides.size() == 3U;
 
     PADDLE_ENFORCE_NE(is_conv3d, true,
-                      platform::errors::InvalidArgument(
-                          "int8 does not support conv3d currently, should "
-                          "set param is_conv3d as False"));
+                      platform::errors::Unimplemented(
+                          "int8 does not support conv3d currently"));
 
     auto input_dims = input->dims();
     auto data_dims = framework::slice_ddim(input_dims, 2, input_dims.size());
@@ -641,13 +636,8 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
       GetWeightsTz(weights_tz, g);
       auto dst_tz = paddle::framework::vectorize(output->dims());
 
-      PADDLE_ENFORCE_EQ(
-          is_conv3d
-              ? dilations.size() == 3 && dilations[0] == 1 &&
-                    dilations[1] == 1 && dilations[2] == 1
-              : dilations.size() == 2 && dilations[0] == 1 && dilations[1] == 1,
-          true, platform::errors::Unimplemented(
-                    "dilation in convolution is not implemented yet"));
+      std::transform(dilations.begin(), dilations.end(), dilations.begin(),
+                     [](int64_t i) { return i - 1; });
 
       const K* filter_data = filter->data<K>();
       auto scale_in_data = ctx.Attr<float>("Scale_in");
@@ -710,13 +700,13 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
         auto bias_md = platform::MKLDNNMemDesc(bias_tz, memory::data_type::s32,
                                                MKLDNNMemoryFormat::x);
         conv_pd = handler->AcquireConvolutionPrimitiveDescriptor(
-            src_md, weights_md, bias_md, dst_md, strides, paddings,
+            src_md, weights_md, bias_md, dst_md, strides, dilations, paddings,
             mkldnn_engine, fuse_activation, fuse_alpha, fuse_beta,
             fuse_residual_conn, propagation, output_shift_scale, sum_scale);
       } else {
         conv_pd = handler->AcquireConvolutionPrimitiveDescriptor(
-            src_md, weights_md, boost::none, dst_md, strides, paddings,
-            mkldnn_engine, fuse_activation, fuse_alpha, fuse_beta,
+            src_md, weights_md, boost::none, dst_md, strides, dilations,
+            paddings, mkldnn_engine, fuse_activation, fuse_alpha, fuse_beta,
             fuse_residual_conn, propagation, output_shift_scale, sum_scale);
       }
 
@@ -1019,11 +1009,14 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
                             "Fail to find conv_pd in device context"));
 
       auto mkldnn_paddings = platform::ToMkldnnPadding(paddings);
-
+      std::transform(dilations.begin(), dilations.end(), dilations.begin(),
+                     [](int64_t i) { return i - 1; });
+      const mkldnn::memory::dims dilations_dims = dilations;
       // create backward convolution weights primitive descriptor
       auto conv_bwd_weights_desc = mkldnn::convolution_backward_weights::desc(
           mkldnn::algorithm::convolution_direct, src_md, diff_weights_md,
-          diff_dst_md, strides, mkldnn_paddings[0], mkldnn_paddings[1]);
+          diff_dst_md, strides, dilations_dims, mkldnn_paddings[0],
+          mkldnn_paddings[1]);
 
       auto conv_bwd_weights_pd =
           std::make_shared<mkldnn::convolution_backward_weights::primitive_desc>(
@@ -1032,7 +1025,8 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
       // create backward convolution data primitive descriptor
       auto conv_bwd_data_desc = mkldnn::convolution_backward_data::desc(
           mkldnn::algorithm::convolution_direct, diff_src_md, weights_md,
-          diff_dst_md, strides, mkldnn_paddings[0], mkldnn_paddings[1]);
+          diff_dst_md, strides, dilations_dims, mkldnn_paddings[0],
+          mkldnn_paddings[1]);
 
       auto conv_bwd_data_pd =
           std::make_shared<mkldnn::convolution_backward_data::primitive_desc>(
diff --git a/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc
index 5653790021..e9f32e7ac2 100644
--- a/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc
@@ -104,6 +104,11 @@ class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     int groups = ctx.Attr<int>("groups");
     std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
 
+    PADDLE_ENFORCE_EQ(
+        strides.size(), 2,
+        platform::errors::Unimplemented(
+            "Now we only support 2d oneDNN convolution transpose op"));
+
     auto input_dims = input->dims();
     auto data_dims = framework::slice_ddim(input_dims, 2, input_dims.size());
     auto filter_dims = filter->dims();
@@ -115,10 +120,8 @@ class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
                              data_dims, strides, ksize);
 
-    PADDLE_ENFORCE(
-        dilations.size() == 2 && dilations[0] == 1 && dilations[1] == 1,
-        platform::errors::Unimplemented(
-            "dilation in convolution is not implemented yet"));
+    std::transform(dilations.begin(), dilations.end(), dilations.begin(),
+                   [](int64_t i) { return i - 1; });
 
     const T* input_data = input->data<T>();
     const T* filter_data = filter->data<T>();
@@ -210,11 +213,12 @@ class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
       auto bias_md = platform::MKLDNNMemDesc(
           bias_tz, platform::MKLDNNGetDataType<T>(), MKLDNNMemoryFormat::x);
       conv_transpose_pd = handler.AcquireConvolutionPrimitiveDescriptor(
-          src_md, weights_md, bias_md, dst_md, strides, paddings, mkldnn_engine,
-          fuse_activation, fuse_alpha, fuse_beta, false, fwd_prop_kind);
+          src_md, weights_md, bias_md, dst_md, strides, dilations, paddings,
+          mkldnn_engine, fuse_activation, fuse_alpha, fuse_beta, false,
+          fwd_prop_kind);
     } else {
       conv_transpose_pd = handler.AcquireConvolutionPrimitiveDescriptor(
-          src_md, weights_md, boost::none, dst_md, strides, paddings,
+          src_md, weights_md, boost::none, dst_md, strides, dilations, paddings,
           mkldnn_engine, fuse_activation, fuse_alpha, fuse_beta, false,
           fwd_prop_kind);
     }
diff --git a/paddle/fluid/platform/mkldnn_reuse.h b/paddle/fluid/platform/mkldnn_reuse.h
index 785627a09f..740ac1d81f 100644
--- a/paddle/fluid/platform/mkldnn_reuse.h
+++ b/paddle/fluid/platform/mkldnn_reuse.h
@@ -1330,6 +1330,7 @@ class ConvMKLDNNTemplateHandler : public MKLDNNHandler {
       const mkldnn::memory::desc& src, const mkldnn::memory::desc& weights,
       boost::optional<const mkldnn::memory::desc&> bias,
       const mkldnn::memory::desc& dst, const std::vector<int64_t>& strides,
+      const std::vector<int64_t>& dilations,
       const std::vector<int64_t>& paddings, const mkldnn::engine& engine,
       const std::string& fuse_activation, float fuse_alpha, float fuse_beta,
       const bool fuse_residual_conn, mkldnn::prop_kind fwd_prop_kind,
@@ -1352,18 +1353,18 @@ class ConvMKLDNNTemplateHandler : public MKLDNNHandler {
         dev_ctx_.GetBlob(key_conv_pd));
     if (conv_pd_ == nullptr) {
       mkldnn::memory::dims stride_dims = strides;
-
+      mkldnn::memory::dims dilations_dims = dilations;
       auto mkldnn_paddings = ToMkldnnPadding(paddings);
 
       auto conv_desc =
          bias ? typename forward_t::desc(
                     fwd_prop_kind, convolutional_algorithm<forward_t>::T,
-                     src, weights, *bias, dst, stride_dims,
+                     src, weights, *bias, dst, stride_dims, dilations_dims,
                      mkldnn_paddings[0], mkldnn_paddings[1])
               : typename forward_t::desc(
                      fwd_prop_kind, convolutional_algorithm<forward_t>::T,
-                     src, weights, dst, stride_dims, mkldnn_paddings[0],
-                     mkldnn_paddings[1]);
+                     src, weights, dst, stride_dims, dilations_dims,
+                     mkldnn_paddings[0], mkldnn_paddings[1]);
 
       mkldnn::primitive_attr conv_attr =
           CreatePostOps(fuse_activation, fuse_alpha, fuse_beta,
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_bf16_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_bf16_mkldnn_op.py
index 6f0b4f9076..0311eb887a 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_bf16_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_bf16_mkldnn_op.py
@@ -23,12 +23,6 @@ from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
 from paddle.fluid.tests.unittests.test_conv2d_op import conv2d_forward_naive, TestConv2dOp
 
 
-def conv2d_forward_refer(input, filter, group, conv_param):
-    out, in_n, out_h, out_w, out_c = conv2d_forward_naive(input, filter, group,
-                                                          conv_param)
-    return out
-
-
 def conv2d_residual_naive(out, residual):
     assert out.shape == residual.shape
     out = np.add(out, residual)
@@ -176,6 +170,21 @@ class TestWithStride(TestConv2dBf16Op):
         self.input_type = np.uint16
 
 
+class TestWithDilations(TestConv2dBf16Op):
+    def init_test_case(self):
+        self.pad = [1, 1]
+        self.stride = [1, 1]
+        self.dilations = [2, 2]
+        self.input_size = [2, 3, 10, 10]
+        self.input_residual_size = [2, 6, 8, 8]
+        assert np.mod(self.input_size[1], self.groups) == 0
+        f_c = self.input_size[1] // self.groups
+        self.filter_size = [6, f_c, 3, 3]
+
+    def init_data_type(self):
+        self.input_type = np.uint16
+
+
 class TestWith1x1ForceFP32Output(TestConv2dBf16Op):
     def init_test_case(self):
         self.pad = [0, 0]
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_int8_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_int8_mkldnn_op.py
index 9731efced6..388eb38fc6 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_int8_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_int8_mkldnn_op.py
@@ -228,6 +228,22 @@ class TestWithStride(TestConv2dInt8Op):
         self.scale_in_eltwise = 0.5
 
 
+class TestWithDilations(TestConv2dInt8Op):
+    def init_test_case(self):
+        self.pad = [1, 1]
+        self.stride = [1, 1]
+        self.dilations = [2, 2]
+        self.input_size = [2, 3, 10, 10]
+        self.input_residual_size = [2, 6, 8, 8]
+        assert np.mod(self.input_size[1], self.groups) == 0
+        f_c = self.input_size[1] // self.groups
+        self.filter_size = [6, f_c, 3, 3]
+        self.scale_in = 1.0
+        self.scale_out = 0.8
+        self.scale_weights = [10.0]
+        self.scale_in_eltwise = 0.5
+
+
 class TestWith1x1(TestConv2dInt8Op):
     def init_test_case(self):
         self.pad = [0, 0]
@@ -343,6 +359,7 @@ def create_test_int8_class(parent):
 create_test_int8_class(TestConv2dInt8Op)
 create_test_int8_class(TestWithPad)
 create_test_int8_class(TestWithStride)
+create_test_int8_class(TestWithDilations)
 create_test_int8_class(TestWithGroup)
 create_test_int8_class(TestWith1x1)
 create_test_int8_class(TestWithInput1x1Filter1x1)
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py
index 6600d1456d..6fad98874e 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py
@@ -215,5 +215,22 @@ class TestConv2dOp_AsyPadding_NHWC_MKLDNN(TestConv2dOp_Valid_NHWC_MKLDNN):
         self.padding_algorithm = "EXPLICIT"
 
 
+class TestMKLDNNDilations(TestConv2dMKLDNNOp):
+    def init_test_case(self):
+        TestConv2dMKLDNNOp.init_test_case(self)
+        self.pad = [0, 0]
+        self.stride = [1, 1]
+        self.input_size = [2, 3, 10, 10]  # NCHW
+        assert np.mod(self.input_size[1], self.groups) == 0
+        f_c = self.input_size[1] // self.groups
+        self.filter_size = [12, f_c, 3, 3]
+
+    def init_dilation(self):
+        self.dilations = [2, 2]
+
+    def init_group(self):
+        self.groups = 3
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_transpose_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_transpose_mkldnn_op.py
index b98610760e..1f68c35ec2 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_transpose_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_transpose_mkldnn_op.py
@@ -136,3 +136,17 @@ class TestMKLDNNWithValidPad_NHWC(TestMKLDNNWithValidPad):
         self.data_format = "NHWC"
         N, C, H, W = self.input_size
         self.input_size = [N, H, W, C]
+
+
+class TestConv2dTransposeMKLDNNWithDilationsExplicitPad(
+        TestConv2dTransposeMKLDNNOp):
+    def init_test_case(self):
+        TestConv2dTransposeMKLDNNOp.init_test_case(self)
+        self.stride = [2, 1]
+        self.dilations = [1, 2]
+        self.groups = 1
+        self.input_size = [4, 3, 8, 7]  # NCHW
+        f_c = self.input_size[1]
+        self.filter_size = [f_c, 6, 4, 3]
+        self.pad = [1, 3, 2, 1]
+        self.padding_algorithm = "EXPLICIT"
-- 
GitLab
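
Note on the dilation convention used throughout this patch: Paddle (like most
frameworks) treats dilation = 1 as a dense kernel, while oneDNN counts the
extra gaps inserted between kernel taps, which is why every kernel above
rewrites each dilation with the `[](int64_t i) { return i - 1; }` transform
before building a primitive descriptor. A minimal Python sketch follows,
checking the shapes used by the new dilation tests; the conv_out_size helper
is a hypothetical name for illustration, not part of the patch or of Paddle.

def conv_out_size(in_size, kernel, stride, pad, dilation):
    # Paddle convention: dilation=1 means a dense kernel.
    effective_kernel = dilation * (kernel - 1) + 1
    return (in_size + 2 * pad - effective_kernel) // stride + 1

# TestWithDilations above: 10x10 input, 3x3 kernel, stride 1, pad 1,
# dilation 2 -> effective 5x5 kernel -> 8x8 output, matching the expected
# residual shape [2, 6, 8, 8] in the bf16 and int8 tests.
assert conv_out_size(10, 3, 1, 1, 2) == 8

# The C++ changes pass d - 1 to oneDNN for each framework dilation d, so a
# dense kernel (framework dilation 1) becomes oneDNN dilation 0.
assert [d - 1 for d in [2, 2]] == [1, 1]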