From e8890031a7cd9959875ed7515e7bac31bad3c603 Mon Sep 17 00:00:00 2001
From: Adam <38704900+grygielski@users.noreply.github.com>
Date: Thu, 7 Nov 2019 15:58:53 +0100
Subject: [PATCH] [cherry-pick] Add support for asymmetric padding in MKLDNN
 pool, conv and conv_transpose (#21072)

* Add asymmetric padding support for mkldnn pooling
  test=develop

* Add asymmetric padding support for mkldnn conv
  test=develop

* Add asymmetric padding support for mkldnn conv_transpose
  test=develop
---
 .../fluid/operators/mkldnn/conv_mkldnn_op.cc  | 45 +++++++++-
 .../mkldnn/conv_transpose_mkldnn_op.cc        | 13 +++
 .../fluid/operators/mkldnn/pool_mkldnn_op.cc  | 46 ++++++----
 paddle/fluid/platform/mkldnn_helper.h         | 22 +++++
 paddle/fluid/platform/mkldnn_reuse.h          | 35 ++++----
 .../mkldnn/test_conv2d_int8_mkldnn_op.py      | 22 +++++
 .../unittests/mkldnn/test_conv2d_mkldnn_op.py | 23 ++++-
 .../mkldnn/test_conv2d_transpose_mkldnn_op.py | 21 +++++
 .../unittests/mkldnn/test_conv3d_mkldnn_op.py | 24 ++++-
 .../unittests/mkldnn/test_pool2d_mkldnn_op.py | 90 ++++++++++++++++++-
 10 files changed, 304 insertions(+), 37 deletions(-)
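A note for readers of this series: the ops below accept either a symmetric per-dimension padding attribute or an explicit per-side one. The following is a minimal standalone sketch, not part of the patch, of the convention the helpers (`UpdatePadding` / `UpdatePaddingAndDilation`) normalize to; the function name here is illustrative only.

```cpp
// Sketch (illustrative, not Paddle's API): a 2-element {pad_h, pad_w}
// padding attribute is expanded to the explicit per-side form
// {pad_top, pad_bottom, pad_left, pad_right}; a 4-element input, which may
// already be asymmetric, is kept as-is.
#include <vector>

std::vector<int> ExpandPaddings2D(const std::vector<int>& paddings) {
  if (paddings.size() == 2) {  // symmetric: same pad on both sides of a dim
    return {paddings[0], paddings[0], paddings[1], paddings[1]};
  }
  return paddings;  // already {top, bottom, left, right}
}
```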
diff --git a/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc
index b1b69af145e..5bb362d475b 100644
--- a/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc
+++ b/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc
@@ -171,8 +171,20 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     float fuse_beta = ctx.Attr<float>("fuse_beta");
     bool fuse_residual_conn = ctx.Attr<bool>("fuse_residual_connection");
     int groups = ctx.Attr<int>("groups");
+    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
     bool is_conv3d = strides.size() == 3U;
 
+    auto input_dims = input->dims();
+    auto data_dims = framework::slice_ddim(input_dims, 2, input_dims.size());
+    auto filter_dims = filter->dims();
+    auto filter_data_dims =
+        framework::slice_ddim(filter_dims, 2, filter_dims.size());
+
+    auto ksize = framework::vectorize<int>(filter_data_dims);
+
+    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
+                             data_dims, strides, ksize);
+
     PADDLE_ENFORCE(
         is_conv3d
             ? dilations.size() == 3 && dilations[0] == 1 && dilations[1] == 1 &&
@@ -435,12 +447,25 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
     std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
     std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
+    std::string padding_algorithm =
+        ctx.Attr<std::string>("padding_algorithm");
 
     bool is_conv3d = strides.size() == 3U;
     PADDLE_ENFORCE_NE(is_conv3d, true,
                       "int8 does not support conv3d currently");
 
+    auto input_dims = input->dims();
+    auto data_dims = framework::slice_ddim(input_dims, 2, input_dims.size());
+    auto filter_dims = filter->dims();
+    auto filter_data_dims =
+        framework::slice_ddim(filter_dims, 2, filter_dims.size());
+
+    auto ksize = framework::vectorize<int>(filter_data_dims);
+
+    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
+                             data_dims, strides, ksize);
+
     int groups = ctx.Attr<int>("groups");
     auto weights_tz = paddle::framework::vectorize<int>(filter->dims());
     int g = std::max(groups, 1);
@@ -696,6 +721,7 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
     std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
     std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
     std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
+    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
     int groups = ctx.Attr<int>("groups");
 
     bool is_conv3d = strides.size() == 3U;
@@ -705,6 +731,17 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
     T* input_grad_data = nullptr;
     T* filter_grad_data = nullptr;
 
+    auto input_dims = input->dims();
+    auto data_dims = framework::slice_ddim(input_dims, 2, input_dims.size());
+    auto filter_dims = filter->dims();
+    auto filter_data_dims =
+        framework::slice_ddim(filter_dims, 2, filter_dims.size());
+
+    auto ksize = framework::vectorize<int>(filter_data_dims);
+
+    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
+                             data_dims, strides, ksize);
+
     auto src_tz = paddle::framework::vectorize<int>(input->dims());
     auto weights_tz = paddle::framework::vectorize<int>(filter->dims());
     int g = std::max(groups, 1);
@@ -766,10 +803,13 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
     PADDLE_ENFORCE_NE(conv_pd, nullptr,
                       "Fail to find conv_pd in device context");
 
+    auto mkldnn_paddings = platform::ToMkldnnPadding(paddings);
+
     // create backward convolution weights primitive descriptor
     auto conv_bwd_weights_desc = mkldnn::convolution_backward_weights::desc(
         mkldnn::convolution_direct, src_md, diff_weights_md, diff_dst_md,
-        strides, paddings, paddings, mkldnn::padding_kind::zero);
+        strides, mkldnn_paddings[0], mkldnn_paddings[1],
+        mkldnn::padding_kind::zero);
     auto conv_bwd_weights_pd =
         std::make_shared<mkldnn::convolution_backward_weights::primitive_desc>(
             conv_bwd_weights_desc, mkldnn_engine, *conv_pd);
@@ -777,7 +817,8 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
     // create backward convolution data primitive descriptor
     auto conv_bwd_data_desc = mkldnn::convolution_backward_data::desc(
         mkldnn::convolution_direct, diff_src_md, weights_md, diff_dst_md,
-        strides, paddings, paddings, mkldnn::padding_kind::zero);
+        strides, mkldnn_paddings[0], mkldnn_paddings[1],
+        mkldnn::padding_kind::zero);
     auto conv_bwd_data_pd =
         std::make_shared<mkldnn::convolution_backward_data::primitive_desc>(
             conv_bwd_data_desc, mkldnn_engine, *conv_pd);
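The `UpdatePaddingAndDilation` calls added above also resolve the `padding_algorithm` attribute before the paddings reach MKL-DNN. A standalone sketch of the "SAME" rule per spatial dimension follows (the helper name and signature here are illustrative, not Paddle's); "VALID" simply zeroes all paddings.

```cpp
// Sketch: "SAME" padding resolution for one spatial dim. The total padding
// is split with the smaller half on the begin side, which is exactly what
// makes the result asymmetric when pad_sum is odd.
#include <algorithm>

void ResolveSamePadding(int in_size, int stride, int ksize,
                        int* pad_begin, int* pad_end) {
  int out_size = (in_size + stride - 1) / stride;  // ceil(in / stride)
  int pad_sum = std::max((out_size - 1) * stride + ksize - in_size, 0);
  *pad_begin = pad_sum / 2;           // smaller half first
  *pad_end = pad_sum - *pad_begin;    // remainder goes to the end side
}
```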
#include "paddle/fluid/framework/data_layout_transform.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/malloc.h" +#include "paddle/fluid/operators/conv_op.h" #include "paddle/fluid/platform/mkldnn_reuse.h" namespace paddle { @@ -74,6 +75,18 @@ class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel { std::vector paddings = ctx.Attr>("paddings"); std::vector dilations = ctx.Attr>("dilations"); int groups = ctx.Attr("groups"); + std::string padding_algorithm = ctx.Attr("padding_algorithm"); + + auto input_dims = input->dims(); + auto data_dims = framework::slice_ddim(input_dims, 2, input_dims.size()); + auto filter_dims = filter->dims(); + auto filter_data_dims = + framework::slice_ddim(filter_dims, 2, filter_dims.size()); + + auto ksize = framework::vectorize(filter_data_dims); + + UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm, + data_dims, strides, ksize); PADDLE_ENFORCE( dilations.size() == 2 && dilations[0] == 1 && dilations[1] == 1, diff --git a/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc index a7f1bd018c5..c578e69821f 100644 --- a/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc @@ -50,20 +50,26 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel { std::vector ksize = ctx.Attr>("ksize"); std::vector strides = ctx.Attr>("strides"); std::vector paddings = ctx.Attr>("paddings"); + bool global_pooling = ctx.Attr("global_pooling"); + std::string padding_algorithm = ctx.Attr("padding_algorithm"); - if (ctx.Attr("global_pooling")) { - for (size_t i = 0; i < ksize.size(); ++i) { - paddings[i] = 0; - ksize[i] = static_cast(input->dims()[i + 2]); - } + // Only 2D pooling is supported now + PADDLE_ENFORCE_EQ(ksize.size(), 2, "ksize must be 2D, i.e. 2D pooling"); + PADDLE_ENFORCE_EQ(pooling_type == "max" || pooling_type == "avg", true, + "pooling_type must be 'max' or 'avg'"); + PADDLE_ENFORCE_EQ(input->dims().size(), 4, + "Input dim must be with 4, i.e. NCHW"); + + auto input_dims = input->dims(); + framework::DDim data_dims = + framework::slice_ddim(input_dims, 2, input_dims.size()); + + if (global_pooling) { + UpdateKsize(&ksize, data_dims); } - // Only 2D pooling is supported now - PADDLE_ENFORCE(ksize.size() == 2, "ksize must be 2D, i.e. 2D pooling"); - PADDLE_ENFORCE(pooling_type == "max" || pooling_type == "avg", - "pooling_type must be 'max' or 'avg'"); - PADDLE_ENFORCE(input->dims().size() == 4, - "Input dim must be with 4, i.e. 
NCHW"); + UpdatePadding(&paddings, global_pooling, 0, padding_algorithm, data_dims, + strides, ksize); auto src_tz = paddle::framework::vectorize(input->dims()); auto dst_tz = paddle::framework::vectorize(output->dims()); @@ -81,6 +87,7 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel { std::shared_ptr pool_p; std::shared_ptr workspace_memory; + if ((is_test == false) && (pooling_type == "max")) { // Training workspace_memory = handler.AcquireWorkspaceMemory(); @@ -129,14 +136,20 @@ class PoolMKLDNNGradOpKernel : public paddle::framework::OpKernel { std::vector ksize = ctx.Attr>("ksize"); std::vector strides = ctx.Attr>("strides"); std::vector paddings = ctx.Attr>("paddings"); + bool global_pooling = ctx.Attr("global_pooling"); + std::string padding_algorithm = ctx.Attr("padding_algorithm"); - if (ctx.Attr("global_pooling")) { - for (size_t i = 0; i < ksize.size(); ++i) { - paddings[i] = 0; - ksize[i] = static_cast(in_x->dims()[i + 2]); - } + auto in_x_dims = in_x->dims(); + framework::DDim data_dims = + framework::slice_ddim(in_x_dims, 2, in_x_dims.size()); + + if (global_pooling) { + UpdateKsize(&ksize, data_dims); } + UpdatePadding(&paddings, global_pooling, 0, padding_algorithm, data_dims, + strides, ksize); + auto& dev_ctx = ctx.template device_context(); @@ -162,6 +175,7 @@ class PoolMKLDNNGradOpKernel : public paddle::framework::OpKernel { std::shared_ptr pool_bwd_p; std::shared_ptr workspace_memory; + if (pooling_type == "max") { // Max - pooling needs Workspace workspace_memory = handler.AcquireWorkspaceMemory(); diff --git a/paddle/fluid/platform/mkldnn_helper.h b/paddle/fluid/platform/mkldnn_helper.h index f2acd2a82e3..3f2904fca68 100644 --- a/paddle/fluid/platform/mkldnn_helper.h +++ b/paddle/fluid/platform/mkldnn_helper.h @@ -211,5 +211,27 @@ inline std::string CreateKey(ArgTypes&&... args) { return key; } +inline std::vector> ToMkldnnPadding( + const std::vector& paddings) { + if (paddings.size() == 6) { + int padding_front = paddings[0]; + int padding_back = paddings[1]; + int padding_top = paddings[2]; + int padding_bottom = paddings[3]; + int padding_left = paddings[4]; + int padding_right = paddings[5]; + + return {{padding_front, padding_top, padding_left}, + {padding_back, padding_bottom, padding_right}}; + } else { + int padding_top = paddings[0]; + int padding_bottom = paddings[1]; + int padding_left = paddings[2]; + int padding_right = paddings[3]; + + return {{padding_top, padding_left}, {padding_bottom, padding_right}}; + } +} + } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/mkldnn_reuse.h b/paddle/fluid/platform/mkldnn_reuse.h index a18228a689b..024b8ef56fa 100644 --- a/paddle/fluid/platform/mkldnn_reuse.h +++ b/paddle/fluid/platform/mkldnn_reuse.h @@ -540,13 +540,12 @@ class PoolingMKLDNNHandler : public MKLDNNHandlerT padding_left_top(paddings); - std::vector padding_right_bottom(paddings); + auto mkldnn_paddings = ToMkldnnPadding(paddings); + if (ceil_mode) { CorrectOutputSize(src_dims, dst_dims, ksize, paddings, strides, - padding_right_bottom); + mkldnn_paddings[1]); } - this->AcquireForwardPrimitiveDescriptor( is_test ? mkldnn::prop_kind::forward_inference : mkldnn::prop_kind::forward_training, @@ -555,7 +554,7 @@ class PoolingMKLDNNHandler : public MKLDNNHandlerT(), MKLDNNMemoryFormat::any); + auto mkldnn_paddings = ToMkldnnPadding(paddings); + this->AcquireBackwardPrimitiveDescriptor( pooling_type == "max" ? mkldnn::algorithm::pooling_max : (exclude_padding ? 
diff --git a/paddle/fluid/platform/mkldnn_reuse.h b/paddle/fluid/platform/mkldnn_reuse.h
index a18228a689b..024b8ef56fa 100644
--- a/paddle/fluid/platform/mkldnn_reuse.h
+++ b/paddle/fluid/platform/mkldnn_reuse.h
@@ -540,13 +540,12 @@ class PoolingMKLDNNHandler : public MKLDNNHandlerT<T, mkldnn::pooling_forward,
-      std::vector<int> padding_left_top(paddings);
-      std::vector<int> padding_right_bottom(paddings);
+      auto mkldnn_paddings = ToMkldnnPadding(paddings);
+
       if (ceil_mode) {
         CorrectOutputSize(src_dims, dst_dims, ksize, paddings, strides,
-                          padding_right_bottom);
+                          mkldnn_paddings[1]);
       }
-
       this->AcquireForwardPrimitiveDescriptor(
           is_test ? mkldnn::prop_kind::forward_inference
                   : mkldnn::prop_kind::forward_training,
@@ -555,7 +554,7 @@ class PoolingMKLDNNHandler : public MKLDNNHandlerT<T, mkldnn::pooling_forward,
               ? mkldnn::algorithm::pooling_max
               : (exclude_padding
                      ? mkldnn::algorithm::pooling_avg_exclude_padding
                      : mkldnn::algorithm::pooling_avg_include_padding),
-          src_md, dst_md, strides, ksize, padding_left_top,
-          padding_right_bottom, mkldnn::padding_kind::zero);
+          src_md, dst_md, strides, ksize, mkldnn_paddings[0],
+          mkldnn_paddings[1], mkldnn::padding_kind::zero);
@@ -574,13 +573,15 @@ class PoolingMKLDNNHandler : public MKLDNNHandlerT<T, mkldnn::pooling_forward,
     auto diff_src_md = platform::MKLDNNMemDesc(
         diff_src_tz, platform::MKLDNNGetDataType<T>(), MKLDNNMemoryFormat::any);
 
+    auto mkldnn_paddings = ToMkldnnPadding(paddings);
+
     this->AcquireBackwardPrimitiveDescriptor(
         pooling_type == "max"
             ? mkldnn::algorithm::pooling_max
             : (exclude_padding
                    ? mkldnn::algorithm::pooling_avg_exclude_padding
                    : mkldnn::algorithm::pooling_avg_include_padding),
-        diff_src_md, diff_dst_md, strides, ksize, paddings, paddings,
-        mkldnn::padding_kind::zero);
+        diff_src_md, diff_dst_md, strides, ksize, mkldnn_paddings[0],
+        mkldnn_paddings[1], mkldnn::padding_kind::zero);
   }
 
   std::shared_ptr<mkldnn::memory> AcquireWorkspaceMemory(void) {
@@ -1035,17 +1036,19 @@ class ConvMKLDNNTemplateHandler : public MKLDNNHandler {
             dev_ctx_.GetBlob(key_conv_pd));
     if (conv_pd_ == nullptr) {
       mkldnn::memory::dims stride_dims = strides;
-      mkldnn::memory::dims padding_dims = paddings;
+
+      auto mkldnn_paddings = ToMkldnnPadding(paddings);
 
       auto conv_desc =
-          bias ? typename forward_t::desc(
-                     fwd_prop_kind, convolutional_algorithm<forward_t>::T,
-                     src, weights, *bias, dst, stride_dims, padding_dims,
-                     padding_dims, mkldnn::padding_kind::zero)
-               : typename forward_t::desc(
-                     fwd_prop_kind, convolutional_algorithm<forward_t>::T,
-                     src, weights, dst, stride_dims, padding_dims,
-                     padding_dims, mkldnn::padding_kind::zero);
+          bias
+              ? typename forward_t::desc(
+                    fwd_prop_kind, convolutional_algorithm<forward_t>::T, src,
+                    weights, *bias, dst, stride_dims, mkldnn_paddings[0],
+                    mkldnn_paddings[1], mkldnn::padding_kind::zero)
+              : typename forward_t::desc(
+                    fwd_prop_kind, convolutional_algorithm<forward_t>::T, src,
+                    weights, dst, stride_dims, mkldnn_paddings[0],
+                    mkldnn_paddings[1], mkldnn::padding_kind::zero);
 
       mkldnn::primitive_attr conv_attr =
           CreatePostOps(fuse_activation, fuse_alpha, fuse_beta,
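The `ceil_mode` branch above composes naturally with the begin/end split: `CorrectOutputSize` only ever grows the end-side list (`mkldnn_paddings[1]`). A sketch of why ceil rounding needs extra end padding (helper local to the sketch, not Paddle's `CorrectOutputSize` itself):

```cpp
// Sketch: floor- vs ceil-rounded pooling extent for one spatial dim. When
// ceil rounding yields a larger extent, only the end side (bottom/right)
// must be padded further so the last MKL-DNN window fits inside the input.
int PoolExtent(int in, int k, int pad_begin, int pad_end, int stride,
               bool ceil_mode) {
  int span = in + pad_begin + pad_end - k;
  return (ceil_mode ? (span + stride - 1) / stride : span / stride) + 1;
}
```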
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_int8_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_int8_mkldnn_op.py
index 9413554db93..22dd6bd121c 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_int8_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_int8_mkldnn_op.py
@@ -343,5 +343,27 @@ create_test_int8_class(TestWithGroup)
 create_test_int8_class(TestWith1x1)
 create_test_int8_class(TestWithInput1x1Filter1x1)
 
+
+class TestConv2dOp_AsyPadding_INT_MKLDNN(TestConv2dInt8Op):
+    def init_kernel_type(self):
+        self.use_mkldnn = True
+
+    def init_paddings(self):
+        self.pad = [0, 0, 1, 2]
+        self.padding_algorithm = "EXPLICIT"
+
+
+class TestConv2dOp_Same_INT_MKLDNN(TestConv2dOp_AsyPadding_INT_MKLDNN):
+    def init_paddings(self):
+        self.pad = [0, 0]
+        self.padding_algorithm = "SAME"
+
+
+class TestConv2dOp_Valid_INT_MKLDNN(TestConv2dOp_AsyPadding_INT_MKLDNN):
+    def init_paddings(self):
+        self.pad = [1, 1]
+        self.padding_algorithm = "VALID"
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py
index 756d10a9c7d..3ca1762f0f3 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py
@@ -19,7 +19,7 @@ import numpy as np
 
 import paddle.fluid.core as core
 from paddle.fluid.tests.unittests.op_test import OpTest
-from paddle.fluid.tests.unittests.test_conv2d_op import TestConv2dOp
+from paddle.fluid.tests.unittests.test_conv2d_op import TestConv2dOp, TestConv2dOp_v2
 
 
 def conv2d_bias_naive(out, bias):
@@ -176,5 +176,26 @@ class TestWithInput1x1Filter1x1(TestConv2dMKLDNNOp):
         self.groups = 3
 
 
+class TestConv2dOp_AsyPadding_MKLDNN(TestConv2dOp_v2):
+    def init_kernel_type(self):
+        self.use_mkldnn = True
+
+    def init_paddings(self):
+        self.pad = [0, 0, 1, 2]
+        self.padding_algorithm = "EXPLICIT"
+
+
+class TestConv2dOp_Same_MKLDNN(TestConv2dOp_AsyPadding_MKLDNN):
+    def init_paddings(self):
+        self.pad = [0, 0]
+        self.padding_algorithm = "SAME"
+
+
+class TestConv2dOp_Valid_MKLDNN(TestConv2dOp_AsyPadding_MKLDNN):
+    def init_paddings(self):
+        self.pad = [1, 1]
+        self.padding_algorithm = "VALID"
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_transpose_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_transpose_mkldnn_op.py
index 33f5ea7ad6f..428c093edf0 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_transpose_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_transpose_mkldnn_op.py
@@ -105,3 +105,24 @@ class TestMKLDNNWithStride(TestConv2dTransposeMKLDNNOp):
         self.pad = [1, 1]
         self.stride = [2, 2]
         self.input_size = [2, 3, 6, 6]  # NCHW
+
+
+class TestMKLDNNWithAsymPad(TestConv2dTransposeMKLDNNOp):
+    def init_test_case(self):
+        TestConv2dTransposeMKLDNNOp.init_test_case(self)
+        self.pad = [0, 0, 1, 2]
+        self.padding_algorithm = "EXPLICIT"
+
+
+class TestMKLDNNWithSamePad(TestConv2dTransposeMKLDNNOp):
+    def init_test_case(self):
+        TestConv2dTransposeMKLDNNOp.init_test_case(self)
+        self.pad = [0, 0]
+        self.padding_algorithm = "SAME"
+
+
+class TestMKLDNNWithValidPad(TestConv2dTransposeMKLDNNOp):
+    def init_test_case(self):
+        TestConv2dTransposeMKLDNNOp.init_test_case(self)
+        self.pad = [1, 1]
+        self.padding_algorithm = "VALID"
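For the transposed-convolution tests above, padding crops the output rather than growing the input, so asymmetric pads shrink each side independently. A sketch of the output extent per spatial dimension (standard formula; the helper is illustrative only):

```cpp
// Sketch: conv_transpose output extent for one spatial dim; the begin- and
// end-side paddings are each subtracted from the expanded size.
int ConvTransposeExtent(int in, int k, int stride, int pad_begin, int pad_end,
                        int dilation = 1) {
  return (in - 1) * stride + dilation * (k - 1) + 1 - pad_begin - pad_end;
}
```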
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_conv3d_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_conv3d_mkldnn_op.py
index 080b74502fb..a54640da779 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_conv3d_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_conv3d_mkldnn_op.py
@@ -16,7 +16,7 @@ from __future__ import print_function
 
 import unittest
 
-from paddle.fluid.tests.unittests.test_conv3d_op import TestConv3dOp, TestCase1, TestWithGroup1, TestWithGroup2, TestWith1x1, TestWithInput1x1Filter1x1
+from paddle.fluid.tests.unittests.test_conv3d_op import TestConv3dOp, TestCase1, TestWithGroup1, TestWithGroup2, TestWith1x1, TestWithInput1x1Filter1x1, TestConv3dOp_2
 
 
 class TestMKLDNN(TestConv3dOp):
@@ -55,5 +55,27 @@ class TestMKLDNNWithInput1x1Filter1x1(TestWithInput1x1Filter1x1):
         self.data_format = "NCHW"
 
 
+class TestConv3dOp_AsyPadding_MKLDNN(TestConv3dOp_2):
+    def init_kernel_type(self):
+        self.use_mkldnn = True
+        self.data_format = "NCHW"
+
+    def init_paddings(self):
+        self.pad = [1, 0, 1, 0, 0, 2]
+        self.padding_algorithm = "EXPLICIT"
+
+
+class TestConv3dOp_Same_MKLDNN(TestConv3dOp_AsyPadding_MKLDNN):
+    def init_paddings(self):
+        self.pad = [0, 0, 0]
+        self.padding_algorithm = "SAME"
+
+
+class TestConv3dOp_Valid_MKLDNN(TestConv3dOp_AsyPadding_MKLDNN):
+    def init_paddings(self):
+        self.pad = [1, 1, 1]
+        self.padding_algorithm = "VALID"
+
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_mkldnn_op.py
index feb2a563eea..5a9c10073a4 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_pool2d_mkldnn_op.py
@@ -15,7 +15,7 @@
 from __future__ import print_function
 
 import unittest
-from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op, TestCase1, TestCase2, TestCase3, TestCase4, TestCase5
+from paddle.fluid.tests.unittests.test_pool2d_op import *
 
 
 def create_test_mkldnn_use_ceil_class(parent):
@@ -53,5 +53,89 @@ create_test_mkldnn_class(TestCase3)
 create_test_mkldnn_class(TestCase4)
 create_test_mkldnn_class(TestCase5)
 
+
+class TestAsymPad(TestPool2D_Op):
+    def init_test_case(self):
+        self.ksize = [3, 3]
+        self.strides = [1, 1]
+
+    def init_paddings(self):
+        self.paddings = [1, 0, 1, 0]
+
+    def init_pool_type(self):
+        self.pool_type = "avg"
+        self.pool2D_forward_naive = avg_pool2D_forward_naive
+
+    def init_global_pool(self):
+        self.global_pool = False
+
+    def init_shape(self):
+        self.shape = [2, 3, 7, 7]
+
+    def init_kernel_type(self):
+        self.use_mkldnn = True
+
+
+class TestAsymPadCase1(TestAsymPad):
+    def init_paddings(self):
+        self.paddings = [1, 1, 0, 0]
+
+
+class TestAsymPadCase2(TestAsymPad):
+    def init_paddings(self):
+        self.paddings = [1, 0, 1, 2]
+
+
+class TestAsymPadCase3(TestAsymPad):
+    def init_paddings(self):
+        self.paddings = [1, 2, 1, 2]
+
+
+class TestAsymPadCase4(TestAsymPad):
+    def init_paddings(self):
+        self.paddings = [1, 0, 1, 2]
+
+
+class TestAsymPadCase5(TestAsymPad):
+    def init_paddings(self):
+        self.paddings = [2, 2, 1, 2]
+
+
+class TestAsymPadMaxCase1(TestAsymPadCase1):
+    def init_pool_type(self):
+        self.pool_type = "max"
+
+
+class TestAsymPadMaxCase2(TestAsymPadCase2):
+    def init_pool_type(self):
+        self.pool_type = "max"
+
+
+class TestAsymPadMaxCase3(TestAsymPadCase3):
+    def init_pool_type(self):
+        self.pool_type = "max"
+
+
+class TestAsymPadMaxCase4(TestAsymPadCase4):
+    def init_pool_type(self):
+        self.pool_type = "max"
+
+
+class TestAsymPadMaxCase5(TestAsymPadCase5):
+    def init_pool_type(self):
+        self.pool_type = "max"
+
+
+class TestAsymPadSame(TestAsymPad):
+    def init_paddings(self):
+        self.paddings = [0, 0]
+        self.padding_algorithm = "SAME"
+
+
+class TestAsymPadValid(TestAsymPad):
+    def init_paddings(self):
+        self.paddings = [0, 0, 0, 0]
+        self.padding_algorithm = "VALID"
+
+
 if __name__ == '__main__':
     unittest.main()
--
GitLab