Commit 3fda695b authored by Adam, committed by Tao Luo

Add support for asymmetric padding in MKLDNN pool, conv and conv_transpose (#21062)

* Add asymmetric padding support for mkldnn pooling
test=develop

* Add asymmetric padding support for mkldnn conv
test=develop

* Add asymmetric padding support for mkldnn conv_transpose
test=develop
Parent 1957192f
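For context: "asymmetric" here means the padding added before and after a spatial dimension may differ, so the kernels below can no longer pass a single `paddings` vector twice to MKL-DNN. A minimal, self-contained sketch of the output-size arithmetic with separate before/after pads (illustrative C++ only, not Paddle code; dilation is assumed to be 1, as the MKL-DNN kernels below enforce):

#include <iostream>

// Output extent along one dimension of a convolution/pooling window when the
// padding before and after that dimension may differ (asymmetric padding).
int OutSize(int in, int kernel, int stride, int pad_before, int pad_after) {
  return (in + pad_before + pad_after - kernel) / stride + 1;
}

int main() {
  // Width padded with left=1, right=2, cf. pad = [0, 0, 1, 2] in the tests
  // below, read as {top, bottom, left, right}.
  std::cout << OutSize(/*in=*/7, /*kernel=*/3, /*stride=*/1,
                       /*pad_before=*/1, /*pad_after=*/2)
            << std::endl;  // prints 8
  return 0;
}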
@@ -171,8 +171,20 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     float fuse_beta = ctx.Attr<float>("fuse_beta");
     bool fuse_residual_conn = ctx.Attr<bool>("fuse_residual_connection");
     int groups = ctx.Attr<int>("groups");
+    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
     bool is_conv3d = strides.size() == 3U;
+    auto input_dims = input->dims();
+    auto data_dims = framework::slice_ddim(input_dims, 2, input_dims.size());
+    auto filter_dims = filter->dims();
+    auto filter_data_dims =
+        framework::slice_ddim(filter_dims, 2, filter_dims.size());
+    auto ksize = framework::vectorize<int>(filter_data_dims);
+    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
+                             data_dims, strides, ksize);
     PADDLE_ENFORCE(
         is_conv3d
             ? dilations.size() == 3 && dilations[0] == 1 && dilations[1] == 1 &&
@@ -435,12 +447,25 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
     std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
     std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
+    std::string padding_algorithm =
+        ctx.Attr<std::string>("padding_algorithm");
     bool is_conv3d = strides.size() == 3U;
     PADDLE_ENFORCE_NE(is_conv3d, true,
                       "int8 does not support conv3d currently");
+    auto input_dims = input->dims();
+    auto data_dims = framework::slice_ddim(input_dims, 2, input_dims.size());
+    auto filter_dims = filter->dims();
+    auto filter_data_dims =
+        framework::slice_ddim(filter_dims, 2, filter_dims.size());
+    auto ksize = framework::vectorize<int>(filter_data_dims);
+    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
+                             data_dims, strides, ksize);
     int groups = ctx.Attr<int>("groups");
     auto weights_tz = paddle::framework::vectorize<int>(filter->dims());
     int g = std::max(groups, 1);
@@ -696,6 +721,7 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
     std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
     std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
     std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
+    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
     int groups = ctx.Attr<int>("groups");
     bool is_conv3d = strides.size() == 3U;
@@ -705,6 +731,17 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
     T* input_grad_data = nullptr;
     T* filter_grad_data = nullptr;
+    auto input_dims = input->dims();
+    auto data_dims = framework::slice_ddim(input_dims, 2, input_dims.size());
+    auto filter_dims = filter->dims();
+    auto filter_data_dims =
+        framework::slice_ddim(filter_dims, 2, filter_dims.size());
+    auto ksize = framework::vectorize<int>(filter_data_dims);
+    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
+                             data_dims, strides, ksize);
     auto src_tz = paddle::framework::vectorize<int>(input->dims());
     auto weights_tz = paddle::framework::vectorize<int>(filter->dims());
     int g = std::max(groups, 1);
@@ -766,10 +803,13 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
     PADDLE_ENFORCE_NE(conv_pd, nullptr,
                       "Fail to find conv_pd in device context");
+    auto mkldnn_paddings = platform::ToMkldnnPadding(paddings);
     // create backward convolution weights primitive descriptor
     auto conv_bwd_weights_desc = mkldnn::convolution_backward_weights::desc(
         mkldnn::convolution_direct, src_md, diff_weights_md, diff_dst_md,
-        strides, paddings, paddings, mkldnn::padding_kind::zero);
+        strides, mkldnn_paddings[0], mkldnn_paddings[1],
+        mkldnn::padding_kind::zero);
     auto conv_bwd_weights_pd =
         std::make_shared<mkldnn::convolution_backward_weights::primitive_desc>(
             conv_bwd_weights_desc, mkldnn_engine, *conv_pd);
@@ -777,7 +817,8 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
     // create backward convolution data primitive descriptor
     auto conv_bwd_data_desc = mkldnn::convolution_backward_data::desc(
         mkldnn::convolution_direct, diff_src_md, weights_md, diff_dst_md,
-        strides, paddings, paddings, mkldnn::padding_kind::zero);
+        strides, mkldnn_paddings[0], mkldnn_paddings[1],
+        mkldnn::padding_kind::zero);
     auto conv_bwd_data_pd =
         std::make_shared<mkldnn::convolution_backward_data::primitive_desc>(
             conv_bwd_data_desc, mkldnn_engine, *conv_pd);
......
@@ -16,6 +16,7 @@
 #include "paddle/fluid/framework/data_layout_transform.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/memory/malloc.h"
+#include "paddle/fluid/operators/conv_op.h"
 #include "paddle/fluid/platform/mkldnn_reuse.h"

 namespace paddle {
@@ -74,6 +75,18 @@ class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
     std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
     int groups = ctx.Attr<int>("groups");
+    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
+    auto input_dims = input->dims();
+    auto data_dims = framework::slice_ddim(input_dims, 2, input_dims.size());
+    auto filter_dims = filter->dims();
+    auto filter_data_dims =
+        framework::slice_ddim(filter_dims, 2, filter_dims.size());
+    auto ksize = framework::vectorize<int>(filter_data_dims);
+    UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
+                             data_dims, strides, ksize);
     PADDLE_ENFORCE(
         dilations.size() == 2 && dilations[0] == 1 && dilations[1] == 1,
......
@@ -50,20 +50,26 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
     std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
     std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
-    if (ctx.Attr<bool>("global_pooling")) {
-      for (size_t i = 0; i < ksize.size(); ++i) {
-        paddings[i] = 0;
-        ksize[i] = static_cast<int>(input->dims()[i + 2]);
-      }
-    }
-    // Only 2D pooling is supported now
-    PADDLE_ENFORCE(ksize.size() == 2, "ksize must be 2D, i.e. 2D pooling");
-    PADDLE_ENFORCE(pooling_type == "max" || pooling_type == "avg",
-                   "pooling_type must be 'max' or 'avg'");
-    PADDLE_ENFORCE(input->dims().size() == 4,
-                   "Input dim must be with 4, i.e. NCHW");
+    bool global_pooling = ctx.Attr<bool>("global_pooling");
+    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
+    // Only 2D pooling is supported now
+    PADDLE_ENFORCE_EQ(ksize.size(), 2, "ksize must be 2D, i.e. 2D pooling");
+    PADDLE_ENFORCE_EQ(pooling_type == "max" || pooling_type == "avg", true,
+                      "pooling_type must be 'max' or 'avg'");
+    PADDLE_ENFORCE_EQ(input->dims().size(), 4,
+                      "Input dim must be with 4, i.e. NCHW");
+    auto input_dims = input->dims();
+    framework::DDim data_dims =
+        framework::slice_ddim(input_dims, 2, input_dims.size());
+    if (global_pooling) {
+      UpdateKsize(&ksize, data_dims);
+    }
+    UpdatePadding(&paddings, global_pooling, 0, padding_algorithm, data_dims,
+                  strides, ksize);
     auto src_tz = paddle::framework::vectorize<int>(input->dims());
     auto dst_tz = paddle::framework::vectorize<int>(output->dims());
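The "SAME"/"VALID"/"EXPLICIT" values of `padding_algorithm` exercised in the tests below are resolved by `UpdatePadding` (and `UpdatePaddingAndDilation` in the conv kernels) before the MKL-DNN descriptors are built. A rough standalone sketch of that rule, under the assumption that Paddle follows the usual TensorFlow-style convention; the names and `main()` harness are illustrative, not the actual helper:

#include <algorithm>
#include <iostream>
#include <string>
#include <utility>

// Resolve the (pad_before, pad_after) pair for one spatial dimension.
// "SAME" pads just enough so the output has ceil(in / stride) elements and
// puts the extra pixel (if any) at the trailing edge; "VALID" uses no padding;
// "EXPLICIT" keeps whatever the user specified.
std::pair<int, int> ResolvePadding(const std::string& algo, int in, int kernel,
                                   int stride, int pad_before, int pad_after) {
  if (algo == "SAME") {
    int out = (in + stride - 1) / stride;
    int pad_sum = std::max((out - 1) * stride + kernel - in, 0);
    return {pad_sum / 2, pad_sum - pad_sum / 2};
  }
  if (algo == "VALID") return {0, 0};
  return {pad_before, pad_after};
}

int main() {
  auto p = ResolvePadding("SAME", /*in=*/6, /*kernel=*/3, /*stride=*/2, 0, 0);
  std::cout << p.first << " " << p.second << std::endl;  // prints "0 1"
  return 0;
}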
@@ -81,6 +87,7 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     std::shared_ptr<mkldnn::pooling_forward> pool_p;
     std::shared_ptr<mkldnn::memory> workspace_memory;
     if ((is_test == false) && (pooling_type == "max")) {
       // Training
       workspace_memory = handler.AcquireWorkspaceMemory();
@@ -129,14 +136,20 @@ class PoolMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
     std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
     std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
     std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
-    if (ctx.Attr<bool>("global_pooling")) {
-      for (size_t i = 0; i < ksize.size(); ++i) {
-        paddings[i] = 0;
-        ksize[i] = static_cast<int>(in_x->dims()[i + 2]);
-      }
-    }
+    bool global_pooling = ctx.Attr<bool>("global_pooling");
+    std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
+    auto in_x_dims = in_x->dims();
+    framework::DDim data_dims =
+        framework::slice_ddim(in_x_dims, 2, in_x_dims.size());
+    if (global_pooling) {
+      UpdateKsize(&ksize, data_dims);
+    }
+    UpdatePadding(&paddings, global_pooling, 0, padding_algorithm, data_dims,
+                  strides, ksize);
     auto& dev_ctx =
         ctx.template device_context<platform::MKLDNNDeviceContext>();
@@ -162,6 +175,7 @@ class PoolMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
     std::shared_ptr<mkldnn::pooling_backward> pool_bwd_p;
     std::shared_ptr<mkldnn::memory> workspace_memory;
     if (pooling_type == "max") {
       // Max - pooling needs Workspace
       workspace_memory = handler.AcquireWorkspaceMemory();
......
@@ -211,5 +211,27 @@ inline std::string CreateKey(ArgTypes&&... args) {
   return key;
 }
+
+inline std::vector<std::vector<int>> ToMkldnnPadding(
+    const std::vector<int>& paddings) {
+  if (paddings.size() == 6) {
+    int padding_front = paddings[0];
+    int padding_back = paddings[1];
+    int padding_top = paddings[2];
+    int padding_bottom = paddings[3];
+    int padding_left = paddings[4];
+    int padding_right = paddings[5];
+    return {{padding_front, padding_top, padding_left},
+            {padding_back, padding_bottom, padding_right}};
+  } else {
+    int padding_top = paddings[0];
+    int padding_bottom = paddings[1];
+    int padding_left = paddings[2];
+    int padding_right = paddings[3];
+    return {{padding_top, padding_left}, {padding_bottom, padding_right}};
+  }
+}
+
 }  // namespace platform
 }  // namespace paddle
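A standalone sketch of what the new `ToMkldnnPadding` helper produces: an explicit Paddle padding vector ({top, bottom, left, right} for 2D, {front, back, top, bottom, left, right} for 3D) is split into the leading-edge and trailing-edge vectors that the MKL-DNN descriptors take. The mapping below is condensed from the hunk above; the `main()` harness is illustrative only:

#include <iostream>
#include <vector>

// Same mapping as platform::ToMkldnnPadding above, written out compactly.
std::vector<std::vector<int>> ToMkldnnPadding(const std::vector<int>& paddings) {
  if (paddings.size() == 6) {
    // {front, back, top, bottom, left, right} -> {front, top, left} / {back, bottom, right}
    return {{paddings[0], paddings[2], paddings[4]},
            {paddings[1], paddings[3], paddings[5]}};
  }
  // {top, bottom, left, right} -> {top, left} / {bottom, right}
  return {{paddings[0], paddings[2]}, {paddings[1], paddings[3]}};
}

int main() {
  auto p = ToMkldnnPadding({0, 0, 1, 2});  // top=0, bottom=0, left=1, right=2
  std::cout << p[0][0] << " " << p[0][1] << "  "       // leading edge:  0 1
            << p[1][0] << " " << p[1][1] << std::endl;  // trailing edge: 0 2
  return 0;
}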
@@ -540,13 +540,12 @@ class PoolingMKLDNNHandler : public MKLDNNHandlerT<T, mkldnn::pooling_forward,
     auto dst_md =
         platform::MKLDNNMemDesc(dst_dims, dt, MKLDNNMemoryFormat::any);
-    std::vector<int> padding_left_top(paddings);
-    std::vector<int> padding_right_bottom(paddings);
+    auto mkldnn_paddings = ToMkldnnPadding(paddings);
     if (ceil_mode) {
       CorrectOutputSize(src_dims, dst_dims, ksize, paddings, strides,
-                        padding_right_bottom);
+                        mkldnn_paddings[1]);
     }
     this->AcquireForwardPrimitiveDescriptor(
         is_test ? mkldnn::prop_kind::forward_inference
                 : mkldnn::prop_kind::forward_training,
@@ -555,7 +554,7 @@ class PoolingMKLDNNHandler : public MKLDNNHandlerT<T, mkldnn::pooling_forward,
             : (exclude_padding
                    ? mkldnn::algorithm::pooling_avg_exclude_padding
                    : mkldnn::algorithm::pooling_avg_include_padding),
-        src_md, dst_md, strides, ksize, padding_left_top, padding_right_bottom,
+        src_md, dst_md, strides, ksize, mkldnn_paddings[0], mkldnn_paddings[1],
         mkldnn::padding_kind::zero);
   }
@@ -578,14 +577,16 @@ class PoolingMKLDNNHandler : public MKLDNNHandlerT<T, mkldnn::pooling_forward,
         mkldnn::memory::desc(diff_src_dims, platform::MKLDNNGetDataType<T>(),
                              MKLDNNMemoryFormat::any);
+    auto mkldnn_paddings = ToMkldnnPadding(paddings);
     this->AcquireBackwardPrimitiveDescriptor(
         pooling_type == "max"
             ? mkldnn::algorithm::pooling_max
             : (exclude_padding
                    ? mkldnn::algorithm::pooling_avg_exclude_padding
                    : mkldnn::algorithm::pooling_avg_include_padding),
-        diff_src_md, diff_dst_md, strides, ksize, paddings, paddings,
-        mkldnn::padding_kind::zero);
+        diff_src_md, diff_dst_md, strides, ksize, mkldnn_paddings[0],
+        mkldnn_paddings[1], mkldnn::padding_kind::zero);
   }

   std::shared_ptr<mkldnn::memory> AcquireWorkspaceMemory(void) {
@@ -1035,17 +1036,19 @@ class ConvMKLDNNTemplateHandler : public MKLDNNHandler {
             dev_ctx_.GetBlob(key_conv_pd));
     if (conv_pd_ == nullptr) {
       mkldnn::memory::dims stride_dims = strides;
-      mkldnn::memory::dims padding_dims = paddings;
+      auto mkldnn_paddings = ToMkldnnPadding(paddings);
       auto conv_desc =
-          bias ? typename forward_t::desc(
-                     fwd_prop_kind, convolutional_algorithm<forward_t>::T,
-                     src, weights, *bias, dst, stride_dims, padding_dims,
-                     padding_dims, mkldnn::padding_kind::zero)
-               : typename forward_t::desc(
-                     fwd_prop_kind, convolutional_algorithm<forward_t>::T,
-                     src, weights, dst, stride_dims, padding_dims,
-                     padding_dims, mkldnn::padding_kind::zero);
+          bias
+              ? typename forward_t::desc(
+                    fwd_prop_kind, convolutional_algorithm<forward_t>::T, src,
+                    weights, *bias, dst, stride_dims, mkldnn_paddings[0],
+                    mkldnn_paddings[1], mkldnn::padding_kind::zero)
+              : typename forward_t::desc(
+                    fwd_prop_kind, convolutional_algorithm<forward_t>::T, src,
+                    weights, dst, stride_dims, mkldnn_paddings[0],
+                    mkldnn_paddings[1], mkldnn::padding_kind::zero);
       mkldnn::primitive_attr conv_attr =
           CreatePostOps(fuse_activation, fuse_alpha, fuse_beta,
......
@@ -343,5 +343,27 @@ create_test_int8_class(TestWithGroup)
 create_test_int8_class(TestWith1x1)
 create_test_int8_class(TestWithInput1x1Filter1x1)
+
+
+class TestConv2dOp_AsyPadding_INT_MKLDNN(TestConv2dInt8Op):
+    def init_kernel_type(self):
+        self.use_mkldnn = True
+
+    def init_paddings(self):
+        self.pad = [0, 0, 1, 2]
+        self.padding_algorithm = "EXPLICIT"
+
+
+class TestConv2dOp_Same_INT_MKLDNN(TestConv2dOp_AsyPadding_INT_MKLDNN):
+    def init_paddings(self):
+        self.pad = [0, 0]
+        self.padding_algorithm = "SAME"
+
+
+class TestConv2dOp_Valid_INT_MKLDNN(TestConv2dOp_AsyPadding_INT_MKLDNN):
+    def init_paddings(self):
+        self.pad = [1, 1]
+        self.padding_algorithm = "VALID"
+
 if __name__ == '__main__':
     unittest.main()
@@ -19,7 +19,7 @@ import numpy as np
 import paddle.fluid.core as core
 from paddle.fluid.tests.unittests.op_test import OpTest
-from paddle.fluid.tests.unittests.test_conv2d_op import TestConv2dOp
+from paddle.fluid.tests.unittests.test_conv2d_op import TestConv2dOp, TestConv2dOp_v2


 def conv2d_bias_naive(out, bias):
@@ -176,5 +176,26 @@ class TestWithInput1x1Filter1x1(TestConv2dMKLDNNOp):
         self.groups = 3
+
+
+class TestConv2dOp_AsyPadding_MKLDNN(TestConv2dOp_v2):
+    def init_kernel_type(self):
+        self.use_mkldnn = True
+
+    def init_paddings(self):
+        self.pad = [0, 0, 1, 2]
+        self.padding_algorithm = "EXPLICIT"
+
+
+class TestConv2dOp_Same_MKLDNN(TestConv2dOp_AsyPadding_MKLDNN):
+    def init_paddings(self):
+        self.pad = [0, 0]
+        self.padding_algorithm = "SAME"
+
+
+class TestConv2dOp_Valid_MKLDNN(TestConv2dOp_AsyPadding_MKLDNN):
+    def init_paddings(self):
+        self.pad = [1, 1]
+        self.padding_algorithm = "VALID"
+
 if __name__ == '__main__':
     unittest.main()
@@ -105,3 +105,24 @@ class TestMKLDNNWithStride(TestConv2dTransposeMKLDNNOp):
         self.pad = [1, 1]
         self.stride = [2, 2]
         self.input_size = [2, 3, 6, 6]  # NCHW
+
+
+class TestMKLDNNWithAsymPad(TestConv2dTransposeMKLDNNOp):
+    def init_test_case(self):
+        TestConv2dTransposeMKLDNNOp.init_test_case(self)
+        self.pad = [0, 0, 1, 2]
+        self.padding_algorithm = "EXPLICIT"
+
+
+class TestMKLDNNWithSamePad(TestConv2dTransposeMKLDNNOp):
+    def init_test_case(self):
+        TestConv2dTransposeMKLDNNOp.init_test_case(self)
+        self.pad = [0, 0]
+        self.padding_algorithm = "SAME"
+
+
+class TestMKLDNNWithValidPad(TestConv2dTransposeMKLDNNOp):
+    def init_test_case(self):
+        TestConv2dTransposeMKLDNNOp.init_test_case(self)
+        self.pad = [1, 1]
+        self.padding_algorithm = "VALID"
@@ -16,7 +16,7 @@ from __future__ import print_function
 import unittest
-from paddle.fluid.tests.unittests.test_conv3d_op import TestConv3dOp, TestCase1, TestWithGroup1, TestWithGroup2, TestWith1x1, TestWithInput1x1Filter1x1
+from paddle.fluid.tests.unittests.test_conv3d_op import TestConv3dOp, TestCase1, TestWithGroup1, TestWithGroup2, TestWith1x1, TestWithInput1x1Filter1x1, TestConv3dOp_2


 class TestMKLDNN(TestConv3dOp):
@@ -55,5 +55,27 @@ class TestMKLDNNWithInput1x1Filter1x1(TestWithInput1x1Filter1x1):
         self.data_format = "NCHW"
+
+
+class TestConv3dOp_AsyPadding_MKLDNN(TestConv3dOp):
+    def init_kernel_type(self):
+        self.use_mkldnn = True
+        self.data_format = "NCHW"
+
+    def init_paddings(self):
+        self.pad = [1, 0, 1, 0, 0, 2]
+        self.padding_algorithm = "EXPLICIT"
+
+
+class TestConv3dOp_Same_MKLDNN(TestConv3dOp_AsyPadding_MKLDNN):
+    def init_paddings(self):
+        self.pad = [0, 0, 0]
+        self.padding_algorithm = "SAME"
+
+
+class TestConv3dOp_Valid_MKLDNN(TestConv3dOp_AsyPadding_MKLDNN):
+    def init_paddings(self):
+        self.pad = [1, 1, 1]
+        self.padding_algorithm = "VALID"
+
 if __name__ == '__main__':
     unittest.main()
@@ -15,7 +15,7 @@
 from __future__ import print_function

 import unittest
-from paddle.fluid.tests.unittests.test_pool2d_op import TestPool2D_Op, TestCase1, TestCase2, TestCase3, TestCase4, TestCase5
+from paddle.fluid.tests.unittests.test_pool2d_op import *


 def create_test_mkldnn_use_ceil_class(parent):
@@ -53,5 +53,93 @@ create_test_mkldnn_class(TestCase3)
 create_test_mkldnn_class(TestCase4)
 create_test_mkldnn_class(TestCase5)
+
+
+class TestAsymPad(TestPool2D_Op):
+    def init_test_case(self):
+        self.ksize = [3, 3]
+        self.strides = [1, 1]
+
+    def init_paddings(self):
+        self.paddings = [1, 0, 1, 0]
+
+    def init_pool_type(self):
+        self.pool_type = "avg"
+        self.pool2D_forward_naive = avg_pool2D_forward_naive
+
+    def init_global_pool(self):
+        self.global_pool = False
+
+    def init_shape(self):
+        self.shape = [2, 3, 7, 7]
+
+    def init_kernel_type(self):
+        self.use_mkldnn = True
+
+    def init_global_pool(self):
+        self.global_pool = False
+
+
+class TestAsymPadCase1(TestAsymPad):
+    def init_paddings(self):
+        self.paddings = [1, 1, 0, 0]
+
+
+class TestAsymPadCase2(TestAsymPad):
+    def init_paddings(self):
+        self.paddings = [1, 0, 1, 2]
+
+
+class TestAsymPadCase3(TestAsymPad):
+    def init_paddings(self):
+        self.paddings = [1, 2, 1, 2]
+
+
+class TestAsymPadCase4(TestAsymPad):
+    def init_paddings(self):
+        self.paddings = [1, 0, 1, 2]
+
+
+class TestAsymPadCase5(TestAsymPad):
+    def init_paddings(self):
+        self.paddings = [2, 2, 1, 2]
+
+
+class TestAsymPadMaxCase1(TestAsymPadCase1):
+    def init_pool_type(self):
+        self.pool_type = "max"
+
+
+class TestAsymPadMaxCase2(TestAsymPadCase2):
+    def init_pool_type(self):
+        self.pool_type = "max"
+
+
+class TestAsymPadMaxCase3(TestAsymPadCase3):
+    def init_pool_type(self):
+        self.pool_type = "max"
+
+
+class TestAsymPadMaxCase4(TestAsymPadCase4):
+    def init_pool_type(self):
+        self.pool_type = "max"
+
+
+class TestAsymPadMaxCase5(TestAsymPadCase5):
+    def init_pool_type(self):
+        self.pool_type = "max"
+
+
+class TestAsymPadSame(TestAsymPad):
+    def init_paddings(self):
+        self.paddings = [0, 0]
+        self.padding_algorithm = "SAME"
+
+
+class TestAsymPadValid(TestAsymPad):
+    def init_paddings(self):
+        self.paddings = [0, 0, 0, 0]
+        self.padding_algorithm = "VALID"
+
 if __name__ == '__main__':
     unittest.main()