From 08f63c4d1253007ee6290f8dfab3c31195940168 Mon Sep 17 00:00:00 2001
From: Michal Gallus
Date: Tue, 13 Nov 2018 09:12:10 +0100
Subject: [PATCH] MKLDNN elementwise_mul: Lint changes to UT & integration test=develop

---
 .../operators/elementwise/elementwise_op.h | 24 ++++-----
 .../operators/elementwise_mul_mkldnn_op.cc | 54 +++++++++----------
 .../test_elementwise_mul_mkldnn_op.py | 12 ++++-
 3 files changed, 50 insertions(+), 40 deletions(-)

diff --git a/paddle/fluid/operators/elementwise/elementwise_op.h b/paddle/fluid/operators/elementwise/elementwise_op.h
index 16d919689cc..85a7817be9b 100644
--- a/paddle/fluid/operators/elementwise/elementwise_op.h
+++ b/paddle/fluid/operators/elementwise/elementwise_op.h
@@ -98,19 +98,19 @@ class ElementwiseOpMaker : public framework::OpProtoAndCheckerMaker {
AddAttr<bool>("use_mkldnn", "(bool, default false). Used by MKLDNN.") .SetDefault(false); AddAttr<std::string>( - "x_data_format", - "(string, default NCHW) Only used in mkldnn" - "An optional string from: \"NHWC\", \"NCHW\", \"NCHW16C\", \"NCHW8C\". " - "Defaults to \"\". Specify the data format of the output data, " - "the input will be transformed automatically. ") - .SetDefault(""); + "x_data_format", + "(string, default NCHW) Only used in mkldnn" + "An optional string from: \"NHWC\", \"NCHW\", \"NCHW16C\", \"NCHW8C\". " + "Defaults to \"\". Specify the data format of the output data, " + "the input will be transformed automatically. ") + .SetDefault(""); AddAttr<std::string>( - "y_data_format", - "(string, default \"\") Only used in mkldnn" - "An optional string from: \"NHWC\", \"NCHW\", \"NCHW16C\", \"NCHW8C\". " - "Defaults to \"\". Specify the data format of the output data, " - "the input will be transformed automatically. ") - .SetDefault(""); + "y_data_format", + "(string, default \"\") Only used in mkldnn" + "An optional string from: \"NHWC\", \"NCHW\", \"NCHW16C\", \"NCHW8C\". " + "Defaults to \"\". Specify the data format of the output data, " + "the input will be transformed automatically. ")
") + .SetDefault(""); AddComment(string::Sprintf(R"DOC( Elementwise %s Operator diff --git a/paddle/fluid/operators/elementwise_mul_mkldnn_op.cc b/paddle/fluid/operators/elementwise_mul_mkldnn_op.cc index 58aadd00331..6371c9f8393 100644 --- a/paddle/fluid/operators/elementwise_mul_mkldnn_op.cc +++ b/paddle/fluid/operators/elementwise_mul_mkldnn_op.cc @@ -71,13 +71,13 @@ void check(const float* x, const float* y, float* z, int w) { static mkldnn::memory::format StringToMKLDNNFormat(std::string& format) { std::transform(format.begin(), format.end(), format.begin(), ::tolower); - if(!format.compare("nchw")) { + if (!format.compare("nchw")) { return memory::format::nchw; - } else if(!format.compare("nchw16c")) { + } else if (!format.compare("nchw16c")) { return memory::format::nChw16c; - } else if(!format.compare("nchw8c")) { + } else if (!format.compare("nchw8c")) { return memory::format::nChw8c; - } else if(!format.compare("nhwc")) { + } else if (!format.compare("nhwc")) { return memory::format::nhwc; } else { return memory::format::any; @@ -85,8 +85,8 @@ static mkldnn::memory::format StringToMKLDNNFormat(std::string& format) { } static void UpdateDataFormat(const framework::ExecutionContext& ctx, - framework::Tensor* tensor, const char* attribute) { - if(ctx.op().HasAttr(attribute)) { + framework::Tensor* tensor, const char* attribute) { + if (ctx.op().HasAttr(attribute)) { auto format_as_string = ctx.Attr(attribute); auto format = StringToMKLDNNFormat(format_as_string); if (format != memory::format::any) { @@ -98,19 +98,19 @@ static void UpdateDataFormat(const framework::ExecutionContext& ctx, template static void ReorderInput(framework::Tensor* tensor, const platform::Place& place, - const mkldnn::engine& engine, - bool isFourDim) { + const mkldnn::engine& engine, bool isFourDim) { using platform::to_void_cast; auto dims = paddle::framework::vectorize2int(tensor->dims()); framework::Tensor out_tensor; out_tensor.Resize(tensor->dims()); out_tensor.set_format(isFourDim ? 
out_tensor.set_layout(tensor->layout()); - mkldnn::memory input_memory = {{{dims, platform::MKLDNNGetDataType<T>(), - tensor->format()}, engine}, to_void_cast<T>(tensor->data<T>())}; - mkldnn::memory output_memory = {{{dims, platform::MKLDNNGetDataType<T>(), - out_tensor.format()}, engine}, - to_void_cast<T>(out_tensor.mutable_data<T>(place))}; + mkldnn::memory input_memory = { + {{dims, platform::MKLDNNGetDataType<T>(), tensor->format()}, engine}, + to_void_cast<T>(tensor->data<T>())}; + mkldnn::memory output_memory = { + {{dims, platform::MKLDNNGetDataType<T>(), out_tensor.format()}, engine}, + to_void_cast<T>(out_tensor.mutable_data<T>(place))}; platform::Reorder(input_memory, output_memory); tensor->ShareDataWith(out_tensor); }
@@ -163,21 +163,19 @@ class ElementwiseMulMKLDNNKernel : public framework::OpKernel<T> {
vector_mul mul; using mul_func_t = - void (*)(const float *, const float *, float *, int, int); + void (*)(const float*, const float*, float*, int, int); - mul_func_t mul_func = (mul_func_t) mul.getCode(); + mul_func_t mul_func = (mul_func_t)mul.getCode(); - #pragma omp parallel for collapse(2) +#pragma omp parallel for collapse(2) for (int ni = 0; ni < n; ni++) { for (int ci = 0; ci < C; ci++) { auto ptr_x = - x_data + ni * C * h * w * simd_width + - ci * h * w * simd_width; + x_data + ni * C * h * w * simd_width + ci * h * w * simd_width; auto ptr_y = y_data + ni * C * simd_width + ci * simd_width; auto ptr_z = - z_data + ni * C * h * w * simd_width + - ci * h * w * simd_width; + z_data + ni * C * h * w * simd_width + ci * h * w * simd_width; mul_func(ptr_x, ptr_y, ptr_z, h, w); }
@@ -189,18 +187,20 @@ class ElementwiseMulMKLDNNKernel : public framework::OpKernel<T> {
} else { // Fallback to naive version: const bool are_inputs_in_same_format = x->format() == y->format(); - const bool is_x_nchw= x->format() == memory::format::nchw; + const bool is_x_nchw = x->format() == memory::format::nchw; const bool is_x_nc = x->format() == memory::format::nc; - const bool is_y_nchw= y->format() == memory::format::nchw; + const bool is_y_nchw = y->format() == memory::format::nchw; const bool is_y_nc = y->format() == memory::format::nc; - if(!are_inputs_in_same_format) { + if (!are_inputs_in_same_format) { using platform::MKLDNNDeviceContext; auto& dev_ctx = ctx.template device_context<MKLDNNDeviceContext>(); const auto& mkldnn_engine = dev_ctx.GetEngine(); - if(!(is_x_nchw || is_x_nc)) - ReorderInput<T>((Tensor*)x, ctx.GetPlace(), mkldnn_engine, x->dims().size() == 4); - if(!(is_y_nchw || is_y_nc)) - ReorderInput<T>((Tensor*)y, ctx.GetPlace(), mkldnn_engine, y->dims().size() == 4); + if (!(is_x_nchw || is_x_nc)) + ReorderInput<T>((Tensor*)x, ctx.GetPlace(), mkldnn_engine, + x->dims().size() == 4); + if (!(is_y_nchw || is_y_nc)) + ReorderInput<T>((Tensor*)y, ctx.GetPlace(), mkldnn_engine, + y->dims().size() == 4); } auto mul_func = [](T a, T b) -> T { return a * b; };

diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_mul_mkldnn_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_mul_mkldnn_op.py
index 77d24a81f2f..56e2ca849af 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_mul_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_mul_mkldnn_op.py
@@ -20,6 +20,7 @@ import paddle.fluid.core as core
from paddle.fluid.op import Operator from test_elementwise_mul_op import * + class TestElementwiseMulMKLDNNOp_BroadcastNCHW16c(ElementwiseMulOp): def init_input_output(self): x = np.random.rand(1, 16, 2, 2).astype(self.dtype)
@@ -49,7 +50,9 @@ class TestElementwiseMulMKLDNNOp_BroadcastNCHW16c(ElementwiseMulOp):
def test_check_grad_ingore_y(self): pass -@unittest.skip("Not implemented yet.") # TODO(mgallus): enable when implemented. + +@unittest.skip( + "Not implemented yet.") # TODO(mgallus): enable when implemented. class TestElementwiseMulMKLDNNOp_BroadcastNCHW8c(ElementwiseMulOp): def init_input_output(self): x = np.random.rand(1, 8, 2, 2).astype(self.dtype)
@@ -79,6 +82,7 @@ class TestElementwiseMulMKLDNNOp_BroadcastNCHW8c(ElementwiseMulOp):
def test_check_grad_ingore_y(self): pass + class TestElementwiseMulMKLDNNOp_FallbackNCHW(ElementwiseMulOp): def init_input_output(self): self.x = np.random.rand(1, 16, 2, 2).astype(self.dtype)
@@ -101,6 +105,7 @@ class TestElementwiseMulMKLDNNOp_FallbackNCHW(ElementwiseMulOp):
def test_check_grad_ingore_y(self): pass + class TestElementwiseMulMKLDNNOp_FallbackNCHW16C(ElementwiseMulOp): def init_input_output(self): x = np.random.rand(1, 16, 2, 2).astype(self.dtype)
@@ -130,6 +135,7 @@ class TestElementwiseMulMKLDNNOp_FallbackNCHW16C(ElementwiseMulOp):
def test_check_grad_ingore_y(self): pass + class TestElementwiseMulMKLDNNOp_FallbackNoReorders(ElementwiseMulOp): def init_input_output(self): x = np.random.rand(1, 16, 2, 2).astype(self.dtype)
@@ -159,6 +165,7 @@ class TestElementwiseMulMKLDNNOp_FallbackNoReorders(ElementwiseMulOp):
def test_check_grad_ingore_y(self): pass + class TestElementwiseMulMKLDNNOp_FallbackWithReorder1(ElementwiseMulOp): def init_input_output(self): self.x = np.random.rand(1, 16, 2, 2).astype(self.dtype)
@@ -187,6 +194,7 @@ class TestElementwiseMulMKLDNNOp_FallbackWithReorder1(ElementwiseMulOp):
def test_check_grad_ingore_y(self): pass + class TestElementwiseMulMKLDNNOp_FallbackWithReorder2(ElementwiseMulOp): def init_input_output(self): self.y = np.random.rand(1, 16, 2, 2).astype(self.dtype)
@@ -215,6 +223,7 @@ class TestElementwiseMulMKLDNNOp_FallbackWithReorder2(ElementwiseMulOp):
def test_check_grad_ingore_y(self): pass + class TestElementwiseMulMKLDNNOp_FallbackNoReorders2(ElementwiseMulOp): def init_input_output(self): self.x = np.random.rand(1, 16).astype(self.dtype)
@@ -242,5 +251,6 @@ class TestElementwiseMulMKLDNNOp_FallbackNoReorders2(ElementwiseMulOp):
def test_check_grad_ingore_y(self): pass + if __name__ == '__main__': unittest.main()
--
GitLab