diff --git a/paddle/fluid/operators/transpose_mkldnn_op.cc b/paddle/fluid/operators/transpose_mkldnn_op.cc
new file mode 100644
index 0000000000000000000000000000000000000000..37f1cadc7d2ff248e8b6dcb3f0c8ba09f8ccd8b5
--- /dev/null
+++ b/paddle/fluid/operators/transpose_mkldnn_op.cc
@@ -0,0 +1,124 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#include "paddle/fluid/framework/data_layout_transform.h"
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/memory/malloc.h"
+#include "paddle/fluid/platform/mkldnn_reuse.h"
+
+namespace paddle {
+namespace operators {
+
+using Tensor = framework::Tensor;
+using framework::DataLayout;
+
+template <typename T>
+class TransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
+ public:
+  void Compute(const paddle::framework::ExecutionContext& ctx) const override {
+    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
+                   "It must use CPUPlace.");
+    const bool is_test = ctx.Attr<bool>("is_test");
+    PADDLE_ENFORCE(
+        is_test == true,
+        "TransposeMKLDNN works only for inference. Set is_test = True.");
+    auto& dev_ctx =
+        ctx.template device_context<platform::MKLDNNDeviceContext>();
+    const auto& mkldnn_engine = dev_ctx.GetEngine();
+    std::vector<int> axis = ctx.Attr<std::vector<int>>("axis");
+    int ndims = axis.size();
+    auto* input = ctx.Input<Tensor>("X");
+    auto* output = ctx.Output<Tensor>("Out");
+    const T* input_data = input->data<T>();
+
+    if (ndims == 1) {
+      output->ShareDataWith(*input);
+      return;
+    }
+
+    std::vector<int> nchw_axis(ndims, 0);
+    for (size_t i = 0; i < nchw_axis.size(); ++i) {
+      nchw_axis[i] = i;
+    }
+
+    std::vector<int> nchw_tz = paddle::framework::vectorize2int(input->dims());
+    std::string data_format = ctx.Attr<std::string>("data_format");
+
+    auto src_md =
+        input->format() != mkldnn::memory::format::nchw
+            ? platform::MKLDNNMemDesc(nchw_tz, platform::MKLDNNGetDataType<T>(),
+                                      input->format())
+            : Axis2MemoryDesc(nchw_tz, nchw_axis);
+
+    this->TransposeKernel(ctx.GetPlace(), Axis2MemoryDesc(nchw_tz, axis),
+                          src_md, output, input_data, nchw_tz, mkldnn_engine);
+  }
+
+ protected:
+  mkldnn::memory::desc Axis2MemoryDesc(std::vector<int>& nchw_tz,
+                                       std::vector<int>& axis) const {
+    mkldnn_memory_desc_t mem_fmt;
+
+    mem_fmt.primitive_kind = mkldnn_memory;
+    mem_fmt.ndims = axis.size();
+    for (unsigned int i = 0; i < nchw_tz.size(); ++i) {
+      mem_fmt.dims[i] = nchw_tz[i];  // logical dimensions (nchw format,
+                                     // regardless of physical layout)
+    }
+    mem_fmt.data_type = mkldnn_f32;
+    mem_fmt.format = mkldnn_blocked;
+
+    unsigned int total_stride = 1;
+    for (int i = nchw_tz.size() - 1; i >= 0; --i) {
+      mem_fmt.layout_desc.blocking.padding_dims[i] =
+          nchw_tz[i];  // logical dimensions (nchw format, regardless of
+                       // physical layout)
+      mem_fmt.layout_desc.blocking.block_dims[i] = 1;
+      mem_fmt.layout_desc.blocking.offset_padding_to_data[i] = 0;  // no offset
+      mem_fmt.layout_desc.blocking.strides[0][axis[i]] = total_stride;
+      mem_fmt.layout_desc.blocking.strides[1][axis[i]] = 1;
+      total_stride *= nchw_tz[axis[i]];
+    }
+    mem_fmt.layout_desc.blocking.offset_padding = 0;  // no initial offset
+    return mem_fmt;
+  }
+
+  void TransposeKernel(platform::Place place, mkldnn::memory::desc md_o,
+                       mkldnn::memory::desc md_i, Tensor* output,
+                       const T* data_i, std::vector<int>& nchw_dims,
+                       const mkldnn::engine& eng) const {
+    // Make Memory primitive descriptors
+    auto mpd_o = mkldnn::memory::primitive_desc(md_o, eng);
+    auto mpd_i = mkldnn::memory::primitive_desc(md_i, eng);
+
+    auto data_o = output->mutable_data<T>(
+        place, paddle::memory::Allocator::kDefault, mpd_o.get_size());
+
+    auto src = mkldnn::memory(mpd_i, (T*)(data_i));
+    auto dst = mkldnn::memory(mpd_o, data_o);
+
+    auto r = mkldnn::reorder(src, dst);
+    mkldnn::stream(mkldnn::stream::kind::eager).submit({r}).wait();
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+
+REGISTER_OP_KERNEL(transpose2, MKLDNN, ::paddle::platform::CPUPlace,
+                   ops::TransposeMKLDNNOpKernel<float>);
+REGISTER_OP_KERNEL(transpose, MKLDNN, ::paddle::platform::CPUPlace,
+                   ops::TransposeMKLDNNOpKernel<float>);
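Note on the kernel above: MKL-DNN has no dedicated transpose primitive, so the kernel expresses the permutation as a `reorder` between two memory descriptors that share the same logical dimensions but differ in strides. `Axis2MemoryDesc` assigns destination strides so that, read linearly, the output buffer is already in transposed order. Below is a minimal NumPy sketch of that stride arithmetic, illustrative only and not part of the patch (`transpose_via_strides` is a hypothetical name):

```python
# Sketch (assumption: mimics the stride loop in Axis2MemoryDesc above).
import numpy as np

def transpose_via_strides(x, axis):
    dims = x.shape
    ndims = len(dims)
    # Same loop as Axis2MemoryDesc: walk axes from last to first and give
    # each *source* axis the stride it must have in the destination buffer.
    strides = [0] * ndims
    total_stride = 1
    for i in range(ndims - 1, -1, -1):
        strides[axis[i]] = total_stride
        total_stride *= dims[axis[i]]
    # The "reorder": scatter every element to its strided destination offset.
    out = np.empty(x.size, dtype=x.dtype)
    for idx in np.ndindex(*dims):
        offset = sum(i * s for i, s in zip(idx, strides))
        out[offset] = x[idx]
    # Reading the buffer row-major as the permuted shape yields the transpose.
    return out.reshape(tuple(dims[a] for a in axis))

x = np.arange(24, dtype=np.float32).reshape(2, 3, 4)
assert np.array_equal(transpose_via_strides(x, (0, 2, 1)), x.transpose(0, 2, 1))
```

Running the sketch confirms that the permuted-stride destination descriptor plus a plain copy reproduces `np.transpose`.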
diff --git a/paddle/fluid/operators/transpose_op.cc b/paddle/fluid/operators/transpose_op.cc
index bc1f59bc1a7641764d1a76fc54ebe835f50aee3d..b3b379d16ff099ba244fc92ed149a0089c2750e4 100644
--- a/paddle/fluid/operators/transpose_op.cc
+++ b/paddle/fluid/operators/transpose_op.cc
@@ -16,6 +16,10 @@ limitations under the License. */
 #include <string>
 #include <vector>
+#ifdef PADDLE_WITH_MKLDNN
+#include "paddle/fluid/platform/mkldnn_helper.h"
+#endif
+
 
 namespace paddle {
 namespace operators {
 
@@ -53,11 +57,32 @@ class TransposeOp : public framework::OperatorWithKernel {
     }
     ctx->SetOutputDim("Out", out_dims);
   }
+
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext &ctx) const override {
+    framework::LibraryType library_{framework::LibraryType::kPlain};
+    std::string data_format = ctx.Attr<std::string>("data_format");
+    framework::DataLayout layout_ = framework::StringToDataLayout(data_format);
+#ifdef PADDLE_WITH_MKLDNN
+    if (library_ == framework::LibraryType::kPlain &&
+        platform::CanMKLDNNBeUsed(ctx)) {
+      library_ = framework::LibraryType::kMKLDNN;
+      layout_ = framework::DataLayout::kMKLDNN;
+    }
+#endif
+    return framework::OpKernelType(ctx.Input<framework::LoDTensor>("X")->type(),
+                                   ctx.GetPlace(), layout_, library_);
+  }
 };
 
 class TransposeOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
   void Make() override {
+    AddAttr<bool>("is_test",
+                  "(bool, default false) Set to true for inference only, false "
+                  "for training. Some layers may run faster when this is true.")
+        .SetDefault(false);
     AddInput(
         "X",
         "(Tensor) The input tensor, tensors with rank up to 6 are supported.");
@@ -67,6 +92,16 @@ class TransposeOpMaker : public framework::OpProtoAndCheckerMaker {
         "(vector<int>) A list of values, and the size of the list should be "
         "the same with the input tensor rank. This operator permutes the input "
         "tensor's axes according to the values given.");
+    AddAttr<bool>("use_mkldnn",
+                  "(bool, default false) Only used in mkldnn kernel")
+        .SetDefault(false);
+    AddAttr<std::string>(
+        "data_format",
+        "(string, default \"AnyLayout\") An optional string from: \"NHWC\", "
+        "\"NCHW\". Specify the data format of the input data; the input "
+        "will be transformed automatically.")
+        .SetDefault("AnyLayout");
     AddComment(R"DOC(
 Transpose Operator.
 
@@ -144,8 +179,18 @@ class Transpose2Op : public TransposeOp {
  protected:
   framework::OpKernelType GetExpectedKernelType(
       const framework::ExecutionContext &ctx) const override {
-    return framework::OpKernelType(ctx.Input<framework::LoDTensor>("X")->type(),
-                                   ctx.device_context());
+    framework::LibraryType library_{framework::LibraryType::kPlain};
+    std::string data_format = ctx.Attr<std::string>("data_format");
+    framework::DataLayout layout_ = framework::StringToDataLayout(data_format);
+#ifdef PADDLE_WITH_MKLDNN
+    if (library_ == framework::LibraryType::kPlain &&
+        platform::CanMKLDNNBeUsed(ctx)) {
+      library_ = framework::LibraryType::kMKLDNN;
+      layout_ = framework::DataLayout::kMKLDNN;
+    }
+#endif
+    return framework::OpKernelType(ctx.Input<framework::LoDTensor>("X")->type(),
+                                   ctx.GetPlace(), layout_, library_);
   }
 };
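The two `GetExpectedKernelType` overrides above share one dispatch pattern: start from the plain CPU kernel and switch library and layout to MKL-DNN only when the build has it and the op is allowed to use it. A hedged Python sketch of that decision logic follows; the function and parameter names are illustrative, not Paddle API, and the reading of `CanMKLDNNBeUsed` (use_mkldnn attribute set, CPU place, MKL-DNN build) is an assumption based on the calls visible here:

```python
# Sketch of the kernel-dispatch fallback in GetExpectedKernelType (names are
# hypothetical; this is not Paddle code).
def pick_kernel(use_mkldnn_attr, mkldnn_built, is_cpu_place, data_format):
    library = "plain"
    layout = data_format  # StringToDataLayout analogue
    # Assumed meaning of CanMKLDNNBeUsed(ctx): attr set + CPU place.
    if mkldnn_built and library == "plain" and use_mkldnn_attr and is_cpu_place:
        library = "mkldnn"
        layout = "mkldnn"
    return library, layout

assert pick_kernel(True, True, True, "AnyLayout") == ("mkldnn", "mkldnn")
assert pick_kernel(False, True, True, "AnyLayout") == ("plain", "AnyLayout")
```

Because `library_` starts as `kPlain` and is only upgraded inside the `#ifdef`, a build without MKL-DNN compiles to exactly the old behavior.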
diff --git a/python/paddle/fluid/tests/unittests/test_transpose_mkldnn_op.py b/python/paddle/fluid/tests/unittests/test_transpose_mkldnn_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..61ac8790112ceadfdef7b18aad70af77644581cd
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_transpose_mkldnn_op.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+
+from test_transpose_op import TestTransposeOp
+
+
+class TestTransposeMKLDNN(TestTransposeOp):
+    def init_op_type(self):
+        self.op_type = "transpose2"
+        self.use_mkldnn = True
+        self.is_test = True
+        return
+
+    def test_check_grad(self):
+        return
+
+    def test_check_grad_no_input(self):
+        return
+
+    def test_check_grad_no_filter(self):
+        return
+
+
+class TestCase0MKLDNN(TestTransposeMKLDNN):
+    def initTestCase(self):
+        self.shape = (3, )
+        self.axis = (0, )
+
+
+class TestCase1a(TestTransposeMKLDNN):
+    def initTestCase(self):
+        self.shape = (3, 4, 5)
+        self.axis = (0, 2, 1)
+
+
+class TestCase1b(TestTransposeMKLDNN):
+    def initTestCase(self):
+        self.shape = (3, 4, 5)
+        self.axis = (2, 1, 0)
+
+
+class TestCase2(TestTransposeMKLDNN):
+    def initTestCase(self):
+        self.shape = (2, 3, 4, 5)
+        self.axis = (0, 2, 3, 1)
+
+
+class TestCase3(TestTransposeMKLDNN):
+    def initTestCase(self):
+        self.shape = (2, 3, 4, 5, 6)
+        self.axis = (4, 2, 3, 1, 0)
+
+
+class TestCase4(TestTransposeMKLDNN):
+    def initTestCase(self):
+        self.shape = (2, 3, 4, 5, 6, 1)
+        self.axis = (4, 2, 3, 1, 0, 5)
+
+
+if __name__ == '__main__':
+    unittest.main()
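The MKLDNN test classes above reuse every check from `TestTransposeOp` and override only `init_op_type`, stubbing out the gradient tests because the kernel is inference-only (`is_test = True`). Adding coverage for another shape/permutation is a one-override affair; for example (hypothetical case, not in the patch):

```python
# Illustrative extra case in the same style; not part of this diff.
from test_transpose_mkldnn_op import TestTransposeMKLDNN


class TestCase5MKLDNN(TestTransposeMKLDNN):
    def initTestCase(self):
        self.shape = (2, 4, 8)  # any shape works, provided
        self.axis = (1, 0, 2)   # len(axis) == len(shape)
```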
diff --git a/python/paddle/fluid/tests/unittests/test_transpose_op.py b/python/paddle/fluid/tests/unittests/test_transpose_op.py
index bbcabb751f0761705ff268c4408dc8673bb01b81..93be9d28da7a73f4fa972acf0dbd95167e7dfca3 100644
--- a/python/paddle/fluid/tests/unittests/test_transpose_op.py
+++ b/python/paddle/fluid/tests/unittests/test_transpose_op.py
@@ -21,15 +21,24 @@ from op_test import OpTest
 
 class TestTransposeOp(OpTest):
     def setUp(self):
+        self.init_op_type()
         self.initTestCase()
-        self.op_type = "transpose2"
         self.inputs = {'X': np.random.random(self.shape).astype("float32")}
-        self.attrs = {'axis': list(self.axis)}
+        self.attrs = {
+            'axis': list(self.axis),
+            'use_mkldnn': self.use_mkldnn,
+            'is_test': self.is_test,
+        }
         self.outputs = {
             'XShape': np.random.random(self.shape).astype("float32"),
             'Out': self.inputs['X'].transpose(self.axis)
         }
 
+    def init_op_type(self):
+        self.op_type = "transpose2"
+        self.use_mkldnn = False
+        self.is_test = False
+
     def test_check_output(self):
         self.check_output(no_check_set=['XShape'])
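The refactor above is a small template-method pattern: `setUp` now calls the `init_op_type` hook before building `attrs`, so subclasses (such as the MKLDNN ones in the new test file) can flip `use_mkldnn`/`is_test` without duplicating `setUp`. A stripped-down sketch of the mechanism, with illustrative class names that are not part of the patch:

```python
# Minimal sketch of the hook pattern used in TestTransposeOp above.
class Base:
    def setup(self):
        self.init_op_type()  # hook runs first, so flags exist when attrs are built
        self.attrs = {'use_mkldnn': self.use_mkldnn, 'is_test': self.is_test}

    def init_op_type(self):  # defaults; subclasses override
        self.use_mkldnn = False
        self.is_test = False


class MKLDNNVariant(Base):
    def init_op_type(self):
        self.use_mkldnn = True
        self.is_test = True


b = MKLDNNVariant()
b.setup()
assert b.attrs == {'use_mkldnn': True, 'is_test': True}
```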