diff --git a/paddle/fluid/framework/data_layout_transform.cc b/paddle/fluid/framework/data_layout_transform.cc index a236ff07fe7cc769d596bdc48e8f5e7cc0b7cf34..394930baffece7da1a96d19e1abe9a2fb58e32f0 100644 --- a/paddle/fluid/framework/data_layout_transform.cc +++ b/paddle/fluid/framework/data_layout_transform.cc @@ -113,7 +113,6 @@ void* GetDataFromTensor(const Tensor& tensor, mkldnn::memory::data_type type) { PADDLE_THROW("wrong mkldnn type provided"); } } -#endif void TransDataLayoutFromMKLDNN(const OpKernelType& kernel_type_for_var, const OpKernelType& expected_kernel_type, @@ -127,14 +126,11 @@ void TransDataLayoutFromMKLDNN(const OpKernelType& kernel_type_for_var, "TransDataLayoutFromMKLDNN only supports transform from MKLDNN to " "non-MKLDNN"); -#ifdef PADDLE_WITH_MKLDNN innerTransDataLayoutFromMKLDNN(in_layout, paddle::platform::get_cur_paddle_data_layout(), in, out, place); -#endif } -#ifdef PADDLE_WITH_MKLDNN void innerTransDataLayoutFromMKLDNN(DataLayout in_layout, DataLayout out_layout, const Tensor& in, Tensor* out, platform::Place place) { diff --git a/paddle/fluid/framework/data_layout_transform.h b/paddle/fluid/framework/data_layout_transform.h index 6a6a531ea4a1d76416ae8e0e4e155422f44060c1..8a8342992181bfd00bf10d67cb825c956101d0b8 100644 --- a/paddle/fluid/framework/data_layout_transform.h +++ b/paddle/fluid/framework/data_layout_transform.h @@ -69,11 +69,11 @@ inline MKLDNNDataType ToMKLDNNDataType(proto::VarType::Type type) { void innerTransDataLayoutFromMKLDNN(DataLayout in_layout, DataLayout out_layout, const Tensor& in, Tensor* out, platform::Place place); -#endif void TransDataLayoutFromMKLDNN(const OpKernelType& kernel_type_for_var, const OpKernelType& expected_kernel_type, const Tensor& in, Tensor* out); +#endif std::vector GetAxis(const DataLayout& from, const DataLayout& to); diff --git a/paddle/fluid/framework/data_transform.cc b/paddle/fluid/framework/data_transform.cc index 
7fb3f1f7d7b2249f4ff94e32bfb6c6682c14fe76..95cb4f48f576aa627ffba82b372623a08b2233aa 100644 --- a/paddle/fluid/framework/data_transform.cc +++ b/paddle/fluid/framework/data_transform.cc @@ -43,13 +43,13 @@ void TransformData(const OpKernelType &expected_kernel_type, // do layout transform if (NeedTransformLayout(lout, lin)) { +#ifdef PADDLE_WITH_MKLDNN if (lin == DataLayout::kMKLDNN || lout == DataLayout::kMKLDNN) { PADDLE_ENFORCE( !(lin == DataLayout::kMKLDNN && lout == DataLayout::kMKLDNN), "No layout transform needed between two MKLDNN OPKernels"); if (lin != DataLayout::kMKLDNN && lout == DataLayout::kMKLDNN) { -#ifdef PADDLE_WITH_MKLDNN // Case1 - transform from Non-MKLDNN OPKernel to MKLDNN OPKernel // Just set layout/format. No real transform occur @@ -67,7 +67,6 @@ void TransformData(const OpKernelType &expected_kernel_type, } out.set_layout(DataLayout::kMKLDNN); out.set_format(out_format); -#endif } else { // Case2 - transfrom from MKLDNN OPKernel to Non-MKLDNN OPKernel // Do transform via MKLDNN lib @@ -78,6 +77,10 @@ void TransformData(const OpKernelType &expected_kernel_type, // Case3 - transfrom between Non-MKLDNN OPKernels TransDataLayout(kernel_type_for_var, expected_kernel_type, in, &out); } +#else + // Case3 - transfrom between Non-MKLDNN OPKernels + TransDataLayout(kernel_type_for_var, expected_kernel_type, in, &out); +#endif transformed = true; PassTensorData(&out, &in); } diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc index 263e7e5d5496f668b65997d5322d9a2bc8ab3da1..bf42821a7fd6a0ca8a0cc8630986feef444b5c3d 100644 --- a/paddle/fluid/operators/conv_op.cc +++ b/paddle/fluid/operators/conv_op.cc @@ -48,7 +48,11 @@ void ConvOp::InferShape(framework::InferShapeContext* ctx) const { int groups = ctx->Attrs().Get("groups"); std::vector dilations = ctx->Attrs().Get>("dilations"); const std::string data_format = ctx->Attrs().Get("data_format"); - const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC"); 
+ + // MKL-DNN Kernels are using NCHW order of dims description + // so we ignore data_format consideration for MKL-DNN kernel + const bool channel_last = (this->IsMKLDNNType() == false) && + (data_format == "NHWC" || data_format == "NDHWC"); PADDLE_ENFORCE_EQ( in_dims.size() == 4 || in_dims.size() == 5, true, @@ -151,15 +155,6 @@ framework::OpKernelType ConvOp::GetExpectedKernelType( #ifdef PADDLE_WITH_MKLDNN if (library == framework::LibraryType::kPlain && platform::CanMKLDNNBeUsed(ctx)) { - // TODO(jczaja): Add support for NHWC - const std::string data_format = ctx.Attr("data_format"); - PADDLE_ENFORCE_NE(data_format, "NHWC", - platform::errors::Unimplemented( - "Conv MKLDNN does not support NHWC data format yet")); - PADDLE_ENFORCE_NE( - data_format, "NDHWC", - platform::errors::Unimplemented( - "Conv MKLDNN does not support NDHWC data format yet")); library = framework::LibraryType::kMKLDNN; layout = framework::DataLayout::kMKLDNN; customized_type_value = @@ -197,6 +192,32 @@ framework::OpKernelType ConvOp::GetExpectedKernelType( return type; } +framework::OpKernelType ConvOp::GetKernelTypeForVar( + const std::string& var_name, const Tensor& tensor, + const framework::OpKernelType& expected_kernel_type) const { +#ifdef PADDLE_WITH_MKLDNN + // Only input require reshaping, weights and + // bias are having shape in NCHW order + if ((var_name == "Input") && + (expected_kernel_type.data_layout_ == framework::DataLayout::kMKLDNN) && + (tensor.layout() != framework::DataLayout::kMKLDNN)) { + auto attrs = Attrs(); + auto ar = paddle::framework::AttrReader(attrs); + const std::string data_format = ar.Get("data_format"); + auto dl = framework::StringToDataLayout(data_format); + // Some models may have intentionally set "AnyLayout" for conv + // op. 
Treat this as NCHW (default data_format value) + if (dl != framework::DataLayout::kAnyLayout) { + return framework::OpKernelType( + expected_kernel_type.data_type_, tensor.place(), + framework::StringToDataLayout(data_format)); + } + } +#endif + return framework::OpKernelType(expected_kernel_type.data_type_, + tensor.place(), tensor.layout()); +} + void Conv2DOpMaker::Make() { AddAttr("is_test", "(bool, default false) Set to true for inference only, false " diff --git a/paddle/fluid/operators/conv_op.h b/paddle/fluid/operators/conv_op.h index 30cc31c7ddc250013b4853e9ef000fda05f38b80..20b3f84f23208b247f3e5f228c1aa39db3d7c7c7 100644 --- a/paddle/fluid/operators/conv_op.h +++ b/paddle/fluid/operators/conv_op.h @@ -258,6 +258,10 @@ class ConvOp : public framework::OperatorWithKernel { protected: framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override; + + framework::OpKernelType GetKernelTypeForVar( + const std::string& var_name, const Tensor& tensor, + const framework::OpKernelType& expected_kernel_type) const override; }; class ConvOpGrad : public framework::OperatorWithKernel { diff --git a/paddle/fluid/operators/conv_transpose_op.cc b/paddle/fluid/operators/conv_transpose_op.cc index 25c8450140044fe5c73dd4a64cb52f033fa0ae9d..81f099d7c1cb8f977e51c952c9a418842d23359c 100644 --- a/paddle/fluid/operators/conv_transpose_op.cc +++ b/paddle/fluid/operators/conv_transpose_op.cc @@ -48,8 +48,9 @@ void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const { ctx->Attrs().Get("padding_algorithm"); const std::string data_layout_str = ctx->Attrs().Get("data_format"); - const framework::DataLayout data_layout = - framework::StringToDataLayout(data_layout_str); + const DataLayout data_layout = + this->IsMKLDNNType() ? 
DataLayout::kNCHW + : framework::StringToDataLayout(data_layout_str); PADDLE_ENFORCE_EQ(in_dims.size() == 4 || in_dims.size() == 5, true, "ShapeError: input of Op(conv_transpose) should be 4-D or " @@ -145,11 +146,6 @@ framework::OpKernelType ConvTransposeOp::GetExpectedKernelType( #ifdef PADDLE_WITH_MKLDNN if (library_ == framework::LibraryType::kPlain && platform::CanMKLDNNBeUsed(ctx)) { - // TODO(jczaja): Add support for NHWC - const std::string data_format = ctx.Attr("data_format"); - PADDLE_ENFORCE_NE( - data_format, "NHWC", - "Conv Transpose MKLDNN does not support NHWC data format yet"); library_ = framework::LibraryType::kMKLDNN; layout_ = framework::DataLayout::kMKLDNN; } @@ -160,6 +156,32 @@ framework::OpKernelType ConvTransposeOp::GetExpectedKernelType( layout_, library_); } +framework::OpKernelType ConvTransposeOp::GetKernelTypeForVar( + const std::string& var_name, const Tensor& tensor, + const framework::OpKernelType& expected_kernel_type) const { +#ifdef PADDLE_WITH_MKLDNN + // Only input require reshaping, weights and + // bias are having shape in NCHW order + if ((var_name == "Input") && + (expected_kernel_type.data_layout_ == framework::DataLayout::kMKLDNN) && + (tensor.layout() != framework::DataLayout::kMKLDNN)) { + auto attrs = Attrs(); + auto ar = paddle::framework::AttrReader(attrs); + const std::string data_format = ar.Get("data_format"); + auto dl = framework::StringToDataLayout(data_format); + // Some models may have intentionally set "AnyLayout" for conv + // op. 
Treat this as NCHW (default data_format value) + if (dl != framework::DataLayout::kAnyLayout) { + return framework::OpKernelType( + expected_kernel_type.data_type_, tensor.place(), + framework::StringToDataLayout(data_format)); + } + } +#endif + return framework::OpKernelType(expected_kernel_type.data_type_, + tensor.place(), tensor.layout()); +} + void Conv2DTransposeOpMaker::Make() { AddAttr("is_test", "(bool, default false) Set to true for inference only, false " diff --git a/paddle/fluid/operators/conv_transpose_op.h b/paddle/fluid/operators/conv_transpose_op.h index 9e1b4f3e362e99f68ba4e38f4c97957e82373b51..59b3677acc41658936dc678d9810c923a80bf6e1 100644 --- a/paddle/fluid/operators/conv_transpose_op.h +++ b/paddle/fluid/operators/conv_transpose_op.h @@ -98,6 +98,10 @@ class ConvTransposeOp : public framework::OperatorWithKernel { protected: framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override; + + framework::OpKernelType GetKernelTypeForVar( + const std::string& var_name, const Tensor& tensor, + const framework::OpKernelType& expected_kernel_type) const override; }; class ConvTransposeOpGrad : public framework::OperatorWithKernel { diff --git a/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc index f9ca40f870b44e61469fccb54b339cb6ce98be63..de6dea91ea20d453cc50ab55c5e9be60c99c652e 100644 --- a/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc @@ -220,9 +220,14 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { * ('any') which lets a primitive (convolution in this case) choose * the memory format preferred for best performance */ + // TODO(jczaja): This is workaround to make grad op UT's numerical + // gradient computation proper as this op is called directly without + // fetch op following it, so numerical grad is computed (in python) + // using block formats which will give wrong results 
std::string data_format = ctx.Attr("data_format"); auto chosen_memory_format = - platform::data_format_to_memory_format(data_format); + is_test ? MKLDNNMemoryFormat::any + : platform::data_format_to_memory_format(data_format); weights_format = MKLDNNMemoryFormat::any; // Check the format for user's special output @@ -519,9 +524,7 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { * ('any') which lets a primitive (convolution in this case) choose * the memory format preferred for best performance */ - std::string data_format = ctx.Attr("data_format"); - auto chosen_memory_format = - platform::data_format_to_memory_format(data_format); + auto chosen_memory_format = MKLDNNMemoryFormat::any; std::vector bias_tz; @@ -772,18 +775,8 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel { * ('any') which lets a primitive (conv backward in this case) choose * the memory format preferred for best performance */ - std::string data_format = ctx.Attr("data_format"); - auto chosen_memory_format = - platform::data_format_to_memory_format(data_format); - + auto chosen_memory_format = MKLDNNMemoryFormat::any; weights_format = MKLDNNMemoryFormat::any; - // Check the format for user's special output - if (chosen_memory_format != MKLDNNMemoryFormat::any) { - if (is_conv3d) { - chosen_memory_format = - platform::MKLDNNFormatForSize(src_tz.size(), chosen_memory_format); - } - } auto src_md = platform::MKLDNNMemDesc( src_tz, platform::MKLDNNGetDataType(), chosen_memory_format); diff --git a/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc index 74bc1eb2865ef91ddcb6d9b8f9bdbbfe19a9e514..958b8906415e752e69e09a438580ca53e210509f 100644 --- a/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc @@ -156,9 +156,7 @@ class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel { * ('any') which lets a primitive 
(convolution in this case) choose * the memory format preferred for best performance */ - std::string data_format = ctx.Attr("data_format"); - auto chosen_memory_format = - platform::data_format_to_memory_format(data_format); + auto chosen_memory_format = MKLDNNMemoryFormat::any; std::string fuse_activation = ctx.Attr("fuse_activation"); float fuse_alpha = ctx.Attr("fuse_alpha"); float fuse_beta = ctx.Attr("fuse_beta"); diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_int8_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_int8_mkldnn_op.py index 4012ab00f9a8f1a0cb24fec3aae4148dfa99c7b5..7a494e3c2c3040356641d05772c883e15e4579e3 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_int8_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_int8_mkldnn_op.py @@ -35,7 +35,7 @@ class TestConv2dInt8Op(TestConv2dOp): self.exhaustive_search = False self.use_cuda = False self.use_mkldnn = False - self.data_format = "AnyLayout" + self.data_format = "NCHW" self.weighttype = np.float32 self.use_mkldnn = True self.init_group() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py index 3ca1762f0f3ab8afc20ff0b4cc1d110b3726ee9c..69d2d23b1adf84b80fc6936b28c03808f94d693c 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_mkldnn_op.py @@ -197,5 +197,38 @@ class TestConv2dOp_Valid_MKLDNN(TestConv2dOp_AsyPadding_MKLDNN): self.padding_algorithm = "VALID" +class TestConv2dOp_Valid_NHWC_MKLDNN(TestConv2dOp_Valid_MKLDNN): + def init_data_format(self): + self.data_format = "NHWC" + + def init_test_case_2(self): + N, C, H, W = self.input_size + self.input_size = [N, H, W, C] + + #TODO(jczaja): Enable once GRAD op is adjusted + def test_check_grad(self): + pass + + #TODO(jczaja): Enable once GRAD op is adjusted + def 
test_check_grad_no_filter(self): + pass + + #TODO(jczaja): Enable once GRAD op is adjusted + def test_check_grad_no_input(self): + pass + + +class TestConv2dOp_Same_NHWC_MKLDNN(TestConv2dOp_Valid_NHWC_MKLDNN): + def init_paddings(self): + self.pad = [0, 0] + self.padding_algorithm = "SAME" + + +class TestConv2dOp_AsyPadding_NHWC_MKLDNN(TestConv2dOp_Valid_NHWC_MKLDNN): + def init_paddings(self): + self.pad = [0, 0, 1, 2] + self.padding_algorithm = "EXPLICIT" + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_transpose_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_transpose_mkldnn_op.py index 428c093edf05bfd66bcc2e6a42d2e52f30426f35..a9b26b86c23b787d79d863bc4cc82bc20c0dec75 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_transpose_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_conv2d_transpose_mkldnn_op.py @@ -126,3 +126,11 @@ class TestMKLDNNWithValidPad(TestConv2dTransposeMKLDNNOp): TestConv2dTransposeMKLDNNOp.init_test_case(self) self.pad = [1, 1] self.padding_algorithm = "VALID" + + +class TestMKLDNNWithValidPad_NHWC(TestMKLDNNWithValidPad): + def init_test_case(self): + super(TestMKLDNNWithValidPad_NHWC, self).init_test_case() + self.data_format = "NHWC" + N, C, H, W = self.input_size + self.input_size = [N, H, W, C]