From 162ac048384fc3ff927668126e138488837e7c98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C5=82awomir=20Siwek?= Date: Wed, 17 Nov 2021 16:26:29 +0100 Subject: [PATCH] Replace custom IOHW -> OIHW reorder with built-in oneDNN reorder (#37175) * Use oneDNN reorder instead of custom one * Fix whitespace typo * Fix Code format error * Incorporating feedback * Remove unnecessary reorder * Support GIOHW format * Fix code format error --- .../mkldnn/conv_transpose_mkldnn_op.cc | 37 ++----------------- 1 file changed, 4 insertions(+), 33 deletions(-) diff --git a/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc index b68c950aa9..04ff37222e 100644 --- a/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc @@ -68,7 +68,7 @@ class ConvTransposeMKLDNNHandlerT PADDLE_ENFORCE_EQ( filter->layout(), DataLayout::kMKLDNN, platform::errors::InvalidArgument( - "The filter tensor's laytout should be %d, but got %d.", + "The filter tensor's layout should be %d, but got %d.", DataLayout::kMKLDNN, filter->layout())); PADDLE_ENFORCE_NE(filter->format(), MKLDNNMemoryFormat::undef, platform::errors::InvalidArgument( @@ -227,34 +227,12 @@ class ConvTransposeMKLDNNHandlerT auto user_src_md = platform::MKLDNNMemDesc( weights_tz, platform::MKLDNNGetDataType(), - (g == 1) ? 
filter->format() : MKLDNNMemoryFormat::goihw); - - auto iohw_weights_tz = framework::vectorize(filter->dims()); - // Custom Reorder from IOHW to OIHW - auto iohw2oihw_reorder = - [&iohw_weights_tz](const K* filter_data) -> std::shared_ptr { - int o = iohw_weights_tz[1]; - int c = iohw_weights_tz[0]; - int h = iohw_weights_tz[2]; - int w = iohw_weights_tz[3]; - std::shared_ptr reordered_filter_data(new K[o * c * h * w](), - std::default_delete()); - for (int i = 0; i < c; ++i) { - for (int j = 0; j < o; ++j) { - int in_offset = j * h * w + i * o * h * w; - int out_offset = j * c * h * w + i * h * w; - std::memcpy(&(reordered_filter_data.get())[out_offset], - &filter_data[in_offset], h * w * sizeof(K)); - } - } - - return reordered_filter_data; - }; + (g == 1) ? MKLDNNMemoryFormat::iohw : MKLDNNMemoryFormat::giohw); return this->template AcquireMemoryWithReorder( dev_ctx, user_src_md, this->fwd_pd_->weights_desc(), - platform::to_void_cast(filter_data), key, "@weights_mem_p", is_test_, - iohw2oihw_reorder); + platform::to_void_cast(filter_data), key, "@weights_mem_p", + is_test_); } template @@ -263,7 +241,6 @@ class ConvTransposeMKLDNNHandlerT const mkldnn::memory::desc& user_md, const mkldnn::memory::desc& target_md, void* ptr, const std::string& key, const std::string& suffix, bool is_persistent = false, - std::function(const F*)> custom_reorder_func = {}, const std::vector& scale_data = {1.0f}, int mask = 0) { const auto target_key = key + suffix + "_target"; const auto key_reorder_p = key + suffix + "reorder_p"; @@ -273,12 +250,6 @@ class ConvTransposeMKLDNNHandlerT std::static_pointer_cast(dev_ctx.GetBlob(target_key)); if (target_memory_p == nullptr) { - if (custom_reorder_func) { - auto reordered_data = - custom_reorder_func(reinterpret_cast(ptr)); - dev_ctx.SetBlob(key_reorder_p + "-custom_reorder", reordered_data); - ptr = reinterpret_cast(reordered_data.get()); - } auto user_memory_p = std::make_shared(user_md, this->engine_, ptr); if (user_md != target_md) 
{ -- GitLab