From 5996f6232d3c3be490e27044c32d9f3690a8a6bf Mon Sep 17 00:00:00 2001
From: weishengying <63448337+weishengying@users.noreply.github.com>
Date: Tue, 23 May 2023 14:56:44 +0800
Subject: [PATCH] Enable memory optimize pass although MkLDNN is enabled
 (#53615)

---
 paddle/fluid/inference/api/analysis_config.cc | 14 --------------
 paddle/phi/kernels/onednn/conv_handler.h      | 16 +++++-----------
 2 files changed, 5 insertions(+), 25 deletions(-)

diff --git a/paddle/fluid/inference/api/analysis_config.cc b/paddle/fluid/inference/api/analysis_config.cc
index 612b926bd53..138675c1a48 100644
--- a/paddle/fluid/inference/api/analysis_config.cc
+++ b/paddle/fluid/inference/api/analysis_config.cc
@@ -968,22 +968,8 @@ void AnalysisConfig::Update() {
 #endif
   }
 
-  // TODO(inference): When we enable memory_optimize and mkldnn, PaddleSeg model
-  // fail.
   if (enable_memory_optim_) {
-#ifdef PADDLE_WITH_MKLDNN
-    if (use_mkldnn_) {
-      enable_memory_optim_ = false;
-      LOG_FIRST_N(WARNING, 1)
-          << "It is detected that mkldnn and memory_optimize_pass are enabled "
-             "at the same time, but they are not supported yet. Currently, "
-             "memory_optimize_pass is explicitly disabled";
-    } else {
-      pass_builder()->AppendAnalysisPass("memory_optimize_pass");
-    }
-#else
     pass_builder()->AppendAnalysisPass("memory_optimize_pass");
-#endif
   }
 
   if (use_lite_) {
diff --git a/paddle/phi/kernels/onednn/conv_handler.h b/paddle/phi/kernels/onednn/conv_handler.h
index fd6cf0c5778..d41d1fdf0a0 100644
--- a/paddle/phi/kernels/onednn/conv_handler.h
+++ b/paddle/phi/kernels/onednn/conv_handler.h
@@ -18,7 +18,9 @@
 #include "paddle/phi/backends/onednn/onednn_reuse.h"
 #include "paddle/phi/core/expect.h"
 #include "paddle/phi/core/macros.h"
+#include "paddle/phi/core/tensor_utils.h"
 #include "paddle/phi/kernels/cpu/conv_util.h"
+
 namespace phi {
 namespace onednn {
 
@@ -743,17 +745,9 @@ class ConvOneDNNHandlerT
   std::shared_ptr<dnnl::memory> AcquireDstMemoryWithResidual(
       phi::DenseTensor* output, const phi::DenseTensor* residual_param) {
     std::shared_ptr<dnnl::memory> dst_memory_p;
-    if (residual_param->mem_desc() != this->fwd_pd_->dst_desc()) {
-      auto residual_memory_p = this->AcquireResidualMemory(residual_param);
-      dst_memory_p = this->template AcquireDstMemory<T_out>(output);
-      this->AcquireReorder(residual_memory_p, dst_memory_p);
-    } else {
-      // Changing ShareDataWith to TensorCopy results in performance drop
-      // on ResNet architectures
-      // (https://github.com/PaddlePaddle/Paddle/issues/22964)
-      output->ShareDataWith(*residual_param);
-      dst_memory_p = this->template AcquireDstMemory<T_out>(output);
-    }
+    auto residual_memory_p = this->AcquireResidualMemory(residual_param);
+    dst_memory_p = this->template AcquireDstMemory<T_out>(output);
+    this->AcquireReorder(residual_memory_p, dst_memory_p);
     return dst_memory_p;
   }
 };
-- 
GitLab