Unverified Commit 6ef6a918, authored by Tao Luo, committed by GitHub

Merge pull request #13727 from Sand3r-/mgallus/enable-mkldnn-naive-exe

Enable MKL-DNN in Naive Executor and Analysis Predictor
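
Before the diff itself, a minimal usage sketch of what this change enables from the application side. It is an assumption-laden illustration, not part of the patch: only the _use_mkldnn and use_feed_fetch_ops fields appear in the diff below; the contrib::AnalysisConfig / CreatePaddlePredictor factory call, the model directory, and the main() scaffolding are placeholders for this era of the Paddle C++ inference API.

#include <vector>
#include "paddle/fluid/inference/api/paddle_inference_api.h"

// Hypothetical caller: opt in to the MKL-DNN path that this PR wires
// through AnalysisPredictor::Init() -> NaiveExecutor::EnableMKLDNN().
int main() {
  paddle::contrib::AnalysisConfig config;
  config.model_dir = "./model";  // placeholder model location
  config.use_gpu = false;        // MKL-DNN is a CPU acceleration path
#ifdef PADDLE_WITH_MKLDNN
  config._use_mkldnn = true;     // flag checked in AnalysisPredictor::Init()
#endif
  auto predictor =
      paddle::CreatePaddlePredictor<paddle::contrib::AnalysisConfig>(config);
  std::vector<paddle::PaddleTensor> inputs, outputs;
  // ... fill inputs with feed tensors, then run inference:
  predictor->Run(inputs, &outputs);
  return 0;
}
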
...@@ -146,5 +146,22 @@ void NaiveExecutor::CleanFeedFetchOps() {
  ops_.swap(ops);
}
void NaiveExecutor::EnableMKLDNN(const ProgramDesc &program) {
#ifdef PADDLE_WITH_MKLDNN
VLOG(3) << "use_mkldnn=True";
for (size_t block_id = 0; block_id < program.Size(); ++block_id) {
auto *block = const_cast<ProgramDesc &>(program).MutableBlock(block_id);
for (auto *op : block->AllOps()) {
if (op->HasAttr("use_mkldnn")) {
op->SetAttr("use_mkldnn", true);
}
}
}
#else
LOG(WARNING)
<< "'MKLDNN' is not supported, Please re-compile with WITH_MKLDNN option";
#endif
}
}  // namespace framework
}  // namespace paddle
...@@ -14,6 +14,8 @@
#pragma once
#include <string>
#include <vector>
#include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h" #include "paddle/fluid/framework/scope.h"
...@@ -46,6 +48,8 @@ class NaiveExecutor { ...@@ -46,6 +48,8 @@ class NaiveExecutor {
void CleanFeedFetchOps(); void CleanFeedFetchOps();
void EnableMKLDNN(const ProgramDesc& program);
 protected:
  void CreateVariables(const ProgramDesc& desc, Scope* scope, int block_id);
...
...@@ -71,6 +71,11 @@ bool AnalysisPredictor::Init(
  } else {
    inference_program_ = program;
  }
if (config_._use_mkldnn) {
executor_->EnableMKLDNN(*inference_program_);
}
  executor_->Prepare(scope_.get(), *inference_program_, 0,
                     config_.use_feed_fetch_ops);
...@@ -92,6 +97,7 @@ bool AnalysisPredictor::Run(const std::vector<PaddleTensor> &inputs,
    LOG(ERROR) << "fail to set feed";
    return false;
  }
  // Run the inference program
  // if share variables, we need not create variables
  executor_->Run();
...
...@@ -61,8 +61,6 @@ void SetConfig(AnalysisConfig *cfg) {
  cfg->ir_passes.push_back("fc_gru_fuse_pass");
#ifdef PADDLE_WITH_MKLDNN
  cfg->_use_mkldnn = true;
// disable mkldnn fuse since it should have some bugs
cfg->ir_passes.push_back("conv_relu_mkldnn_fuse_pass");
#endif
}
...