Commit 627ca4a3 authored by Wojciech Uss, committed by Sylwester Fraczek

fix repeating passes (#16606)

Parent fe240d96
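What the change does: before this commit, AnalysisConfig::EnableMKLDNN() registered the MKL-DNN passes on the pass builder immediately, and AnalysisConfig::Update() registered them again, so the passes could be appended repeatedly. After the change, EnableMKLDNN() only sets the use_mkldnn_ flag, and the passes are wired up in one place, Update(), where CpuPassStrategy::EnableMKLDNN() guards the registration with the same flag. A minimal standalone sketch of this idempotent-registration pattern (simplified names, not the actual Paddle classes):

#include <iostream>
#include <string>
#include <vector>

// Minimal stand-in for the pass-builder pattern used in this commit:
// registration is guarded by a flag, so calling EnableMKLDNN() any number
// of times appends the MKL-DNN passes only once.
class PassList {
 public:
  void EnableMKLDNN() {
    if (!use_mkldnn_) {  // the guard that prevents repeating passes
      passes_.insert(passes_.begin(), "mkldnn_placement_pass");
      passes_.push_back("conv_bias_mkldnn_fuse_pass");
      passes_.push_back("conv_relu_mkldnn_fuse_pass");
    }
    use_mkldnn_ = true;
  }
  const std::vector<std::string> &passes() const { return passes_; }

 private:
  bool use_mkldnn_{false};
  std::vector<std::string> passes_{"conv_bn_fuse_pass"};
};

int main() {
  PassList p;
  p.EnableMKLDNN();
  p.EnableMKLDNN();  // second call is a no-op instead of duplicating passes
  for (const auto &pass : p.passes()) std::cout << pass << "\n";
}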
@@ -141,7 +141,6 @@ AnalysisConfig::AnalysisConfig(const AnalysisConfig &other) {
 void AnalysisConfig::EnableMKLDNN() {
 #ifdef PADDLE_WITH_MKLDNN
-  pass_builder()->EnableMKLDNN();
   use_mkldnn_ = true;
 #else
   LOG(ERROR) << "Please compile with MKLDNN first to use MKLDNN";
@@ -234,16 +233,13 @@ void AnalysisConfig::Update() {
   }
   if (use_mkldnn_) {
+#ifdef PADDLE_WITH_MKLDNN
     if (!enable_ir_optim_) {
       LOG(ERROR)
           << "EnableMKLDNN() only works when IR optimization is enabled.";
-    }
-#ifdef PADDLE_WITH_MKLDNN
-    pass_builder()->EnableMKLDNN();
-    use_mkldnn_ = true;
-#else
-    LOG(ERROR) << "Please compile with MKLDNN first to use MKLDNN";
-    use_mkldnn_ = false;
+    } else {
+      pass_builder()->EnableMKLDNN();
+    }
 #endif
   }
@@ -255,9 +251,6 @@ void AnalysisConfig::Update() {
     }
 #ifdef PADDLE_WITH_MKLDNN
     pass_builder()->EnableMkldnnQuantizer();
-#else
-    LOG(ERROR) << "Please compile with MKLDNN first to use MkldnnQuantizer";
-    use_mkldnn_quantizer_ = false;
 #endif
   }
......
@@ -64,10 +64,12 @@ void PaddlePassBuilder::DeletePass(size_t idx) {
   passes_.erase(std::begin(passes_) + idx);
 }
 
-void GpuPassStrategy::EnableMKLDNN() {
-  LOG(ERROR) << "GPU not support MKLDNN yet";
+void PaddlePassBuilder::AppendAnalysisPass(const std::string &pass) {
+  analysis_passes_.push_back(pass);
 }
 
+void PaddlePassBuilder::ClearPasses() { passes_.clear(); }
+
 // The following passes works for Anakin sub-graph engine.
 const std::vector<std::string> kAnakinSubgraphPasses({
     "infer_clean_graph_pass",  //
@@ -102,12 +104,12 @@ GpuPassStrategy::GpuPassStrategy() : PassStrategy({}) {
   use_gpu_ = true;
 }
 
-void GpuPassStrategy::EnableMkldnnQuantizer() {
-  LOG(ERROR) << "GPU not support MKL-DNN quantization";
+void GpuPassStrategy::EnableMKLDNN() {
+  LOG(ERROR) << "GPU not support MKLDNN yet";
 }
 
-void PaddlePassBuilder::AppendAnalysisPass(const std::string &pass) {
-  analysis_passes_.push_back(pass);
+void GpuPassStrategy::EnableMkldnnQuantizer() {
+  LOG(ERROR) << "GPU not support MKL-DNN quantization";
 }
 
 CpuPassStrategy::CpuPassStrategy() : PassStrategy({}) {
@@ -135,5 +137,39 @@ CpuPassStrategy::CpuPassStrategy() : PassStrategy({}) {
   });
   use_gpu_ = false;
 }
-void PaddlePassBuilder::ClearPasses() { passes_.clear(); }
+
+void CpuPassStrategy::EnableMKLDNN() {
+// TODO(Superjomn) Consider the way to mix CPU with GPU.
+#ifdef PADDLE_WITH_MKLDNN
+  if (!use_mkldnn_) {
+    passes_.insert(passes_.begin(), "mkldnn_placement_pass");
+
+    for (auto &pass : std::vector<std::string>(
+             {"depthwise_conv_mkldnn_pass",    //
+              "conv_bn_fuse_pass",             // Execute BN passes again to
+              "conv_eltwiseadd_bn_fuse_pass",  // preserve correct pass order
+              "conv_bias_mkldnn_fuse_pass",    //
+              "conv3d_bias_mkldnn_fuse_pass",  //
+              "conv_elementwise_add_mkldnn_fuse_pass",
+              "conv_relu_mkldnn_fuse_pass"})) {
+      passes_.push_back(pass);
+    }
+  }
+  use_mkldnn_ = true;
+#else
+  use_mkldnn_ = false;
+#endif
+}
+
+void CpuPassStrategy::EnableMkldnnQuantizer() {
+#ifdef PADDLE_WITH_MKLDNN
+  if (!use_mkldnn_quantizer_) {
+    passes_.push_back("cpu_quantize_placement_pass");
+  }
+  use_mkldnn_quantizer_ = true;
+#else
+  use_mkldnn_quantizer_ = false;
+#endif
+}
+
 }  // namespace paddle
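The header hunk below is the other half of the fix: CpuPassStrategy's copy constructor now copies the use_gpu_, use_mkldnn_, and use_mkldnn_quantizer_ flags along with the passes. Previously a copied strategy carried the MKL-DNN passes but not the flag, so enabling MKL-DNN on the copy appended the passes a second time. A small sketch of that failure mode and the fix (hypothetical Strategy type, not Paddle code):

#include <string>
#include <vector>

// Sketch (hypothetical Strategy type): the enable-flag must be copied along
// with the pass list, otherwise EnableMKLDNN() on a copy re-appends passes
// the copy already contains -- the "repeating passes" bug.
struct Strategy {
  Strategy() = default;
  Strategy(const Strategy &other)
      : passes(other.passes),
        use_mkldnn(other.use_mkldnn) {}  // before the fix, the flag was not copied

  void EnableMKLDNN() {
    if (!use_mkldnn) passes.push_back("mkldnn_placement_pass");
    use_mkldnn = true;
  }

  std::vector<std::string> passes;
  bool use_mkldnn{false};
};

int main() {
  Strategy a;
  a.EnableMKLDNN();
  Strategy b(a);     // b already holds the MKL-DNN pass and the flag
  b.EnableMKLDNN();  // no duplicate: the copied flag short-circuits registration
  return 0;
}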
@@ -109,43 +109,16 @@ class CpuPassStrategy : public PassStrategy {
   CpuPassStrategy();
 
   explicit CpuPassStrategy(const CpuPassStrategy &other)
-      : PassStrategy(other.AllPasses()) {}
+      : PassStrategy(other.AllPasses()) {
+    use_gpu_ = other.use_gpu_;
+    use_mkldnn_ = other.use_mkldnn_;
+    use_mkldnn_quantizer_ = other.use_mkldnn_quantizer_;
+  }
 
   virtual ~CpuPassStrategy() = default;
 
-  void EnableMKLDNN() override {
-// TODO(Superjomn) Consider the way to mix CPU with GPU.
-#ifdef PADDLE_WITH_MKLDNN
-    if (!use_mkldnn_) {
-      passes_.insert(passes_.begin(), "mkldnn_placement_pass");
-
-      for (auto &pass : std::vector<std::string>(
-               {"depthwise_conv_mkldnn_pass",    //
-                "conv_bn_fuse_pass",             // Execute BN passes again to
-                "conv_eltwiseadd_bn_fuse_pass",  // preserve correct pass order
-                "conv_bias_mkldnn_fuse_pass",    //
-                "conv3d_bias_mkldnn_fuse_pass",  //
-                "conv_relu_mkldnn_fuse_pass",    //
-                "conv_elementwise_add_mkldnn_fuse_pass"})) {
-        passes_.push_back(pass);
-      }
-    }
-    use_mkldnn_ = true;
-#else
-    use_mkldnn_ = false;
-#endif
-  }
-
-  void EnableMkldnnQuantizer() override {
-#ifdef PADDLE_WITH_MKLDNN
-    if (!use_mkldnn_quantizer_) {
-      passes_.push_back("cpu_quantize_placement_pass");
-    }
-    use_mkldnn_quantizer_ = true;
-#else
-    use_mkldnn_quantizer_ = false;
-#endif
-  }
+  void EnableMKLDNN() override;
+  void EnableMkldnnQuantizer() override;
 
  protected:
   bool use_mkldnn_quantizer_{false};
......
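From the user's side, the observable effect is that enabling MKL-DNN is now idempotent. A hedged usage sketch against the PaddlePaddle 1.x C++ inference API (the header name and model path are assumptions; CreatePaddlePredictor is that era's factory function):

// Illustrative sketch only; assumes the PaddlePaddle 1.x inference API.
#include "paddle_inference_api.h"  // header name is an assumption

int main() {
  paddle::AnalysisConfig config;
  config.SetModel("./model_dir");  // hypothetical model directory
  config.EnableMKLDNN();           // now only flips use_mkldnn_
  config.EnableMKLDNN();           // harmless: passes are added once, in Update()

  // Creating the predictor triggers Update(); MKL-DNN passes appear exactly once.
  auto predictor = paddle::CreatePaddlePredictor(config);
  return 0;
}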