diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index 531d4110dc298e9e7dc9004b1c1fcbcaf9a0ca5a..f1a4a4df5067a7212f75fce3d2e22339340ebd47 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -226,21 +226,18 @@ void AnalysisPredictor::OptimizeInferenceProgram() {
   argument_.origin_program_desc.reset(
       new ProgramDesc(*inference_program_->Proto()));
 
-  bool use_mkldnn = config_._use_mkldnn;
   switch (config_.ir_mode) {
     case contrib::AnalysisConfig::IrPassMode::kExclude:
       Analyzer()
           .IncludeAllIrPasses()
-          .SetUseMkldnn(use_mkldnn)
-          .DisableIrPasses(use_mkldnn ? config_.ir_mkldnn_passes
-                                      : config_.ir_passes)
+          .SetUseMkldnn(config_._use_mkldnn)
+          .DisableIrPasses(config_.ir_passes)
           .Run(&argument_);
       break;
     case contrib::AnalysisConfig::IrPassMode::kInclude:
       Analyzer()
-          .SetUseMkldnn(use_mkldnn)
-          .IncludeIrPasses(use_mkldnn ? config_.ir_mkldnn_passes
-                                      : config_.ir_passes)
+          .SetUseMkldnn(config_._use_mkldnn)
+          .IncludeIrPasses(config_.ir_passes)
           .Run(&argument_);
       break;
     default:
diff --git a/paddle/fluid/inference/api/paddle_inference_api.h b/paddle/fluid/inference/api/paddle_inference_api.h
index 3416371fdbe9d5e80b90b9fb877896e1e78e101d..ab4fa820e6633db76090e4052d7f075e4e07db62 100644
--- a/paddle/fluid/inference/api/paddle_inference_api.h
+++ b/paddle/fluid/inference/api/paddle_inference_api.h
@@ -262,7 +262,6 @@ struct AnalysisConfig : public NativeConfig {
   void SetIncludeMode() {
     ir_mode = IrPassMode::kInclude;
     ir_passes = {"infer_clean_graph_pass"};
-    ir_mkldnn_passes = {"infer_clean_graph_pass"};
   }
 
   // Determine whether to perform graph optimization.
@@ -271,8 +270,6 @@ struct AnalysisConfig : public NativeConfig {
   IrPassMode ir_mode{IrPassMode::kExclude};
   // passes to be excluded/included
   std::vector<std::string> ir_passes{"embedding_fc_lstm_fuse_pass"};
-  // passes to be excluded/included when MKL-DNN is enabled
-  std::vector<std::string> ir_mkldnn_passes{"embedding_fc_lstm_fuse_pass"};
 
   // NOT stable yet.
   bool use_feed_fetch_ops{true};
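
Note (not part of the patch): a minimal caller-side sketch of how the simplified configuration is exercised after this change, i.e. a single ir_passes list plus the _use_mkldnn flag instead of a separate ir_mkldnn_passes list. It assumes only the members visible in the diff (_use_mkldnn, ir_mode, ir_passes, SetIncludeMode()), the model_dir member inherited from NativeConfig, and the generic paddle::CreatePaddlePredictor factory; the model path and function name are hypothetical.

    // Hypothetical usage sketch, assuming the AnalysisConfig members shown in the diff above.
    #include <memory>

    #include "paddle/fluid/inference/api/paddle_inference_api.h"

    std::unique_ptr<paddle::PaddlePredictor> BuildAnalysisPredictor() {
      paddle::contrib::AnalysisConfig config;
      config.model_dir = "./my_model";  // assumed model location (NativeConfig member)
      // With ir_mkldnn_passes removed, MKL-DNN is controlled solely by this flag,
      // which OptimizeInferenceProgram() forwards via Analyzer::SetUseMkldnn().
      config._use_mkldnn = true;
      // kInclude mode: run only the passes listed in ir_passes
      // (reset to {"infer_clean_graph_pass"} by SetIncludeMode()).
      config.SetIncludeMode();
      return paddle::CreatePaddlePredictor(config);
    }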