diff --git a/paddle/fluid/inference/tests/api/analyzer_bfloat16_image_classification_tester.cc b/paddle/fluid/inference/tests/api/analyzer_bfloat16_image_classification_tester.cc
index f267f0f28d685e51f0359a345c52fbbe4a49fa16..267fb17ee6baa6ed8c035d32e29558eae24f3c19 100644
--- a/paddle/fluid/inference/tests/api/analyzer_bfloat16_image_classification_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_bfloat16_image_classification_tester.cc
@@ -16,6 +16,8 @@ limitations under the License. */
 #include "paddle/fluid/inference/tests/api/tester_helper.h"
 #include "paddle/fluid/platform/cpu_info.h"
 
+DEFINE_bool(enable_mkldnn, true, "Enable MKLDNN");
+
 namespace paddle {
 namespace inference {
 namespace analysis {
@@ -31,7 +33,7 @@ void SetConfig(AnalysisConfig *cfg) {
   cfg->SwitchIrOptim();
   cfg->SwitchSpecifyInputNames();
   cfg->SetCpuMathLibraryNumThreads(FLAGS_num_threads);
-  cfg->EnableMKLDNN();
+  if (FLAGS_enable_mkldnn) cfg->EnableMKLDNN();
 }
 
 TEST(Analyzer_bfloat16_image_classification, bfloat16) {
@@ -44,7 +46,7 @@ TEST(Analyzer_bfloat16_image_classification, bfloat16) {
   // read data from file and prepare batches with test data
   std::vector<std::vector<PaddleTensor>> input_slots_all;
   SetInputs(&input_slots_all);
-  if (FLAGS_enable_bf16 &&
+  if (FLAGS_enable_mkldnn && FLAGS_enable_bf16 &&
       platform::MayIUse(platform::cpu_isa_t::avx512_bf16)) {
     b_cfg.EnableMkldnnBfloat16();
   } else {
diff --git a/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc b/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc
index b07163b518b529e7ab01107e1f0d217443f574bd..d11b5f0c218f22e53a43959799e57e25baba941f 100644
--- a/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc
@@ -17,6 +17,8 @@ limitations under the License. */
 #include "paddle/fluid/inference/api/paddle_analysis_config.h"
 #include "paddle/fluid/inference/tests/api/tester_helper.h"
 
+DEFINE_bool(enable_mkldnn, true, "Enable MKLDNN");
+
 namespace paddle {
 namespace inference {
 namespace analysis {
@@ -32,7 +34,7 @@ void SetConfig(AnalysisConfig *cfg) {
   cfg->SwitchIrOptim();
   cfg->SwitchSpecifyInputNames();
   cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
-  cfg->EnableMKLDNN();
+  if (FLAGS_enable_mkldnn) cfg->EnableMKLDNN();
 }
 
 TEST(Analyzer_int8_image_classification, quantization) {
@@ -46,7 +48,7 @@ TEST(Analyzer_int8_image_classification, quantization) {
   std::vector<std::vector<PaddleTensor>> input_slots_all;
   SetInputs(&input_slots_all);
 
-  if (FLAGS_enable_int8) {
+  if (FLAGS_enable_mkldnn && FLAGS_enable_int8) {
     // prepare warmup batch from input data read earlier
     // warmup batch size can be different than batch size
     std::shared_ptr<std::vector<PaddleTensor>> warmup_data =
diff --git a/paddle/fluid/inference/tests/api/analyzer_int8_object_detection_tester.cc b/paddle/fluid/inference/tests/api/analyzer_int8_object_detection_tester.cc
index 91a3233b9851f1def7717d04c4c9df5275a805ee..57ab1b00908b1d4974b8a4de68d013b61555f1a9 100644
--- a/paddle/fluid/inference/tests/api/analyzer_int8_object_detection_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_int8_object_detection_tester.cc
@@ -17,6 +17,8 @@ limitations under the License. */
 #include "paddle/fluid/inference/api/paddle_analysis_config.h"
 #include "paddle/fluid/inference/tests/api/tester_helper.h"
 
+DEFINE_bool(enable_mkldnn, true, "Enable MKLDNN");
+
 // setting iterations to 0 means processing the whole dataset
 namespace paddle {
 namespace inference {
@@ -28,7 +30,7 @@ void SetConfig(AnalysisConfig *cfg) {
   cfg->SwitchIrOptim(true);
   cfg->SwitchSpecifyInputNames(false);
   cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
-  cfg->EnableMKLDNN();
+  if (FLAGS_enable_mkldnn) cfg->EnableMKLDNN();
 }
 
 std::vector<size_t> ReadObjectsNum(std::ifstream &file, size_t offset,
@@ -268,13 +270,16 @@ TEST(Analyzer_int8_mobilenet_ssd, quantization) {
       GetWarmupData(input_slots_all);
 
   // configure quantizer
-  q_cfg.EnableMkldnnQuantizer();
-  q_cfg.mkldnn_quantizer_config();
-  std::unordered_set<std::string> quantize_operators(
-      {"conv2d", "depthwise_conv2d", "prior_box", "transpose2", "reshape2"});
-  q_cfg.mkldnn_quantizer_config()->SetEnabledOpTypes(quantize_operators);
-  q_cfg.mkldnn_quantizer_config()->SetWarmupData(warmup_data);
-  q_cfg.mkldnn_quantizer_config()->SetWarmupBatchSize(FLAGS_warmup_batch_size);
+  if (FLAGS_enable_mkldnn) {
+    q_cfg.EnableMkldnnQuantizer();
+    q_cfg.mkldnn_quantizer_config();
+    std::unordered_set<std::string> quantize_operators(
+        {"conv2d", "depthwise_conv2d", "prior_box", "transpose2", "reshape2"});
+    q_cfg.mkldnn_quantizer_config()->SetEnabledOpTypes(quantize_operators);
+    q_cfg.mkldnn_quantizer_config()->SetWarmupData(warmup_data);
+    q_cfg.mkldnn_quantizer_config()->SetWarmupBatchSize(
+        FLAGS_warmup_batch_size);
+  }
 
   // 0 is avg_cost, 1 is top1_acc, 2 is top5_acc or mAP
   CompareQuantizedAndAnalysis(&cfg, &q_cfg, input_slots_all, 2);
diff --git a/paddle/fluid/inference/tests/api/analyzer_quant_image_classification_tester.cc b/paddle/fluid/inference/tests/api/analyzer_quant_image_classification_tester.cc
index a5a3e60d04b90795f4caf43722e5f7a46e4ed13a..5e867fc87fea3564f22d17ceb421c1b4f3e0e7e3 100644
--- a/paddle/fluid/inference/tests/api/analyzer_quant_image_classification_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_quant_image_classification_tester.cc
@@ -17,6 +17,8 @@ limitations under the License. */
 #include "paddle/fluid/inference/api/paddle_analysis_config.h"
 #include "paddle/fluid/inference/tests/api/tester_helper.h"
 
+DEFINE_bool(enable_mkldnn, true, "Enable MKLDNN");
+
 namespace paddle {
 namespace inference {
 namespace analysis {
@@ -27,7 +29,7 @@ void SetConfig(AnalysisConfig *cfg, std::string model_path) {
   cfg->SwitchIrOptim(false);
   cfg->SwitchSpecifyInputNames();
   cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
-  cfg->EnableMKLDNN();
+  if (FLAGS_enable_mkldnn) cfg->EnableMKLDNN();
 }
 
 template <typename T>
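
The patch above gates every MKLDNN-specific call in the four testers behind a new enable_mkldnn gflag, so the suites can also exercise the plain CPU path. Below is a minimal standalone sketch of the gflags pattern the patch relies on; DEFINE_bool and FLAGS_enable_mkldnn come straight from the diff, while the main() wrapper and the printed messages are illustrative only, not part of the patch.

#include <gflags/gflags.h>
#include <iostream>

// DEFINE_bool(name, default, help) generates a global bool named FLAGS_name
// that can be flipped on the command line, e.g. --enable_mkldnn=false.
DEFINE_bool(enable_mkldnn, true, "Enable MKLDNN");

int main(int argc, char* argv[]) {
  // Consume recognized --flags from argv before the program uses it.
  gflags::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true);
  if (FLAGS_enable_mkldnn) {
    // The testers call cfg->EnableMKLDNN() on this branch.
    std::cout << "MKLDNN enabled\n";
  } else {
    std::cout << "MKLDNN disabled\n";
  }
  return 0;
}

With the patch applied, passing --enable_mkldnn=false to any of the modified test binaries therefore skips EnableMKLDNN(), EnableMkldnnBfloat16(), and EnableMkldnnQuantizer() with a single switch.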