From 0038bfbd1d57a83d76622f3830c93589f2b46162 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C5=82=20Gallus?= <michal.gallus@intel.com>
Date: Fri, 6 Mar 2020 15:09:37 +0100
Subject: [PATCH] Prevent loading of warmup data in analyzer_int8 if
 enable_int8 is set to false (#22857)

---
 ...alyzer_int8_image_classification_tester.cc | 27 ++++++++++++++++-----------
 1 file changed, 16 insertions(+), 11 deletions(-)

diff --git a/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc b/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc
index 3e337adbc2a..eb2c7935026 100644
--- a/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc
@@ -75,8 +75,10 @@ std::shared_ptr<std::vector<PaddleTensor>> GetWarmupData(
   PADDLE_ENFORCE_LE(static_cast<size_t>(num_images), all_test_data_size,
                     platform::errors::InvalidArgument(
                         "The requested quantization warmup data size must be "
-                        "smaller than the test data size. But received warmup "
-                        "size is %d and test data size is %d",
+                        "lower or equal to the test data size. But received "
+                        "warmup size is %d and test data size is %d. Please "
+                        "use --warmup_batch_size parameter to set smaller "
+                        "warmup batch size.",
                         num_images, all_test_data_size));
 
   PaddleTensor images;
@@ -156,15 +158,18 @@ TEST(Analyzer_int8_image_classification, quantization) {
   std::vector<std::vector<PaddleTensor>> input_slots_all;
   SetInput(&input_slots_all);
 
-  // prepare warmup batch from input data read earlier
-  // warmup batch size can be different than batch size
-  std::shared_ptr<std::vector<PaddleTensor>> warmup_data =
-      GetWarmupData(input_slots_all);
-
-  // configure quantizer
-  q_cfg.EnableMkldnnQuantizer();
-  q_cfg.mkldnn_quantizer_config()->SetWarmupData(warmup_data);
-  q_cfg.mkldnn_quantizer_config()->SetWarmupBatchSize(FLAGS_warmup_batch_size);
+  if (FLAGS_enable_int8) {
+    // prepare warmup batch from input data read earlier
+    // warmup batch size can be different than batch size
+    std::shared_ptr<std::vector<PaddleTensor>> warmup_data =
+        GetWarmupData(input_slots_all);
+
+    // configure quantizer
+    q_cfg.EnableMkldnnQuantizer();
+    q_cfg.mkldnn_quantizer_config()->SetWarmupData(warmup_data);
+    q_cfg.mkldnn_quantizer_config()->SetWarmupBatchSize(
+        FLAGS_warmup_batch_size);
+  }
 
   CompareQuantizedAndAnalysis(&cfg, &q_cfg, input_slots_all);
 }
-- 
GitLab