Unverified commit 0038bfbd, authored by Michał Gallus and committed by GitHub

Prevent loading of warmup data in analyzer_int8 if enable_int8 is set to false (#22857)

Parent 4ea95b6f
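The change gates the test's whole warmup path behind the existing FLAGS_enable_int8 gflag, so the warmup batch is never materialized when the int8 path is not exercised. Below is a minimal standalone sketch of that guard pattern, using plain gflags and a hypothetical GetWarmupData() stand-in; it is not the actual Paddle test, and the flag defaults are assumptions:

#include <gflags/gflags.h>

#include <cstdio>
#include <memory>
#include <vector>

DEFINE_bool(enable_int8, true, "Enable the int8 quantization test path.");
DEFINE_int32(warmup_batch_size, 100, "Batch size for quantization warmup.");

// Hypothetical stand-in for the test's GetWarmupData(): the real function
// copies FLAGS_warmup_batch_size images (and labels) out of the test data.
std::shared_ptr<std::vector<float>> GetWarmupData() {
  return std::make_shared<std::vector<float>>(FLAGS_warmup_batch_size);
}

int main(int argc, char** argv) {
  gflags::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true);
  if (FLAGS_enable_int8) {
    // Warmup data is prepared only when quantization is actually requested.
    auto warmup_data = GetWarmupData();
    std::printf("prepared %zu warmup samples\n", warmup_data->size());
  } else {
    std::printf("int8 disabled; warmup data skipped\n");
  }
  return 0;
}

Run the sketch with --enable_int8=false and the warmup branch is skipped entirely, which is the behavior the commit title describes.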
@@ -75,8 +75,10 @@ std::shared_ptr<std::vector<PaddleTensor>> GetWarmupData(
   PADDLE_ENFORCE_LE(static_cast<size_t>(num_images), all_test_data_size,
                     platform::errors::InvalidArgument(
                         "The requested quantization warmup data size must be "
-                        "smaller than the test data size. But received warmup "
-                        "size is %d and test data size is %d",
+                        "lower or equal to the test data size. But received"
+                        "warmup size is %d and test data size is %d. Please "
+                        "use --warmup_batch_size parameter to set smaller "
+                        "warmup batch size.",
                         num_images, all_test_data_size));
   PaddleTensor images;
@@ -156,6 +158,7 @@ TEST(Analyzer_int8_image_classification, quantization) {
   std::vector<std::vector<PaddleTensor>> input_slots_all;
   SetInput(&input_slots_all);
 
+  if (FLAGS_enable_int8) {
   // prepare warmup batch from input data read earlier
   // warmup batch size can be different than batch size
   std::shared_ptr<std::vector<PaddleTensor>> warmup_data =
@@ -164,7 +167,9 @@ TEST(Analyzer_int8_image_classification, quantization) {
   // configure quantizer
   q_cfg.EnableMkldnnQuantizer();
   q_cfg.mkldnn_quantizer_config()->SetWarmupData(warmup_data);
-  q_cfg.mkldnn_quantizer_config()->SetWarmupBatchSize(FLAGS_warmup_batch_size);
+  q_cfg.mkldnn_quantizer_config()->SetWarmupBatchSize(
+      FLAGS_warmup_batch_size);
+  }
 
   CompareQuantizedAndAnalysis(&cfg, &q_cfg, input_slots_all);
 }
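The first hunk only rewords the diagnostic, but it spells out the invariant GetWarmupData() enforces: the requested warmup size must be lower or equal to the test data size. The multi-line message is assembled from adjacent string literals that the compiler concatenates. A minimal plain-C++ sketch of that check, with an illustrative EnforceWarmupFitsTestData() standing in for Paddle's PADDLE_ENFORCE_LE macro:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Illustrative stand-in for
// PADDLE_ENFORCE_LE(num_images, all_test_data_size, ...).
void EnforceWarmupFitsTestData(size_t num_images, size_t all_test_data_size) {
  if (num_images > all_test_data_size) {
    // Adjacent string literals are concatenated at compile time,
    // exactly as in the PADDLE_ENFORCE_LE message in the hunk above.
    std::fprintf(stderr,
                 "The requested quantization warmup data size must be "
                 "lower or equal to the test data size. But received "
                 "warmup size is %zu and test data size is %zu.\n",
                 num_images, all_test_data_size);
    std::abort();
  }
}

int main() {
  EnforceWarmupFitsTestData(100, 50000);    // fine: 100 <= 50000
  EnforceWarmupFitsTestData(60000, 50000);  // aborts with the diagnostic above
  return 0;
}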