diff --git a/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc b/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc
index fbf67d933786e3ee2baab7a20911da2837cdce4d..ae78e07304f1d0e9905f78498f3a0c5ca2a64fe7 100644
--- a/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_int8_image_classification_tester.cc
@@ -138,7 +138,7 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs,
   }
 }
 
-TEST(Analyzer_int8_resnet50, quantization) {
+TEST(Analyzer_int8_image_classification, quantization) {
   AnalysisConfig cfg;
   SetConfig(&cfg);
 
diff --git a/paddle/fluid/inference/tests/api/tester_helper.h b/paddle/fluid/inference/tests/api/tester_helper.h
index eb786196a88482817617f0156327be95e67bd4ad..05936458cefbadecb0ab15afdc3b48d0fd4d64ce 100644
--- a/paddle/fluid/inference/tests/api/tester_helper.h
+++ b/paddle/fluid/inference/tests/api/tester_helper.h
@@ -336,14 +336,20 @@ void PredictionRun(PaddlePredictor *predictor,
 #ifdef WITH_GPERFTOOLS
   ProfilerStart("paddle_inference.prof");
 #endif
+  int predicted_num = 0;
   if (!FLAGS_zero_copy) {
-    run_timer.tic();
     for (int i = 0; i < iterations; i++) {
+      run_timer.tic();
       for (int j = 0; j < num_times; j++) {
         predictor->Run(inputs[i], &(*outputs)[i], FLAGS_batch_size);
       }
+      elapsed_time += run_timer.toc();
+
+      predicted_num += FLAGS_batch_size;
+      if (predicted_num % 100 == 0) {
+        LOG(INFO) << predicted_num << " samples";
+      }
     }
-    elapsed_time = run_timer.toc();
   } else {
     for (int i = 0; i < iterations; i++) {
       ConvertPaddleTensorToZeroCopyTensor(predictor, inputs[i]);
@@ -352,8 +358,14 @@ void PredictionRun(PaddlePredictor *predictor,
         predictor->ZeroCopyRun();
       }
       elapsed_time += run_timer.toc();
+
+      predicted_num += FLAGS_batch_size;
+      if (predicted_num % 100 == 0) {
+        LOG(INFO) << predicted_num << " samples";
+      }
     }
   }
+
 #ifdef WITH_GPERFTOOLS
   ProfilerStop();
 #endif
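
The core change in `PredictionRun` is moving the `tic()`/`toc()` pair inside the iteration loop so the elapsed time is accumulated per batch, and logging progress every 100 predicted samples. Below is a minimal, self-contained sketch of that pattern; it uses `std::chrono` and `std::cout` in place of Paddle's `Timer` and `LOG(INFO)`, and `RunBatch`, `iterations`, and `batch_size` are hypothetical stand-ins, not names from the patch.

```cpp
// Sketch of per-iteration timing with periodic progress logging,
// mirroring the accumulation pattern introduced in PredictionRun.
#include <chrono>
#include <iostream>

// Stand-in for predictor->Run(...) / predictor->ZeroCopyRun().
static void RunBatch(int /*batch_size*/) {}

int main() {
  const int iterations = 250;   // hypothetical number of input batches
  const int batch_size = 1;     // hypothetical batch size
  double elapsed_ms = 0.0;
  int predicted_num = 0;

  for (int i = 0; i < iterations; ++i) {
    auto start = std::chrono::steady_clock::now();  // per-iteration tic()
    RunBatch(batch_size);
    auto stop = std::chrono::steady_clock::now();   // per-iteration toc()
    elapsed_ms +=
        std::chrono::duration<double, std::milli>(stop - start).count();

    predicted_num += batch_size;
    if (predicted_num % 100 == 0) {                 // periodic progress log
      std::cout << predicted_num << " samples\n";
    }
  }
  std::cout << "total inference time: " << elapsed_ms << " ms\n";
  return 0;
}
```

Accumulating per iteration (`elapsed_time += ...`) rather than timing the whole loop once keeps the total measurement correct when progress logging or per-batch preprocessing happens between runs, and it matches the accumulation already used in the zero-copy branch.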