diff --git a/lite/api/benchmark.cc b/lite/api/benchmark.cc
index a423cd07a2e53d9983706864fd86d7f3d1918ec0..462a5e2381acf3cc86ca81002a282933f01ee049 100644
--- a/lite/api/benchmark.cc
+++ b/lite/api/benchmark.cc
@@ -32,7 +32,9 @@ DEFINE_string(input_shape,
 DEFINE_string(result_filename, "", "save test result");
 DEFINE_bool(run_model_optimize,
             false,
-            "apply model_optimize_tool to model, use optimized model to test");
+            "if set true, apply model_optimize_tool to model, use optimized "
+            "model to test");
+DEFINE_bool(is_quantized_model, false, "if set true, test the quantized model");
 
 namespace paddle {
 namespace lite_api {
@@ -42,12 +44,14 @@ void OutputOptModel(const std::string& load_model_dir,
                     const std::vector<std::vector<int64_t>>& input_shapes) {
   lite_api::CxxConfig config;
   config.set_model_dir(load_model_dir);
-  config.set_valid_places({
-      Place{TARGET(kARM), PRECISION(kFloat)},
-      Place{TARGET(kARM), PRECISION(kInt8)},
-      Place{TARGET(kX86), PRECISION(kFloat)},
-      Place{TARGET(kOpenCL), PRECISION(kFloat)},
-  });
+  std::vector<Place> vaild_places = {Place{TARGET(kARM), PRECISION(kFloat)},
+                                     Place{TARGET(kX86), PRECISION(kFloat)},
+                                     Place{TARGET(kOpenCL), PRECISION(kFloat)}};
+  if (FLAGS_is_quantized_model) {
+    vaild_places.insert(vaild_places.begin(),
+                        Place{TARGET(kARM), PRECISION(kInt8)});
+  }
+  config.set_valid_places(vaild_places);
   auto predictor = lite_api::CreatePaddlePredictor(config);
 
   int ret = system(
diff --git a/lite/tools/benchmark.sh b/lite/tools/benchmark.sh
index c3261c6d4409842d6821179eb8b4e404a28d4c6b..683271fa8f5c97a39099429ed003ba7414de1132 100644
--- a/lite/tools/benchmark.sh
+++ b/lite/tools/benchmark.sh
@@ -8,6 +8,7 @@ then
     echo "Usage:"
     echo "  sh benchmark.sh <benchmark_bin_path> <models_dir> <result_filename>"
    echo "  sh benchmark.sh <benchmark_bin_path> <models_dir> <result_filename> <is_run_model_optimize: [true|false]>"
+    echo "  sh benchmark.sh <benchmark_bin_path> <models_dir> <result_filename> <is_run_model_optimize: [true|false]> <is_run_quantized_model: [true|false]>"
     exit
 fi
 
@@ -20,6 +21,7 @@ RESULT_FILENAME=$3
 WARMUP=10
 REPEATS=30
 IS_RUN_MODEL_OPTIMIZE=false
+IS_RUN_QUANTIZED_MODEL=false
 NUM_THREADS_LIST=(1 2 4)
 MODELS_LIST=$(ls $MODELS_DIR)
 
@@ -28,6 +30,10 @@ if [ $# -gt 3 ];
 then
     IS_RUN_MODEL_OPTIMIZE=$4
 fi
+if [ $# -gt 4 ];
+then
+    IS_RUN_QUANTIZED_MODEL=$5
+fi
 
 # Adb push benchmark_bin, models
 adb push $BENCHMARK_BIN $ANDROID_DIR/benchmark_bin
@@ -46,7 +52,8 @@ for threads in ${NUM_THREADS_LIST[@]}; do
                --repeats=$REPEATS \
                --threads=$threads \
                --result_filename=$ANDROID_DIR/$RESULT_FILENAME \
-               --run_model_optimize=$IS_RUN_MODEL_OPTIMIZE"
+               --run_model_optimize=$IS_RUN_MODEL_OPTIMIZE \
+               --is_quantized_model=$IS_RUN_QUANTIZED_MODEL"
     done
     adb shell "echo >> $ANDROID_DIR/$RESULT_FILENAME"
 done