未验证 提交 f1d0a8ae 编写于 作者: J juncaipeng 提交者: GitHub

add int8 benchmark, test=develop (#2338)

上级 32a38a86
......@@ -32,7 +32,9 @@ DEFINE_string(input_shape,
DEFINE_string(result_filename, "", "save test result");
DEFINE_bool(run_model_optimize,
false,
"apply model_optimize_tool to model, use optimized model to test");
"if set true, apply model_optimize_tool to model, use optimized "
"model to test");
DEFINE_bool(is_quantized_model, false, "if set true, test the quantized model");
namespace paddle {
namespace lite_api {
......@@ -42,12 +44,14 @@ void OutputOptModel(const std::string& load_model_dir,
const std::vector<std::vector<int64_t>>& input_shapes) {
lite_api::CxxConfig config;
config.set_model_dir(load_model_dir);
config.set_valid_places({
Place{TARGET(kARM), PRECISION(kFloat)},
Place{TARGET(kARM), PRECISION(kInt8)},
Place{TARGET(kX86), PRECISION(kFloat)},
Place{TARGET(kOpenCL), PRECISION(kFloat)},
});
std::vector<Place> vaild_places = {Place{TARGET(kARM), PRECISION(kFloat)},
Place{TARGET(kX86), PRECISION(kFloat)},
Place{TARGET(kOpenCL), PRECISION(kFloat)}};
if (FLAGS_is_quantized_model) {
vaild_places.insert(vaild_places.begin(),
Place{TARGET(kARM), PRECISION(kInt8)});
}
config.set_valid_places(vaild_places);
auto predictor = lite_api::CreatePaddlePredictor(config);
int ret = system(
......
......@@ -8,6 +8,7 @@ then
echo "Usage:"
echo " sh benchmark.sh <benchmark_bin_path> <benchmark_models_path> <result_filename>"
echo " sh benchmark.sh <benchmark_bin_path> <benchmark_models_path> <result_filename> <is_run_model_optimize: [true|false]>"
echo " sh benchmark.sh <benchmark_bin_path> <benchmark_models_path> <result_filename> <is_run_model_optimize: [true|false]> <is_run_quantized_model: [true|false]>"
exit
fi
......@@ -20,6 +21,7 @@ RESULT_FILENAME=$3
WARMUP=10
REPEATS=30
IS_RUN_MODEL_OPTIMIZE=false
IS_RUN_QUANTIZED_MODEL=false
NUM_THREADS_LIST=(1 2 4)
MODELS_LIST=$(ls $MODELS_DIR)
......@@ -28,6 +30,10 @@ if [ $# -gt 3 ];
then
IS_RUN_MODEL_OPTIMIZE=$4
fi
if [ $# -gt 4 ];
then
IS_RUN_QUANTIZED_MODEL=$5
fi
# Adb push benchmark_bin, models
adb push $BENCHMARK_BIN $ANDROID_DIR/benchmark_bin
......@@ -46,7 +52,8 @@ for threads in ${NUM_THREADS_LIST[@]}; do
--repeats=$REPEATS \
--threads=$threads \
--result_filename=$ANDROID_DIR/$RESULT_FILENAME \
--run_model_optimize=$IS_RUN_MODEL_OPTIMIZE"
--run_model_optimize=$IS_RUN_MODEL_OPTIMIZE \
--is_quantized_model=$IS_RUN_QUANTIZED_MODEL"
done
adb shell "echo >> $ANDROID_DIR/$RESULT_FILENAME"
done
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册