Unverified commit f1d0a8ae, authored by juncaipeng, committed by GitHub

add int8 benchmark, test=develop (#2338)

Parent commit: 32a38a86
@@ -32,7 +32,9 @@ DEFINE_string(input_shape,
 DEFINE_string(result_filename, "", "save test result");
 DEFINE_bool(run_model_optimize,
             false,
-            "apply model_optimize_tool to model, use optimized model to test");
+            "if set true, apply model_optimize_tool to model, use optimized "
+            "model to test");
+DEFINE_bool(is_quantized_model, false, "if set true, test the quantized model");
 
 namespace paddle {
 namespace lite_api {
@@ -42,12 +44,14 @@ void OutputOptModel(const std::string& load_model_dir,
                     const std::vector<std::vector<int64_t>>& input_shapes) {
   lite_api::CxxConfig config;
   config.set_model_dir(load_model_dir);
-  config.set_valid_places({
-      Place{TARGET(kARM), PRECISION(kFloat)},
-      Place{TARGET(kARM), PRECISION(kInt8)},
-      Place{TARGET(kX86), PRECISION(kFloat)},
-      Place{TARGET(kOpenCL), PRECISION(kFloat)},
-  });
+  std::vector<Place> vaild_places = {Place{TARGET(kARM), PRECISION(kFloat)},
+                                     Place{TARGET(kX86), PRECISION(kFloat)},
+                                     Place{TARGET(kOpenCL), PRECISION(kFloat)}};
+  if (FLAGS_is_quantized_model) {
+    vaild_places.insert(vaild_places.begin(),
+                        Place{TARGET(kARM), PRECISION(kInt8)});
+  }
+  config.set_valid_places(vaild_places);
   auto predictor = lite_api::CreatePaddlePredictor(config);
   int ret = system(
...
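The new --is_quantized_model flag controls kernel selection: when it is true, Place{TARGET(kARM), PRECISION(kInt8)} is inserted at the front of the valid-places list, so Int8 ARM kernels take priority when the quantized model is optimized and run. A minimal sketch of invoking the benchmark binary directly over adb, assuming it has already been pushed to the device as benchmark.sh does; the --model_dir and --warmup flag names and the model path are assumptions, while the remaining flags appear in this diff:

    # Hypothetical direct run of the benchmark binary on an Int8 (quantized) model.
    # --model_dir and --warmup are assumed flag names; the rest come from this diff.
    adb shell "/data/local/tmp/benchmark_bin \
        --model_dir=/data/local/tmp/mobilenet_v1_int8 \
        --warmup=10 \
        --repeats=30 \
        --threads=1 \
        --result_filename=/data/local/tmp/result.txt \
        --run_model_optimize=true \
        --is_quantized_model=true"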
@@ -8,6 +8,7 @@ then
     echo "Usage:"
     echo "  sh benchmark.sh <benchmark_bin_path> <benchmark_models_path> <result_filename>"
     echo "  sh benchmark.sh <benchmark_bin_path> <benchmark_models_path> <result_filename> <is_run_model_optimize: [true|false]>"
+    echo "  sh benchmark.sh <benchmark_bin_path> <benchmark_models_path> <result_filename> <is_run_model_optimize: [true|false]> <is_run_quantized_model: [true|false]>"
     exit
 fi
@@ -20,6 +21,7 @@ RESULT_FILENAME=$3
 WARMUP=10
 REPEATS=30
 IS_RUN_MODEL_OPTIMIZE=false
+IS_RUN_QUANTIZED_MODEL=false
 NUM_THREADS_LIST=(1 2 4)
 MODELS_LIST=$(ls $MODELS_DIR)
@@ -28,6 +30,10 @@ if [ $# -gt 3 ];
 then
     IS_RUN_MODEL_OPTIMIZE=$4
 fi
+if [ $# -gt 4 ];
+then
+    IS_RUN_QUANTIZED_MODEL=$5
+fi
 
 # Adb push benchmark_bin, models
 adb push $BENCHMARK_BIN $ANDROID_DIR/benchmark_bin
@@ -46,7 +52,8 @@ for threads in ${NUM_THREADS_LIST[@]}; do
         --repeats=$REPEATS \
         --threads=$threads \
         --result_filename=$ANDROID_DIR/$RESULT_FILENAME \
-        --run_model_optimize=$IS_RUN_MODEL_OPTIMIZE"
+        --run_model_optimize=$IS_RUN_MODEL_OPTIMIZE \
+        --is_quantized_model=$IS_RUN_QUANTIZED_MODEL"
     done
     adb shell "echo >> $ANDROID_DIR/$RESULT_FILENAME"
 done
......
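With these changes, a quantized-model benchmark is requested through the new fifth positional argument, which benchmark.sh forwards to the binary as --is_quantized_model. A usage sketch with hypothetical local paths; the argument order matches the usage message in the diff above:

    # Benchmark quantized models: optimize each model, then run it with Int8 kernels preferred.
    # The binary, models directory, and result filename are placeholders.
    sh benchmark.sh ./benchmark_bin ./benchmark_models result.txt true true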