提交 118ad09e · 作者: juncaipeng · 提交者: Yan Chunwei

fix benchmark threads, test=develop (#1870)

上级 231da325
......@@ -49,7 +49,6 @@ void OutputOptModel(const std::string& load_model_dir,
});
auto predictor = lite_api::CreatePaddlePredictor(config);
// delete old optimized model
int ret = system(
paddle::lite::string_format("rm -rf %s", save_optimized_model_dir.c_str())
.c_str());
......@@ -69,17 +68,13 @@ void Run(const std::vector<std::vector<int64_t>>& input_shapes,
const int thread_num,
const int warmup_times,
const std::string model_name) {
#ifdef LITE_WITH_ARM
lite::DeviceInfo::Init();
lite_api::MobileConfig config;
config.set_threads(thread_num);
if (thread_num == 1) {
lite::DeviceInfo::Global().SetRunMode(LITE_POWER_HIGH, thread_num);
LOG(INFO) << "LITE_POWER_HIGH";
config.set_power_mode(LITE_POWER_HIGH);
} else {
lite::DeviceInfo::Global().SetRunMode(LITE_POWER_NO_BIND, thread_num);
LOG(INFO) << "LITE_POWER_NO_BIND";
config.set_power_mode(LITE_POWER_NO_BIND);
}
#endif
lite_api::MobileConfig config;
config.set_model_dir(model_dir);
auto predictor = lite_api::CreatePaddlePredictor(config);
......
Markdown is supported
0%
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册