From 624fa8d48a08ddf1a74a0f835317c6f64e0a3dcb Mon Sep 17 00:00:00 2001
From: xiebaiyuan
Date: Fri, 1 Nov 2019 11:13:28 +0800
Subject: [PATCH] add mem opt option for high api ,test = develop (#2319)

* add mem opt option for high api ,test = develop

* fix is_lod params && test-performance ,test=develop
---
 mobile/src/io/api_paddle_mobile.cc       |  4 ++++
 mobile/src/io/paddle_inference_api.h     |  1 +
 mobile/test/net/test_net_performance.cpp | 25 +++++++++++++++++++++---
 3 files changed, 27 insertions(+), 3 deletions(-)

diff --git a/mobile/src/io/api_paddle_mobile.cc b/mobile/src/io/api_paddle_mobile.cc
index 4c6acb2567..8bfc91998f 100644
--- a/mobile/src/io/api_paddle_mobile.cc
+++ b/mobile/src/io/api_paddle_mobile.cc
@@ -41,6 +41,10 @@ bool PaddleMobilePredictor<T, P>::Init(const PaddleMobileConfig &config) {
   if (config.pre_post_type == PaddleMobileConfig::UINT8_255) {
     configInternal.pre_post_type = PrePostType::UINT8_255;
   }
+
+  configInternal.memory_optimization_level =
+      config.mem_opt ? MemoryOptimizationWithoutFeeds : NoMemoryOptimization;
+
   paddle_mobile_.reset(new PaddleMobile<T, P>(configInternal));
 #ifdef PADDLE_MOBILE_CL
   paddle_mobile_->SetCLPath(config.cl_path);
diff --git a/mobile/src/io/paddle_inference_api.h b/mobile/src/io/paddle_inference_api.h
index 001b2c9bf0..c89b998144 100644
--- a/mobile/src/io/paddle_inference_api.h
+++ b/mobile/src/io/paddle_inference_api.h
@@ -221,6 +221,7 @@ struct PaddleMobileConfig : public PaddlePredictor::Config {
   bool lod_mode = false;
   int thread_num = 1;
   bool load_when_predict = false;
+  bool mem_opt = true;
   std::string cl_path;
   struct PaddleModelMemoryPack memory_pack;
 };
diff --git a/mobile/test/net/test_net_performance.cpp b/mobile/test/net/test_net_performance.cpp
index 678558d602..95e72ea7a7 100644
--- a/mobile/test/net/test_net_performance.cpp
+++ b/mobile/test/net/test_net_performance.cpp
@@ -156,22 +156,41 @@ void test(int argc, char *argv[]) {
   }
 
   // 测速
-  auto time5 = time();
+  auto max_time = -1;
+  auto min_time = 100000;
+  auto all_time = 0;
   if (is_lod) {
     for (int i = 0; i < run_times; i++) {
+      auto time7 = time();
       paddle_mobile.Predict(input_lod_tensor);
+      auto time8 = time();
+      const double diff_time_single = time_diff(time7, time8);
+      max_time = fmax(diff_time_single, max_time);
+      min_time = fmin(diff_time_single, min_time);
+      all_time += diff_time_single;
     }
   } else {
     paddle_mobile.Feed(var_names[0], input_tensor);
     for (int i = 0; i < run_times; i++) {
+      auto time7 = time();
       paddle_mobile.Predict();
+      auto time8 = time();
+      const double diff_time_single = time_diff(time7, time8);
+      max_time = fmax(diff_time_single, max_time);
+      min_time = fmin(diff_time_single, min_time);
+      all_time += diff_time_single;
     }
   }
 
-  auto time6 = time();
   std::cout << "auto-test"
-            << " predict-time-cost " << time_diff(time5, time6) / run_times
+            << " predict-time-cost-avg " << all_time * 1.0f / run_times
             << "ms" << std::endl;
+  std::cout << "auto-test"
+            << " predict-time-cost-max " << double(max_time) << "ms"
+            << std::endl;
+  std::cout << "auto-test"
+            << " predict-time-cost-min " << double(min_time) << "ms"
+            << std::endl;
   std::cout << std::endl;
 }
 
-- 
GitLab