From 116979a40adf7fe7788f8cd50b9f03c57bcbba7b Mon Sep 17 00:00:00 2001
From: luotao1
Date: Fri, 23 Nov 2018 16:17:56 +0800
Subject: [PATCH] refine api name

test=develop
---
 paddle/fluid/inference/api/analysis_config.cc      |  3 ++-
 paddle/fluid/inference/api/analysis_predictor.cc   |  4 ++--
 paddle/fluid/inference/api/analysis_predictor.h    |  2 +-
 paddle/fluid/inference/api/api_impl.cc             |  2 +-
 paddle/fluid/inference/api/paddle_api.h            | 14 +++++++++-----
 .../tests/api/analyzer_resnet50_tester.cc          |  2 +-
 paddle/fluid/inference/tests/api/config_printer.h  |  2 +-
 paddle/fluid/inference/tests/api/tester_helper.h   |  2 +-
 8 files changed, 18 insertions(+), 13 deletions(-)

diff --git a/paddle/fluid/inference/api/analysis_config.cc b/paddle/fluid/inference/api/analysis_config.cc
index 100ee0c9d..dd75f0d9a 100644
--- a/paddle/fluid/inference/api/analysis_config.cc
+++ b/paddle/fluid/inference/api/analysis_config.cc
@@ -46,7 +46,7 @@ contrib::AnalysisConfig::AnalysisConfig(const contrib::AnalysisConfig &other) {
   prog_file = other.prog_file;
   param_file = other.param_file;
   specify_input_name = other.specify_input_name;
-  cpu_num_threads_ = other.cpu_num_threads_;
+  cpu_math_library_num_threads_ = other.cpu_math_library_num_threads_;
   // fields from this.
   enable_ir_optim = other.enable_ir_optim;
   use_feed_fetch_ops = other.use_feed_fetch_ops;
@@ -73,6 +73,7 @@ contrib::AnalysisConfig::AnalysisConfig(contrib::AnalysisConfig &&other) {
   prog_file = other.prog_file;
   param_file = other.param_file;
   specify_input_name = other.specify_input_name;
+  cpu_math_library_num_threads_ = other.cpu_math_library_num_threads_;
   // fields from this.
   enable_ir_optim = other.enable_ir_optim;
   use_feed_fetch_ops = other.use_feed_fetch_ops;
diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index 4633a75e5..c132ce326 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -66,7 +66,7 @@ bool AnalysisPredictor::Init(
 #endif
 
   // no matter with or without MKLDNN
-  paddle::platform::SetNumThreads(config_.GetCPUNumThreads());
+  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
 
   if (!PrepareScope(parent_scope)) {
     return false;
@@ -159,7 +159,7 @@ bool AnalysisPredictor::PrepareExecutor() {
   return true;
 }
 
-void AnalysisPredictor::SetMKLDNNThreadId(int tid) {
+void AnalysisPredictor::SetMkldnnThreadID(int tid) {
 #ifdef PADDLE_WITH_MKLDNN
   platform::set_cur_thread_id(tid);
 #else
diff --git a/paddle/fluid/inference/api/analysis_predictor.h b/paddle/fluid/inference/api/analysis_predictor.h
index 9191970a3..db57812bc 100644
--- a/paddle/fluid/inference/api/analysis_predictor.h
+++ b/paddle/fluid/inference/api/analysis_predictor.h
@@ -69,7 +69,7 @@ class AnalysisPredictor : public PaddlePredictor {
   framework::Scope *scope() { return scope_.get(); }
   framework::ProgramDesc &program() { return *inference_program_; }
 
-  void SetMKLDNNThreadId(int tid);
+  void SetMkldnnThreadID(int tid);
 
  protected:
   bool PrepareProgram(const std::shared_ptr<framework::ProgramDesc> &program);
diff --git a/paddle/fluid/inference/api/api_impl.cc b/paddle/fluid/inference/api/api_impl.cc
index c3d17edea..66a8e5139 100644
--- a/paddle/fluid/inference/api/api_impl.cc
+++ b/paddle/fluid/inference/api/api_impl.cc
@@ -75,7 +75,7 @@ bool NativePaddlePredictor::Init(
 #endif
 
   // no matter with or without MKLDNN
-  paddle::platform::SetNumThreads(config_.GetCPUNumThreads());
+  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
 
   if (config_.use_gpu) {
     place_ = paddle::platform::CUDAPlace(config_.device);
diff --git a/paddle/fluid/inference/api/paddle_api.h b/paddle/fluid/inference/api/paddle_api.h
index b7f7781d0..1513a4b3b 100644
--- a/paddle/fluid/inference/api/paddle_api.h
+++ b/paddle/fluid/inference/api/paddle_api.h
@@ -187,14 +187,18 @@ struct NativeConfig : public PaddlePredictor::Config {
   // `feeds` and `fetches` of the phase `save_inference_model`.
   bool specify_input_name{false};
 
-  // Set and get the number of cpu threads.
-  void SetCPUNumThreads(int cpu_num_threads) {
-    cpu_num_threads_ = cpu_num_threads;
+  // Set and get the number of cpu math library threads.
+  void SetCpuMathLibraryNumThreads(int cpu_math_library_num_threads) {
+    cpu_math_library_num_threads_ = cpu_math_library_num_threads;
+  }
+  int cpu_math_library_num_threads() const {
+    return cpu_math_library_num_threads_;
   }
-  int GetCPUNumThreads() const { return cpu_num_threads_; }
 
  protected:
-  int cpu_num_threads_{1};  // number of cpu threads for each instance.
+  // number of cpu math library (such as MKL, OpenBlas) threads for each
+  // instance.
+  int cpu_math_library_num_threads_{1};
 };
 
 // A factory to help create different predictors.
diff --git a/paddle/fluid/inference/tests/api/analyzer_resnet50_tester.cc b/paddle/fluid/inference/tests/api/analyzer_resnet50_tester.cc
index 308a794ca..abc63577b 100644
--- a/paddle/fluid/inference/tests/api/analyzer_resnet50_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_resnet50_tester.cc
@@ -27,7 +27,7 @@ void SetConfig(AnalysisConfig *cfg) {
   cfg->device = 0;
   cfg->enable_ir_optim = true;
   cfg->specify_input_name = true;
-  cfg->SetCPUNumThreads(FLAGS_paddle_num_threads);
+  cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
 }
 
 void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
diff --git a/paddle/fluid/inference/tests/api/config_printer.h b/paddle/fluid/inference/tests/api/config_printer.h
index a803f5b3f..4231eef72 100644
--- a/paddle/fluid/inference/tests/api/config_printer.h
+++ b/paddle/fluid/inference/tests/api/config_printer.h
@@ -54,7 +54,7 @@ std::ostream &operator<<(std::ostream &os, const NativeConfig &config) {
   os << GenSpaces(num_spaces)
      << "specify_input_name: " << config.specify_input_name << "\n";
   os << GenSpaces(num_spaces)
-     << "cpu_num_threads: " << config.GetCPUNumThreads() << "\n";
+     << "cpu_num_threads: " << config.cpu_math_library_num_threads() << "\n";
   num_spaces--;
   os << GenSpaces(num_spaces) << "}\n";
   return os;
diff --git a/paddle/fluid/inference/tests/api/tester_helper.h b/paddle/fluid/inference/tests/api/tester_helper.h
index d21567ac1..1dc167840 100644
--- a/paddle/fluid/inference/tests/api/tester_helper.h
+++ b/paddle/fluid/inference/tests/api/tester_helper.h
@@ -221,7 +221,7 @@ void TestMultiThreadPrediction(
 #ifdef PADDLE_WITH_MKLDNN
       if (use_analysis) {
         static_cast<AnalysisPredictor *>(predictor.get())
-            ->SetMKLDNNThreadId(static_cast<int>(tid) + 1);
+            ->SetMkldnnThreadID(static_cast<int>(tid) + 1);
       }
 #endif
 
--
GitLab
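
Usage note (not part of the patch): a minimal sketch of how the renamed configuration and predictor APIs fit together after this change. The model directory and thread count are illustrative placeholders, and the MKL-DNN call in the trailing comment assumes a PADDLE_WITH_MKLDNN build.

// Sketch: exercising the renamed APIs (illustrative values, not from the patch).
#include "paddle/fluid/inference/api/paddle_inference_api.h"

int main() {
  paddle::contrib::AnalysisConfig config;
  config.model_dir = "./mobilenet";       // hypothetical model directory
  config.SetCpuMathLibraryNumThreads(4);  // renamed from SetCPUNumThreads()
  // Getter renamed from GetCPUNumThreads() to the accessor below.
  int num_threads = config.cpu_math_library_num_threads();

  auto predictor =
      paddle::CreatePaddlePredictor<paddle::contrib::AnalysisConfig>(config);
  // In MKL-DNN builds, each worker thread can tag itself, e.g.:
  //   static_cast<paddle::AnalysisPredictor *>(predictor.get())
  //       ->SetMkldnnThreadID(1);  // renamed from SetMKLDNNThreadId()
  return predictor && num_threads == 4 ? 0 : 1;
}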