From de26df440b7361c3a15ae3dd8022e6abd9be13c9 Mon Sep 17 00:00:00 2001
From: lijianshe02 <48898730+lijianshe02@users.noreply.github.com>
Date: Mon, 15 Apr 2019 12:10:50 +0800
Subject: [PATCH] =?UTF-8?q?add=20SaveOptimModel=20interface=20in=20analysi?=
 =?UTF-8?q?s=5Fpredictor.h=20and=20test=20it=20in=20a=E2=80=A6=20(#16441)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* add SaveOptimModel interface in analysis_predictor.h and test it in
  analyzer_dam_tester and analyzer_resnet50_tester test=develop
---
 .../fluid/inference/api/analysis_predictor.cc | 39 ++++++++++++++
 .../fluid/inference/api/analysis_predictor.h  |  4 ++
 .../tests/api/analyzer_dam_tester.cc          | 48 +++++++++++++++++
 .../tests/api/analyzer_resnet50_tester.cc     | 51 ++++++++++++++++++-
 4 files changed, 141 insertions(+), 1 deletion(-)

diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index 0155609a02..fcab1ab186 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -832,6 +832,45 @@ std::string AnalysisPredictor::GetSerializedProgram() const {
   return inference_program_->Proto()->SerializeAsString();
 }

+// Add SaveOptimModel
+void AnalysisPredictor::SaveOptimModel(const std::string &dir) {
+  // save model
+  std::string model_name = dir + "/model";
+  std::ofstream outfile;
+  outfile.open(model_name, std::ios::out | std::ios::binary);
+  std::string inference_prog_desc = GetSerializedProgram();
+  outfile << inference_prog_desc;
+  // save params
+  framework::ProgramDesc save_program;
+  auto *save_block = save_program.MutableBlock(0);
+
+  const framework::ProgramDesc &main_program = program();
+  const framework::BlockDesc &global_block = main_program.Block(0);
+  std::vector<std::string> save_var_list;
+  for (framework::VarDesc *var : global_block.AllVars()) {
+    if (IsPersistable(var)) {
+      framework::VarDesc *new_var = save_block->Var(var->Name());
+      new_var->SetShape(var->GetShape());
+      new_var->SetDataType(var->GetDataType());
+      new_var->SetType(var->GetType());
+      new_var->SetLoDLevel(var->GetLoDLevel());
+      new_var->SetPersistable(true);
+
+      save_var_list.push_back(new_var->Name());
+    }
+  }
+  std::sort(save_var_list.begin(), save_var_list.end());
+  auto *op = save_block->AppendOp();
+  op->SetType("save_combine");
+  op->SetInput("X", save_var_list);
+  op->SetAttr("file_path", dir + "/params");
+  op->CheckAttrs();
+
+  platform::CPUPlace place;
+  framework::Executor exe(place);
+  exe.Run(save_program, scope(), 0, true, true);
+}
+
 template <>
 std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<AnalysisConfig>(
     const AnalysisConfig &config) {
diff --git a/paddle/fluid/inference/api/analysis_predictor.h b/paddle/fluid/inference/api/analysis_predictor.h
index e4c537f426..b5e134ced7 100644
--- a/paddle/fluid/inference/api/analysis_predictor.h
+++ b/paddle/fluid/inference/api/analysis_predictor.h
@@ -86,6 +86,10 @@ class AnalysisPredictor : public PaddlePredictor {

   bool MkldnnQuantize();

+  // save program to model
+  // save parameters to params
+  void SaveOptimModel(const std::string &dir);
+
 protected:
  // For memory optimization.
  bool need_collect_var_shapes_for_memory_optim();
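The testers below exercise the new interface end to end: build a predictor from an AnalysisConfig, downcast it to AnalysisPredictor, and point SaveOptimModel at a writable directory. A minimal caller sketch, assuming the standard inference headers; the model paths are illustrative and not part of this patch:

#include <sys/stat.h>
#include <string>
#include "paddle/fluid/inference/api/analysis_predictor.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"

void SaveOptimizedCopy() {
  paddle::AnalysisConfig cfg;
  cfg.SetModel("/path/to/model_dir");  // illustrative source model
  cfg.SwitchIrOptim(true);             // run the IR optimization passes
  auto predictor = paddle::CreatePaddlePredictor(cfg);
  // SaveOptimModel expects the target directory to exist already;
  // the testers create it with mkdir (POSIX-only).
  std::string out_dir = "/path/to/saved_optim_model";
  mkdir(out_dir.c_str(), 0777);
  static_cast<paddle::AnalysisPredictor *>(predictor.get())
      ->SaveOptimModel(out_dir);  // writes out_dir + "/model" and "/params"
}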
diff --git a/paddle/fluid/inference/tests/api/analyzer_dam_tester.cc b/paddle/fluid/inference/tests/api/analyzer_dam_tester.cc
index e10d239a5d..c9da5b3ea5 100644
--- a/paddle/fluid/inference/tests/api/analyzer_dam_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_dam_tester.cc
@@ -170,6 +170,15 @@ void SetConfig(AnalysisConfig *cfg) {
   cfg->SwitchIrOptim(true);
 }

+void SetOptimConfig(AnalysisConfig *cfg) {
+  std::string optimModelPath =
+      FLAGS_infer_model.substr(0, FLAGS_infer_model.find_last_of("/")) +
+      "/saved_optim_model";
+  cfg->SetModel(optimModelPath + "/model", optimModelPath + "/params");
+  cfg->SwitchIrOptim(true);
+  cfg->SwitchSpecifyInputNames();
+}
+
 void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
   DataRecord data(FLAGS_infer_data, FLAGS_batch_size);
   std::vector<PaddleTensor> input_slots;
@@ -315,5 +324,44 @@ TEST(Analyzer_dam, compare_determine) {
                        input_slots_all);
 }

+// Save optim model
+TEST(Analyzer_dam, save_optim_model) {
+  AnalysisConfig cfg;
+  SetConfig(&cfg);
+  std::string optimModelPath =
+      FLAGS_infer_model.substr(0, FLAGS_infer_model.find_last_of("/")) +
+      "/saved_optim_model";
+  mkdir(optimModelPath.c_str(), 0777);
+  auto predictor = CreateTestPredictor(
+      reinterpret_cast<const PaddlePredictor::Config *>(&cfg),
+      FLAGS_use_analysis);
+  (static_cast<AnalysisPredictor *>(predictor.get()))
+      ->SaveOptimModel(optimModelPath);
+}
+
+void CompareOptimAndOrig(const PaddlePredictor::Config *orig_config,
+                         const PaddlePredictor::Config *optim_config,
+                         const std::vector<std::vector<PaddleTensor>> &inputs) {
+  PrintConfig(orig_config, true);
+  PrintConfig(optim_config, true);
+  std::vector<std::vector<PaddleTensor>> orig_outputs, optim_outputs;
+  TestOneThreadPrediction(orig_config, inputs, &orig_outputs, false);
+  TestOneThreadPrediction(optim_config, inputs, &optim_outputs, false);
+  CompareResult(orig_outputs.back(), optim_outputs.back());
+}
+
+TEST(Analyzer_dam, compare_optim_orig) {
+  AnalysisConfig orig_cfg;
+  AnalysisConfig optim_cfg;
+  SetConfig(&orig_cfg);
+  SetOptimConfig(&optim_cfg);
+  std::vector<std::vector<PaddleTensor>> input_slots_all;
+  SetInput(&input_slots_all);
+  CompareOptimAndOrig(
+      reinterpret_cast<const PaddlePredictor::Config *>(&orig_cfg),
+      reinterpret_cast<const PaddlePredictor::Config *>(&optim_cfg),
+      input_slots_all);
+}
+
 }  // namespace inference
 }  // namespace paddle
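The saved output is an ordinary combined model/params pair, so it reloads through AnalysisConfig::SetModel exactly as SetOptimConfig does above. A minimal reload sketch under that assumption; the directory layout mirrors what the testers use:

#include <memory>
#include <string>
#include "paddle/fluid/inference/api/paddle_inference_api.h"

std::unique_ptr<paddle::PaddlePredictor> LoadOptimizedCopy(
    const std::string &dir) {  // e.g. <model root>/saved_optim_model
  paddle::AnalysisConfig cfg;
  // SaveOptimModel wrote two files: dir + "/model" (the serialized program)
  // and dir + "/params" (all persistable variables, via save_combine).
  cfg.SetModel(dir + "/model", dir + "/params");
  cfg.SwitchIrOptim(true);
  cfg.SwitchSpecifyInputNames();
  return paddle::CreatePaddlePredictor(cfg);
}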
diff --git a/paddle/fluid/inference/tests/api/analyzer_resnet50_tester.cc b/paddle/fluid/inference/tests/api/analyzer_resnet50_tester.cc
index d4330e6cdd..588c80aa60 100644
--- a/paddle/fluid/inference/tests/api/analyzer_resnet50_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_resnet50_tester.cc
@@ -32,6 +32,17 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
   SetFakeImageInput(inputs, FLAGS_infer_model);
 }

+void SetOptimConfig(AnalysisConfig *cfg) {
+  std::string optimModelPath =
+      FLAGS_infer_model.substr(0, FLAGS_infer_model.find_last_of("/")) +
+      "/saved_optim_model";
+  cfg->SetModel(optimModelPath + "/model", optimModelPath + "/params");
+  cfg->DisableGpu();
+  cfg->SwitchIrOptim();
+  cfg->SwitchSpecifyInputNames();
+  cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
+}
+
 // Easy for profiling independently.
 void profile(bool use_mkldnn = false) {
   AnalysisConfig cfg;
@@ -87,13 +98,51 @@ TEST(Analyzer_resnet50, compare_mkldnn) { compare(true /* use_mkldnn */); }
 TEST(Analyzer_resnet50, compare_determine) {
   AnalysisConfig cfg;
   SetConfig(&cfg);
-
   std::vector<std::vector<PaddleTensor>> input_slots_all;
   SetInput(&input_slots_all);
   CompareDeterministic(reinterpret_cast<const PaddlePredictor::Config *>(&cfg),
                        input_slots_all);
 }

+// Save optim model
+TEST(Analyzer_resnet50, save_optim_model) {
+  AnalysisConfig cfg;
+  SetConfig(&cfg);
+  std::string optimModelPath =
+      FLAGS_infer_model.substr(0, FLAGS_infer_model.find_last_of("/")) +
+      "/saved_optim_model";
+  mkdir(optimModelPath.c_str(), 0777);
+  auto predictor = CreateTestPredictor(
+      reinterpret_cast<const PaddlePredictor::Config *>(&cfg),
+      FLAGS_use_analysis);
+  (static_cast<AnalysisPredictor *>(predictor.get()))
+      ->SaveOptimModel(optimModelPath);
+}
+
+void CompareOptimAndOrig(const PaddlePredictor::Config *orig_config,
+                         const PaddlePredictor::Config *optim_config,
+                         const std::vector<std::vector<PaddleTensor>> &inputs) {
+  PrintConfig(orig_config, true);
+  PrintConfig(optim_config, true);
+  std::vector<std::vector<PaddleTensor>> orig_outputs, optim_outputs;
+  TestOneThreadPrediction(orig_config, inputs, &orig_outputs, false);
+  TestOneThreadPrediction(optim_config, inputs, &optim_outputs, false);
+  CompareResult(orig_outputs.back(), optim_outputs.back());
+}
+
+TEST(Analyzer_resnet50, compare_optim_orig) {
+  AnalysisConfig orig_cfg;
+  AnalysisConfig optim_cfg;
+  SetConfig(&orig_cfg);
+  SetOptimConfig(&optim_cfg);
+  std::vector<std::vector<PaddleTensor>> input_slots_all;
+  SetInput(&input_slots_all);
+  CompareOptimAndOrig(
+      reinterpret_cast<const PaddlePredictor::Config *>(&orig_cfg),
+      reinterpret_cast<const PaddlePredictor::Config *>(&optim_cfg),
+      input_slots_all);
+}
+
 }  // namespace analysis
 }  // namespace inference
 }  // namespace paddle
--
GitLab
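CompareOptimAndOrig in both testers leans on the internal test harness (TestOneThreadPrediction, CompareResult). The same round-trip check can be sketched against the public API alone; this version assumes predictors built as in the earlier snippets, float outputs, and an illustrative tolerance:

#include <cmath>
#include <cstdlib>
#include <vector>
#include "paddle/fluid/inference/api/paddle_inference_api.h"

// Run the original and the reloaded optimized predictor on one batch and
// check that their outputs agree elementwise.
void CheckOptimMatchesOrig(paddle::PaddlePredictor *orig,
                           paddle::PaddlePredictor *optim,
                           const std::vector<paddle::PaddleTensor> &inputs) {
  std::vector<paddle::PaddleTensor> orig_out, optim_out;
  if (!orig->Run(inputs, &orig_out) || !optim->Run(inputs, &optim_out)) {
    std::abort();  // prediction failed
  }
  if (orig_out.size() != optim_out.size()) std::abort();
  for (size_t i = 0; i < orig_out.size(); ++i) {
    if (orig_out[i].data.length() != optim_out[i].data.length()) std::abort();
    auto *a = static_cast<float *>(orig_out[i].data.data());
    auto *b = static_cast<float *>(optim_out[i].data.data());
    size_t n = orig_out[i].data.length() / sizeof(float);
    for (size_t j = 0; j < n; ++j) {
      if (std::fabs(a[j] - b[j]) > 1e-5f) std::abort();  // tolerance is illustrative
    }
  }
}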