From acfdbf029330e60037e4fff7cee9c00d99f031c5 Mon Sep 17 00:00:00 2001
From: tensor-tang
Date: Fri, 7 Sep 2018 15:56:51 +0800
Subject: [PATCH] enable ner analysis test and refine lac

---
 .../fluid/inference/analysis/CMakeLists.txt   |  2 +-
 .../inference/analysis/analyzer_lac_tester.cc | 14 ++--
 .../inference/analysis/analyzer_ner_tester.cc | 74 ++++++++++++++++---
 .../inference/analysis/analyzer_tester.cc     |  2 -
 paddle/fluid/inference/api/CMakeLists.txt     | 15 +---
 paddle/fluid/inference/api/helper.h           |  6 +-
 6 files changed, 74 insertions(+), 39 deletions(-)

diff --git a/paddle/fluid/inference/analysis/CMakeLists.txt b/paddle/fluid/inference/analysis/CMakeLists.txt
index ef55a0c28a0..a115bc8f4a3 100644
--- a/paddle/fluid/inference/analysis/CMakeLists.txt
+++ b/paddle/fluid/inference/analysis/CMakeLists.txt
@@ -81,7 +81,7 @@ if (NOT EXISTS ${CHINESE_NER_INSTALL_DIR} AND WITH_TESTING AND WITH_INFERENCE)
 endif()
 
 inference_analysis_test(test_analyzer_ner SRCS analyzer_ner_tester.cc
-  EXTRA_DEPS paddle_inference_api paddle_fluid_api
+  EXTRA_DEPS paddle_inference_api paddle_fluid_api analysis_predictor
   ARGS --infer_model=${CHINESE_NER_INSTALL_DIR}/model
        --infer_data=${CHINESE_NER_INSTALL_DIR}/data.txt)
 
diff --git a/paddle/fluid/inference/analysis/analyzer_lac_tester.cc b/paddle/fluid/inference/analysis/analyzer_lac_tester.cc
index 56f773bf218..4ff7251473f 100644
--- a/paddle/fluid/inference/analysis/analyzer_lac_tester.cc
+++ b/paddle/fluid/inference/analysis/analyzer_lac_tester.cc
@@ -15,11 +15,9 @@
 #include "paddle/fluid/inference/analysis/analyzer.h"
 #include <gtest/gtest.h>
 #include "paddle/fluid/framework/ir/fuse_pass_base.h"
-#include "paddle/fluid/framework/ir/pass.h"
-#include "paddle/fluid/inference/analysis/ut_helper.h"
 #include "paddle/fluid/inference/api/analysis_predictor.h"
 #include "paddle/fluid/inference/api/helper.h"
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
+#include "paddle/fluid/inference/api/paddle_inference_pass.h"
 #include "paddle/fluid/platform/profiler.h"
 
 DEFINE_string(infer_model, "", "model path for LAC");
@@ -160,7 +158,7 @@ void TestLACPrediction(const std::string &model_path,
   config.use_gpu = false;
   config.device = 0;
   config.specify_input_name = true;
-  std::vector<PaddleTensor> input_slots, outputs_slots, ref_outputs_slots;
+  std::vector<PaddleTensor> input_slots, outputs_slots;
   DataRecord data(data_file, batch_size);
   GetOneBatch(&input_slots, &data, batch_size);
   std::unique_ptr<PaddlePredictor> predictor;
@@ -217,6 +215,7 @@
     // run once for comparion as reference
     auto ref_predictor =
         CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
+    std::vector<PaddleTensor> ref_outputs_slots;
     ref_predictor->Run(input_slots, &ref_outputs_slots);
     EXPECT_EQ(ref_outputs_slots.size(), outputs_slots.size());
     auto &ref_out = ref_outputs_slots[0];
@@ -246,9 +245,10 @@
     }
     LOG(INFO) << "has num ops: " << num_ops;
     ASSERT_TRUE(fuse_statis.count("fc_fuse"));
-    // ASSERT_TRUE(fuse_statis.count("fc_gru_fuse"));
-    LOG(INFO) << "fc fuse num:" << fuse_statis.at("fc_fuse");
-    // LOG(INFO) << "fc gru fuse num:" << fuse_statis.at("fc_gru_fuse");
+    ASSERT_TRUE(fuse_statis.count("fc_gru_fuse"));
+    EXPECT_EQ(fuse_statis.at("fc_fuse"), 1);
+    EXPECT_EQ(fuse_statis.at("fc_gru_fuse"), 4);
+    EXPECT_EQ(num_ops, 11);
   }
 }
 
diff --git a/paddle/fluid/inference/analysis/analyzer_ner_tester.cc b/paddle/fluid/inference/analysis/analyzer_ner_tester.cc
index eaae09b051f..f5c5d73aeb4 100644
--- a/paddle/fluid/inference/analysis/analyzer_ner_tester.cc
+++ b/paddle/fluid/inference/analysis/analyzer_ner_tester.cc
@@ -13,12 +13,11 @@
 // limitations under the License.
 
 #include "paddle/fluid/inference/analysis/analyzer.h"
-#include <google/protobuf/text_format.h>
 #include <gtest/gtest.h>
-#include "paddle/fluid/framework/ir/pass.h"
-#include "paddle/fluid/inference/analysis/ut_helper.h"
+#include "paddle/fluid/framework/ir/fuse_pass_base.h"
+#include "paddle/fluid/inference/api/analysis_predictor.h"
 #include "paddle/fluid/inference/api/helper.h"
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
+#include "paddle/fluid/inference/api/paddle_inference_pass.h"
 #include "paddle/fluid/platform/profiler.h"
 
 DEFINE_string(infer_model, "", "model path");
@@ -112,7 +111,7 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
 const int chinese_ner_result_data[] = {30, 45, 41, 48, 17, 26,
                                        48, 39, 38, 16, 25};
 
-void TestChineseNERPrediction() {
+void TestChineseNERPrediction(bool use_analysis) {
   NativeConfig config;
   config.prog_file = FLAGS_infer_model + "/__model__";
   config.param_file = FLAGS_infer_model + "/param";
@@ -120,11 +119,23 @@
   config.use_gpu = false;
   config.device = 0;
   config.specify_input_name = true;
-  auto predictor =
-      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
-  std::vector<PaddleTensor> input_slots;
-  std::vector<PaddleTensor> outputs;
+  std::vector<PaddleTensor> input_slots, outputs;
+  std::unique_ptr<PaddlePredictor> predictor;
   Timer timer;
+  if (use_analysis) {
+    AnalysisConfig cfg;
+    cfg.prog_file = FLAGS_infer_model + "/__model__";
+    cfg.param_file = FLAGS_infer_model + "/param";
+    cfg.use_gpu = false;
+    cfg.device = 0;
+    cfg.specify_input_name = true;
+    cfg.enable_ir_optim = true;
+    predictor =
+        CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(cfg);
+  } else {
+    predictor =
+        CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
+  }
 
   if (FLAGS_test_all_data) {
     LOG(INFO) << "test all data";
@@ -165,10 +176,51 @@
   for (size_t i = 0; i < std::min(11UL, size); i++) {
     PADDLE_ENFORCE(result[i], chinese_ner_result_data[i]);
   }
+
+  if (use_analysis) {
+    // run once for comparison as reference
+    auto ref_predictor =
+        CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
+    std::vector<PaddleTensor> ref_outputs_slots;
+    ref_predictor->Run(input_slots, &ref_outputs_slots);
+    EXPECT_EQ(ref_outputs_slots.size(), outputs.size());
+    auto &ref_out = ref_outputs_slots[0];
+    size_t ref_size =
+        std::accumulate(ref_out.shape.begin(), ref_out.shape.end(), 1,
+                        [](int a, int b) { return a * b; });
+    EXPECT_EQ(size, ref_size);
+    int64_t *pdata_ref = static_cast<int64_t *>(ref_out.data.data());
+    for (size_t i = 0; i < size; ++i) {
+      EXPECT_EQ(pdata_ref[i], result[i]);
+    }
+
+    AnalysisPredictor *analysis_predictor =
+        dynamic_cast<AnalysisPredictor *>(predictor.get());
+    auto &fuse_statis = analysis_predictor->analysis_argument()
+                            .Get<std::unordered_map<std::string, int>>(
+                                framework::ir::kFuseStatisAttr);
+    for (auto &item : fuse_statis) {
+      LOG(INFO) << "fused " << item.first << " " << item.second;
+    }
+    int num_ops = 0;
+    for (auto &node :
+         analysis_predictor->analysis_argument().main_dfg->nodes.nodes()) {
+      if (node->IsFunction()) {
+        ++num_ops;
+      }
+    }
+    LOG(INFO) << "has num ops: " << num_ops;
+    ASSERT_TRUE(fuse_statis.count("fc_fuse"));
+    ASSERT_TRUE(fuse_statis.count("fc_gru_fuse"));
+    EXPECT_EQ(fuse_statis.at("fc_fuse"), 1);
+    EXPECT_EQ(fuse_statis.at("fc_gru_fuse"), 2);
+    EXPECT_EQ(num_ops, 14);
+  }
 }
 
-// Directly infer with the original model.
-TEST(Analyzer, Chinese_ner) { TestChineseNERPrediction(); }
+TEST(Analyzer_Chinese_ner, native) { TestChineseNERPrediction(false); }
+
+TEST(Analyzer_Chinese_ner, analysis) { TestChineseNERPrediction(true); }
 
 }  // namespace inference
 }  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/analyzer_tester.cc b/paddle/fluid/inference/analysis/analyzer_tester.cc
index 4cf26d3c70e..a496ae41aa0 100644
--- a/paddle/fluid/inference/analysis/analyzer_tester.cc
+++ b/paddle/fluid/inference/analysis/analyzer_tester.cc
@@ -283,7 +283,6 @@ void TestDituRNNPrediction(bool use_analysis, bool activate_ir,
 
   base_predictor->Run(input_slots, &base_outputs);
 
-  LOG(INFO) << "===========profile result===========";
   if (num_threads == 1) {
     // Prepare inputs.
     Timer timer;
@@ -324,7 +323,6 @@ void TestDituRNNPrediction(bool use_analysis, bool activate_ir,
       threads[i].join();
     }
   }
-  LOG(INFO) << "=====================================";
 
   if (use_analysis && activate_ir) {
     AnalysisPredictor *analysis_predictor =
diff --git a/paddle/fluid/inference/api/CMakeLists.txt b/paddle/fluid/inference/api/CMakeLists.txt
index f944c9fdec7..5df486f345a 100644
--- a/paddle/fluid/inference/api/CMakeLists.txt
+++ b/paddle/fluid/inference/api/CMakeLists.txt
@@ -44,20 +44,7 @@ function(inference_api_test TARGET_NAME)
 endfunction(inference_api_test)
 
 cc_library(paddle_inference_api SRCS api.cc api_impl.cc helper.cc DEPS lod_tensor)
-cc_library(analysis_predictor SRCS analysis_predictor.cc DEPS paddle_inference_api
-  analysis
-  ir_pass_manager
-  pass
-  fc_fuse_pass
-  fc_lstm_fuse_pass
-  fc_gru_fuse_pass
-  seq_concat_fc_fuse_pass
-  graph_viz_pass
-  infer_clean_graph_pass
-  graph_pattern_detector
-  infer_clean_graph_pass
-  attention_lstm_fuse_pass)
-
+cc_library(analysis_predictor SRCS analysis_predictor.cc DEPS paddle_inference_api analysis)
 cc_test(test_paddle_inference_api
     SRCS api_tester.cc
     DEPS paddle_inference_api)
diff --git a/paddle/fluid/inference/api/helper.h b/paddle/fluid/inference/api/helper.h
index 0ab2542f34e..f6893be428f 100644
--- a/paddle/fluid/inference/api/helper.h
+++ b/paddle/fluid/inference/api/helper.h
@@ -124,11 +124,9 @@ std::string DescribeTensor(const PaddleTensor &tensor) {
 
 void PrintTime(int batch_size, int repeat, int num_threads, int tid,
                double latency) {
-  LOG(INFO) << "=====================================";
-  LOG(INFO) << "batch_size: " << batch_size << ", repeat: " << repeat
+  LOG(INFO) << "====== batch_size: " << batch_size << ", repeat: " << repeat
             << ", threads: " << num_threads << ", thread id: " << tid
-            << ", latency: " << latency << "ms";
-  LOG(INFO) << "=====================================";
+            << ", latency: " << latency << "ms ======";
 }
 
 }  // namespace inference
--
GitLab