Commit acfdbf02 authored by tensor-tang

enable ner analysis test and refine lac

Parent: df0c6956
@@ -81,7 +81,7 @@ if (NOT EXISTS ${CHINESE_NER_INSTALL_DIR} AND WITH_TESTING AND WITH_INFERENCE)
 endif()
 inference_analysis_test(test_analyzer_ner SRCS analyzer_ner_tester.cc
-    EXTRA_DEPS paddle_inference_api paddle_fluid_api
+    EXTRA_DEPS paddle_inference_api paddle_fluid_api analysis_predictor
     ARGS --infer_model=${CHINESE_NER_INSTALL_DIR}/model
          --infer_data=${CHINESE_NER_INSTALL_DIR}/data.txt)
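The two ARGS flags above reach the tester binary through gflags. As a minimal, self-contained sketch of that wiring (this main() is an assumption for illustration; the real harness is supplied by the inference_analysis_test machinery):

    #include <gflags/gflags.h>
    #include <gtest/gtest.h>

    // Flag names match the ARGS passed by CMake above.
    DEFINE_string(infer_model, "", "model path");
    DEFINE_string(infer_data, "", "data file path");

    // Hypothetical entry point: parse --infer_model/--infer_data, run tests.
    int main(int argc, char **argv) {
      ::testing::InitGoogleTest(&argc, argv);
      ::google::ParseCommandLineFlags(&argc, &argv, true);
      return RUN_ALL_TESTS();
    }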
...
@@ -15,11 +15,9 @@
 #include "paddle/fluid/inference/analysis/analyzer.h"
 #include <gtest/gtest.h>
 #include "paddle/fluid/framework/ir/fuse_pass_base.h"
-#include "paddle/fluid/framework/ir/pass.h"
-#include "paddle/fluid/inference/analysis/ut_helper.h"
 #include "paddle/fluid/inference/api/analysis_predictor.h"
 #include "paddle/fluid/inference/api/helper.h"
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
+#include "paddle/fluid/inference/api/paddle_inference_pass.h"
 #include "paddle/fluid/platform/profiler.h"
 DEFINE_string(infer_model, "", "model path for LAC");
@@ -160,7 +158,7 @@ void TestLACPrediction(const std::string &model_path,
   config.use_gpu = false;
   config.device = 0;
   config.specify_input_name = true;
-  std::vector<PaddleTensor> input_slots, outputs_slots, ref_outputs_slots;
+  std::vector<PaddleTensor> input_slots, outputs_slots;
   DataRecord data(data_file, batch_size);
   GetOneBatch(&input_slots, &data, batch_size);
   std::unique_ptr<PaddlePredictor> predictor;
@@ -217,6 +215,7 @@ void TestLACPrediction(const std::string &model_path,
   // run once for comparison as reference
   auto ref_predictor =
       CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
+  std::vector<PaddleTensor> ref_outputs_slots;
   ref_predictor->Run(input_slots, &ref_outputs_slots);
   EXPECT_EQ(ref_outputs_slots.size(), outputs_slots.size());
   auto &ref_out = ref_outputs_slots[0];
@@ -246,9 +245,10 @@ void TestLACPrediction(const std::string &model_path,
     }
     LOG(INFO) << "has num ops: " << num_ops;
     ASSERT_TRUE(fuse_statis.count("fc_fuse"));
-    // ASSERT_TRUE(fuse_statis.count("fc_gru_fuse"));
-    LOG(INFO) << "fc fuse num:" << fuse_statis.at("fc_fuse");
-    // LOG(INFO) << "fc gru fuse num:" << fuse_statis.at("fc_gru_fuse");
+    ASSERT_TRUE(fuse_statis.count("fc_gru_fuse"));
+    EXPECT_EQ(fuse_statis.at("fc_fuse"), 1);
+    EXPECT_EQ(fuse_statis.at("fc_gru_fuse"), 4);
+    EXPECT_EQ(num_ops, 11);
   }
 }
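The fuse_statis map asserted on above is pulled out of the predictor the same way in both the LAC and NER testers. A minimal sketch of that access pattern as a standalone helper (GetFuseStatis is a hypothetical name of ours; the accessor chain is the one used in this diff):

    #include <string>
    #include <unordered_map>

    #include "paddle/fluid/framework/ir/fuse_pass_base.h"
    #include "paddle/fluid/inference/api/analysis_predictor.h"

    // Sketch: fetch the per-pass fusion counters recorded during analysis.
    const std::unordered_map<std::string, int> &GetFuseStatis(
        paddle::PaddlePredictor *predictor) {
      auto *analysis_predictor =
          dynamic_cast<paddle::AnalysisPredictor *>(predictor);
      return analysis_predictor->analysis_argument()
          .Get<std::unordered_map<std::string, int>>(
              paddle::framework::ir::kFuseStatisAttr);
    }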
...
@@ -13,12 +13,11 @@
 // limitations under the License.
 #include "paddle/fluid/inference/analysis/analyzer.h"
-#include <google/protobuf/text_format.h>
 #include <gtest/gtest.h>
-#include "paddle/fluid/framework/ir/pass.h"
-#include "paddle/fluid/inference/analysis/ut_helper.h"
+#include "paddle/fluid/framework/ir/fuse_pass_base.h"
+#include "paddle/fluid/inference/api/analysis_predictor.h"
 #include "paddle/fluid/inference/api/helper.h"
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
+#include "paddle/fluid/inference/api/paddle_inference_pass.h"
 #include "paddle/fluid/platform/profiler.h"
 DEFINE_string(infer_model, "", "model path");
@@ -112,7 +111,7 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
 const int chinese_ner_result_data[] = {30, 45, 41, 48, 17, 26,
                                        48, 39, 38, 16, 25};
-void TestChineseNERPrediction() {
+void TestChineseNERPrediction(bool use_analysis) {
   NativeConfig config;
   config.prog_file = FLAGS_infer_model + "/__model__";
   config.param_file = FLAGS_infer_model + "/param";
@@ -120,11 +119,23 @@ void TestChineseNERPrediction() {
   config.device = 0;
   config.specify_input_name = true;
-  auto predictor =
-      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
-  std::vector<PaddleTensor> input_slots;
-  std::vector<PaddleTensor> outputs;
+  std::vector<PaddleTensor> input_slots, outputs;
+  std::unique_ptr<PaddlePredictor> predictor;
   Timer timer;
+  if (use_analysis) {
+    AnalysisConfig cfg;
+    cfg.prog_file = FLAGS_infer_model + "/__model__";
+    cfg.param_file = FLAGS_infer_model + "/param";
+    cfg.use_gpu = false;
+    cfg.device = 0;
+    cfg.specify_input_name = true;
+    cfg.enable_ir_optim = true;
+    predictor =
+        CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(cfg);
+  } else {
+    predictor =
+        CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
+  }

   if (FLAGS_test_all_data) {
     LOG(INFO) << "test all data";
@@ -165,10 +176,51 @@ void TestChineseNERPrediction() {
   for (size_t i = 0; i < std::min(11UL, size); i++) {
     PADDLE_ENFORCE(result[i], chinese_ner_result_data[i]);
   }
+  if (use_analysis) {
+    // run once for comparison as reference
+    auto ref_predictor =
+        CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
+    std::vector<PaddleTensor> ref_outputs_slots;
+    ref_predictor->Run(input_slots, &ref_outputs_slots);
+    EXPECT_EQ(ref_outputs_slots.size(), outputs.size());
+    auto &ref_out = ref_outputs_slots[0];
+    size_t ref_size =
+        std::accumulate(ref_out.shape.begin(), ref_out.shape.end(), 1,
+                        [](int a, int b) { return a * b; });
+    EXPECT_EQ(size, ref_size);
+    int64_t *pdata_ref = static_cast<int64_t *>(ref_out.data.data());
+    for (size_t i = 0; i < size; ++i) {
+      EXPECT_EQ(pdata_ref[i], result[i]);
+    }
+    AnalysisPredictor *analysis_predictor =
+        dynamic_cast<AnalysisPredictor *>(predictor.get());
+    auto &fuse_statis = analysis_predictor->analysis_argument()
+                            .Get<std::unordered_map<std::string, int>>(
+                                framework::ir::kFuseStatisAttr);
+    for (auto &item : fuse_statis) {
+      LOG(INFO) << "fused " << item.first << " " << item.second;
+    }
+    int num_ops = 0;
+    for (auto &node :
+         analysis_predictor->analysis_argument().main_dfg->nodes.nodes()) {
+      if (node->IsFunction()) {
+        ++num_ops;
+      }
+    }
+    LOG(INFO) << "has num ops: " << num_ops;
+    ASSERT_TRUE(fuse_statis.count("fc_fuse"));
+    ASSERT_TRUE(fuse_statis.count("fc_gru_fuse"));
+    EXPECT_EQ(fuse_statis.at("fc_fuse"), 1);
+    EXPECT_EQ(fuse_statis.at("fc_gru_fuse"), 2);
+    EXPECT_EQ(num_ops, 14);
+  }
 }

-// Directly infer with the original model.
-TEST(Analyzer, Chinese_ner) { TestChineseNERPrediction(); }
+TEST(Analyzer_Chinese_ner, native) { TestChineseNERPrediction(false); }
+
+TEST(Analyzer_Chinese_ner, analysis) { TestChineseNERPrediction(true); }

 } // namespace inference
 } // namespace paddle
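The element count of an output tensor is recomputed from its shape at each comparison site with the same std::accumulate fold. Distilled into a standalone helper (NumElements is a name of ours; the fold itself is verbatim from the diff above):

    #include <numeric>
    #include <vector>

    // Sketch: total number of elements in a tensor of the given shape,
    // i.e. the product of all dimensions, starting from 1.
    size_t NumElements(const std::vector<int> &shape) {
      return std::accumulate(shape.begin(), shape.end(), 1,
                             [](int a, int b) { return a * b; });
    }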
@@ -283,7 +283,6 @@ void TestDituRNNPrediction(bool use_analysis, bool activate_ir,
   base_predictor->Run(input_slots, &base_outputs);
-  LOG(INFO) << "===========profile result===========";
   if (num_threads == 1) {
     // Prepare inputs.
     Timer timer;
@@ -324,7 +323,6 @@ void TestDituRNNPrediction(bool use_analysis, bool activate_ir,
       threads[i].join();
     }
   }
-  LOG(INFO) << "=====================================";
   if (use_analysis && activate_ir) {
     AnalysisPredictor *analysis_predictor =
...
@@ -44,20 +44,7 @@ function(inference_api_test TARGET_NAME)
 endfunction(inference_api_test)

 cc_library(paddle_inference_api SRCS api.cc api_impl.cc helper.cc DEPS lod_tensor)
-cc_library(analysis_predictor SRCS analysis_predictor.cc DEPS paddle_inference_api
-    analysis
-    ir_pass_manager
-    pass
-    fc_fuse_pass
-    fc_lstm_fuse_pass
-    fc_gru_fuse_pass
-    seq_concat_fc_fuse_pass
-    graph_viz_pass
-    infer_clean_graph_pass
-    graph_pattern_detector
-    infer_clean_graph_pass
-    attention_lstm_fuse_pass)
+cc_library(analysis_predictor SRCS analysis_predictor.cc DEPS paddle_inference_api analysis)

 cc_test(test_paddle_inference_api
     SRCS api_tester.cc
     DEPS paddle_inference_api)
...
@@ -124,11 +124,9 @@ std::string DescribeTensor(const PaddleTensor &tensor) {
 void PrintTime(int batch_size, int repeat, int num_threads, int tid,
                double latency) {
-  LOG(INFO) << "=====================================";
-  LOG(INFO) << "batch_size: " << batch_size << ", repeat: " << repeat
+  LOG(INFO) << "====== batch_size: " << batch_size << ", repeat: " << repeat
             << ", threads: " << num_threads << ", thread id: " << tid
-            << ", latency: " << latency << "ms";
-  LOG(INFO) << "=====================================";
+            << ", latency: " << latency << "ms ======";
 }

 } // namespace inference
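With the banner lines folded in, each call now emits a single log line, e.g. `====== batch_size: 1, repeat: 100, threads: 1, thread id: 0, latency: 12.3ms ======` (illustrative values). The latency argument comes from the Timer declared in the testers; a rough stand-in for that measurement pattern (this Timer body is our assumption, not the real one in paddle/fluid/inference/api/helper.h):

    #include <chrono>

    // Stand-in sketch for the inference helper Timer (assumption: the real
    // implementation may differ; only the tic()/toc() pattern is assumed).
    struct Timer {
      std::chrono::high_resolution_clock::time_point start;
      void tic() { start = std::chrono::high_resolution_clock::now(); }
      // Returns elapsed milliseconds since the last tic().
      double toc() {
        return std::chrono::duration<double, std::milli>(
                   std::chrono::high_resolution_clock::now() - start)
            .count();
      }
    };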
...