From 63b38ca40bf0185fee52506690ad051b809833d1 Mon Sep 17 00:00:00 2001
From: tensor-tang
Date: Fri, 31 Aug 2018 20:37:11 +0800
Subject: [PATCH] add lac test

---
 .../fluid/inference/analysis/CMakeLists.txt   |  17 +-
 .../inference/analysis/analyzer_lac_tester.cc | 196 ++++++++++++++++++
 2 files changed, 211 insertions(+), 2 deletions(-)
 create mode 100644 paddle/fluid/inference/analysis/analyzer_lac_tester.cc

diff --git a/paddle/fluid/inference/analysis/CMakeLists.txt b/paddle/fluid/inference/analysis/CMakeLists.txt
index 817e36401..cc0dd0d49 100644
--- a/paddle/fluid/inference/analysis/CMakeLists.txt
+++ b/paddle/fluid/inference/analysis/CMakeLists.txt
@@ -50,7 +50,7 @@ endfunction(inference_download_and_uncompress)
 set(DITU_RNN_MODEL_URL "http://paddle-inference-dist.bj.bcebos.com/ditu_rnn_fluid%2Fmodel.tar.gz")
 set(DITU_RNN_DATA_URL "http://paddle-inference-dist.bj.bcebos.com/ditu_rnn_fluid%2Fdata.txt.tar.gz")
 set(DITU_INSTALL_DIR "${THIRD_PARTY_PATH}/inference_demo/ditu_rnn" CACHE PATH "Ditu RNN model and data root." FORCE)
-if (NOT EXISTS ${DITU_INSTALL_DIR})
+if (NOT EXISTS ${DITU_INSTALL_DIR} AND WITH_TESTING)
   inference_download_and_uncompress(${DITU_INSTALL_DIR} ${DITU_RNN_MODEL_URL} "ditu_rnn_fluid%2Fmodel.tar.gz")
   inference_download_and_uncompress(${DITU_INSTALL_DIR} ${DITU_RNN_DATA_URL} "ditu_rnn_fluid%2Fdata.txt.tar.gz")
 endif()
@@ -86,7 +86,7 @@ inference_analysis_test(test_model_store_pass SRCS model_store_pass_tester.cc)
 set(CHINESE_NER_MODEL_URL "http://paddle-inference-dist.bj.bcebos.com/chinese_ner_model.tar.gz")
 set(CHINESE_NER_DATA_URL "http://paddle-inference-dist.bj.bcebos.com/chinese_ner-data.txt.tar.gz")
 set(CHINESE_NER_INSTALL_DIR "${THIRD_PARTY_PATH}/inference_demo/chinese_ner" CACHE PATH "Chinese ner model and data root." FORCE)
-if (NOT EXISTS ${CHINESE_NER_INSTALL_DIR})
+if (NOT EXISTS ${CHINESE_NER_INSTALL_DIR} AND WITH_TESTING)
   inference_download_and_uncompress(${CHINESE_NER_INSTALL_DIR} ${CHINESE_NER_MODEL_URL} "chinese_ner_model.tar.gz")
   inference_download_and_uncompress(${CHINESE_NER_INSTALL_DIR} ${CHINESE_NER_DATA_URL} "chinese_ner-data.txt.tar.gz")
 endif()
@@ -95,3 +95,16 @@ inference_analysis_test(test_analyzer_ner SRCS analyzer_ner_tester.cc
   EXTRA_DEPS paddle_inference_api paddle_fluid_api
   ARGS --infer_model=${CHINESE_NER_INSTALL_DIR}/model
        --infer_data=${CHINESE_NER_INSTALL_DIR}/data.txt)
+
+set(LAC_MODEL_URL "http://paddle-inference-dist.bj.bcebos.com/lac_model.tar.gz")
+set(LAC_DATA_URL "http://paddle-inference-dist.bj.bcebos.com/lac_data.txt.tar.gz")
+set(LAC_INSTALL_DIR "${THIRD_PARTY_PATH}/inference_demo/lac" CACHE PATH "LAC model and data root." FORCE)
+if (NOT EXISTS ${LAC_INSTALL_DIR} AND WITH_TESTING)
+  inference_download_and_uncompress(${LAC_INSTALL_DIR} ${LAC_MODEL_URL} "lac_model.tar.gz")
+  inference_download_and_uncompress(${LAC_INSTALL_DIR} ${LAC_DATA_URL} "lac_data.txt.tar.gz")
+endif()
+
+inference_analysis_test(test_analyzer_lac SRCS analyzer_lac_tester.cc
+  EXTRA_DEPS paddle_inference_api paddle_fluid_api
+  ARGS --infer_model=${LAC_INSTALL_DIR}/model
+       --infer_data=${LAC_INSTALL_DIR}/data.txt)

diff --git a/paddle/fluid/inference/analysis/analyzer_lac_tester.cc b/paddle/fluid/inference/analysis/analyzer_lac_tester.cc
new file mode 100644
index 000000000..9f04026bd
--- /dev/null
+++ b/paddle/fluid/inference/analysis/analyzer_lac_tester.cc
@@ -0,0 +1,196 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/inference/analysis/analyzer.h"
+#include <gflags/gflags.h>
+#include <gtest/gtest.h>
+#include "paddle/fluid/framework/ir/pass.h"
+#include "paddle/fluid/inference/analysis/ut_helper.h"
+#include "paddle/fluid/inference/api/helper.h"
+#include "paddle/fluid/inference/api/paddle_inference_api.h"
+#include "paddle/fluid/platform/profiler.h"
+
+DEFINE_string(infer_model, "", "model path for LAC");
+DEFINE_string(infer_data, "", "data file for LAC");
+DEFINE_int32(batch_size, 1, "batch size.");
+DEFINE_int32(burning, 0, "Number of untimed warm-up runs before the timed repeats.");
+DEFINE_int32(repeat, 1, "Running the inference program repeat times.");
+DEFINE_bool(test_all_data, false, "Test all of the data in the data file.");
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+struct DataRecord {
+  std::vector<int64_t> data;
+  std::vector<size_t> lod;
+  // state for iterating over the dataset batch by batch
+  size_t batch_iter{0};
+  std::vector<std::vector<size_t>> batched_lods;
+  std::vector<std::vector<int64_t>> batched_datas;
+  std::vector<std::vector<int64_t>> datasets;
+  DataRecord() = default;
+  explicit DataRecord(const std::string &path, int batch_size = 1) {
+    Load(path);
+    Prepare(batch_size);
+    batch_iter = 0;
+  }
+  void Load(const std::string &path) {
+    std::ifstream file(path);
+    std::string line;
+    int num_lines = 0;
+    datasets.resize(0);
+    while (std::getline(file, line)) {
+      num_lines++;
+      // the second ';'-separated field holds the space-separated word ids
+      std::vector<std::string> data;
+      split(line, ';', &data);
+      std::vector<int64_t> words_ids;
+      split_to_int64(data[1], ' ', &words_ids);
+      datasets.emplace_back(words_ids);
+    }
+  }
+  void Prepare(int bs) {
+    if (bs == 1) {
+      // batch size 1: every sentence is its own batch
+      batched_datas = datasets;
+      for (const auto &one_sentence : datasets) {
+        batched_lods.push_back({0, one_sentence.size()});
+      }
+    } else {
+      std::vector<int64_t> one_batch;
+      std::vector<size_t> lod{0};
+      int bs_id = 0;
+      for (const auto &one_sentence : datasets) {
+        bs_id++;
+        one_batch.insert(one_batch.end(), one_sentence.begin(),
+                         one_sentence.end());
+        lod.push_back(lod.back() + one_sentence.size());
+        if (bs_id == bs) {
+          bs_id = 0;
+          batched_datas.push_back(one_batch);
+          batched_lods.push_back(lod);
+          one_batch.clear();
+          lod.clear();
+          lod.push_back(0);
+        }
+      }
+      // flush the trailing, possibly partial batch
+      if (!one_batch.empty()) {
+        batched_datas.push_back(one_batch);
+        batched_lods.push_back(lod);
+      }
+    }
+  }
+  DataRecord NextBatch() {
+    DataRecord data;
+    data.data = batched_datas[batch_iter];
+    data.lod = batched_lods[batch_iter];
+    batch_iter++;
+    if (batch_iter >= batched_datas.size()) {
+      batch_iter = 0;  // wrap around to the first batch
+    }
+    return data;
+  }
+};
+
+void GetOneBatch(std::vector<PaddleTensor> *input_slots, DataRecord *data,
+                 int batch_size) {
+  auto one_batch = data->NextBatch();
+  PaddleTensor input_tensor;
+  input_tensor.name = "word";
+  input_tensor.shape.assign({static_cast<int>(one_batch.data.size()), 1});
+  input_tensor.lod.assign({one_batch.lod});
+  input_tensor.dtype = PaddleDType::INT64;
+  TensorAssignData(&input_tensor, {one_batch.data});
+  PADDLE_ENFORCE_EQ(batch_size, static_cast<int>(one_batch.lod.size() - 1));
+  input_slots->assign({input_tensor});
+}
+
+static void PrintTime(const double latency, const int bs, const int repeat) {
+  LOG(INFO) << "===========profile result===========";
+  LOG(INFO) << "batch_size: " << bs << ", repeat: " << repeat
+            << ", avg latency: " << latency / repeat << "ms";
+  LOG(INFO) << "=====================================";
+}
+
+void BenchAllData(const std::string &model_path, const std::string &data_file,
+                  const int batch_size, const int repeat) {
+  NativeConfig config;
+  config.model_dir = model_path;
+  config.use_gpu = false;
+  config.device = 0;
+  config.specify_input_name = true;
+  std::vector<PaddleTensor> input_slots, outputs_slots;
+  DataRecord data(data_file, batch_size);
+  auto predictor =
+      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
+  GetOneBatch(&input_slots, &data, batch_size);
+  // untimed warm-up runs
+  for (int i = 0; i < FLAGS_burning; i++) {
+    predictor->Run(input_slots, &outputs_slots);
+  }
+  Timer timer;
+  double sum = 0;
+  for (int i = 0; i < repeat; i++) {
+    for (size_t bid = 0; bid < data.batched_datas.size(); ++bid) {
+      GetOneBatch(&input_slots, &data, batch_size);
+      timer.tic();
+      predictor->Run(input_slots, &outputs_slots);
+      sum += timer.toc();
+    }
+  }
+  PrintTime(sum, batch_size, repeat);
+}
+
+// reference output (tag ids) for the first batch at batch size 1
+const int64_t lac_ref_data[] = {24, 25, 25, 25, 38, 30, 31, 14, 15, 44, 24, 25,
+                                25, 25, 25, 25, 44, 24, 25, 25, 25, 36, 42, 43,
+                                44, 14, 15, 44, 14, 15, 44, 14, 15, 44, 38, 39,
+                                14, 15, 44, 22, 23, 23, 23, 23, 23, 23, 23};
+
+void TestLACPrediction(const std::string &model_path,
+                       const std::string &data_file, const int batch_size,
+                       const int repeat, bool test_all_data) {
+  if (test_all_data) {
+    BenchAllData(model_path, data_file, batch_size, repeat);
+    return;
+  }
+  NativeConfig config;
+  config.model_dir = model_path;
+  config.use_gpu = false;
+  config.device = 0;
+  config.specify_input_name = true;
+  std::vector<PaddleTensor> input_slots, outputs_slots;
+  DataRecord data(data_file, batch_size);
+  GetOneBatch(&input_slots, &data, batch_size);
+  auto predictor =
+      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
+  for (int i = 0; i < FLAGS_burning; i++) {
+    predictor->Run(input_slots, &outputs_slots);
+  }
+  Timer timer;
+  timer.tic();
+  for (int i = 0; i < repeat; i++) {
+    predictor->Run(input_slots, &outputs_slots);
+  }
+  PrintTime(timer.toc(), batch_size, repeat);
+  // check the output against the batch-1 reference tags
+  EXPECT_EQ(outputs_slots.size(), 1UL);
+  auto &out = outputs_slots[0];
+  size_t size = std::accumulate(out.shape.begin(), out.shape.end(), 1,
+                                [](int a, int b) { return a * b; });
+  size_t batch1_size = sizeof(lac_ref_data) / sizeof(int64_t);
+  PADDLE_ENFORCE_GT(size, 0);
+  EXPECT_GE(size, batch1_size);
+  int64_t *pdata = static_cast<int64_t *>(out.data.data());
+  for (size_t i = 0; i < batch1_size; ++i) {
+    EXPECT_EQ(pdata[i], lac_ref_data[i]);
+  }
+}
+
+TEST(Analyzer_LAC, native) {
+  LOG(INFO) << "LAC with native";
+  TestLACPrediction(FLAGS_infer_model, FLAGS_infer_data, FLAGS_batch_size,
+                    FLAGS_repeat, FLAGS_test_all_data);
+}
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
-- 
GitLab
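
A note on the LoD layout used by the tester: DataRecord::Prepare flattens a
batch of variable-length sentences into one contiguous buffer of word ids and
records the cumulative offsets in `lod`, starting at 0, so the batch size is
always lod.size() - 1 (exactly what GetOneBatch asserts). Below is a minimal,
self-contained C++ sketch of that convention; the sample ids and every name in
it are illustrative only, not part of the patch.

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // Three variable-length "sentences" of word ids (made-up values).
  std::vector<std::vector<int64_t>> sentences = {
      {24, 25, 25}, {38, 30}, {31, 14, 15, 44}};
  std::vector<int64_t> flat;   // concatenated word ids, like one_batch
  std::vector<size_t> lod{0};  // cumulative offsets, like DataRecord::lod
  for (const auto &s : sentences) {
    flat.insert(flat.end(), s.begin(), s.end());
    lod.push_back(lod.back() + s.size());
  }
  // lod is now {0, 3, 5, 9}: sentence i occupies [lod[i], lod[i+1]).
  for (size_t i = 0; i + 1 < lod.size(); ++i) {
    std::cout << "sentence " << i << " spans [" << lod[i] << ", " << lod[i + 1]
              << ")\n";
  }
  std::cout << "batch size = " << lod.size() - 1 << "\n";
  return 0;
}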
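
The timing code follows the usual warm-up-then-measure pattern: FLAGS_burning
untimed runs come first, then `repeat` timed runs, and PrintTime reports
latency / repeat as the average. The sketch below reproduces the same pattern
with std::chrono; the patch's own Timer comes from helper.h, so RunOnce and
this stand-in timer are hypothetical placeholders.

#include <chrono>
#include <iostream>

// Placeholder for one inference call (hypothetical).
static void RunOnce() { /* predictor->Run(...) would go here */ }

int main() {
  const int burning = 2;  // untimed warm-up runs, like FLAGS_burning
  const int repeat = 10;  // timed runs, like FLAGS_repeat
  for (int i = 0; i < burning; ++i) RunOnce();
  auto start = std::chrono::high_resolution_clock::now();
  for (int i = 0; i < repeat; ++i) RunOnce();
  auto stop = std::chrono::high_resolution_clock::now();
  double total_ms =
      std::chrono::duration<double, std::milli>(stop - start).count();
  std::cout << "avg latency: " << total_ms / repeat << " ms\n";
  return 0;
}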