diff --git a/paddle/fluid/inference/analysis/CMakeLists.txt b/paddle/fluid/inference/analysis/CMakeLists.txt
index b625a617a26cf9d34abe9931eaded9bc2797cf08..765f8a4486bb94792e198dea481ba3b6d153767a 100644
--- a/paddle/fluid/inference/analysis/CMakeLists.txt
+++ b/paddle/fluid/inference/analysis/CMakeLists.txt
@@ -100,12 +100,17 @@ inference_analysis_test(test_analyzer_lac SRCS analyzer_lac_tester.cc
 
 set(TEXT_CLASSIFICATION_MODEL_URL "http://paddle-inference-dist.bj.bcebos.com/text-classification-Senta.tar.gz")
+set(TEXT_CLASSIFICATION_DATA_URL "http://paddle-inference-dist.bj.bcebos.com/text_classification_data.txt.tar.gz")
 set(TEXT_CLASSIFICATION_INSTALL_DIR "${THIRD_PARTY_PATH}/inference_demo/text_classification" CACHE PATH "Text Classification model and data root." FORCE)
 if (NOT EXISTS ${TEXT_CLASSIFICATION_INSTALL_DIR} AND WITH_TESTING AND WITH_INFERENCE)
   inference_download_and_uncompress(${TEXT_CLASSIFICATION_INSTALL_DIR} ${TEXT_CLASSIFICATION_MODEL_URL} "text-classification-Senta.tar.gz")
+  inference_download_and_uncompress(${TEXT_CLASSIFICATION_INSTALL_DIR} ${TEXT_CLASSIFICATION_DATA_URL} "text_classification_data.txt.tar.gz")
 endif()
 
 inference_analysis_test(test_text_classification SRCS analyzer_text_classification_tester.cc
   EXTRA_DEPS paddle_inference_api paddle_fluid_api analysis_predictor
-  ARGS --infer_model=${TEXT_CLASSIFICATION_INSTALL_DIR}/text-classification-Senta)
+  ARGS --infer_model=${TEXT_CLASSIFICATION_INSTALL_DIR}/text-classification-Senta
+       --infer_data=${TEXT_CLASSIFICATION_INSTALL_DIR}/data.txt
+       --topn=1  # Just run top 1 batch.
+  )
diff --git a/paddle/fluid/inference/analysis/analyzer_text_classification_tester.cc b/paddle/fluid/inference/analysis/analyzer_text_classification_tester.cc
index 265e814acd594d6185251cbaa4d6880bb9ee7405..0e493176c4bfb154de0d079868f9f396813ec48f 100644
--- a/paddle/fluid/inference/analysis/analyzer_text_classification_tester.cc
+++ b/paddle/fluid/inference/analysis/analyzer_text_classification_tester.cc
@@ -16,8 +16,10 @@
 #include <gflags/gflags.h>
 #include <glog/logging.h>  // use glog instead of PADDLE_ENFORCE to avoid importing other paddle header files.
 #include <gtest/gtest.h>
+#include <fstream>
 #include "paddle/fluid/framework/ir/pass.h"
 #include "paddle/fluid/inference/analysis/ut_helper.h"
+#include "paddle/fluid/inference/api/helper.h"
 #include "paddle/fluid/inference/api/paddle_inference_api.h"
 #include "paddle/fluid/inference/api/paddle_inference_pass.h"
 #include "paddle/fluid/inference/api/timer.h"
@@ -26,6 +28,7 @@ DEFINE_string(infer_model, "", "Directory of the inference model.");
 DEFINE_string(infer_data, "", "Path of the dataset.");
 DEFINE_int32(batch_size, 1, "batch size.");
 DEFINE_int32(repeat, 1, "How many times to repeat run.");
+DEFINE_int32(topn, -1, "Run top n batches of data to save time");
 
 namespace paddle {
 
@@ -45,41 +48,67 @@ void PrintTime(const double latency, const int bs, const int repeat) {
   LOG(INFO) << "=====================================";
 }
 
-void Main(int batch_size) {
-  // Three sequence inputs.
-  std::vector<PaddleTensor> input_slots(1);
-  // one batch starts
-  // data --
-  int64_t data0[] = {0, 1, 2};
-  for (auto &input : input_slots) {
-    input.data.Reset(data0, sizeof(data0));
-    input.shape = std::vector<int>({3, 1});
-    // dtype --
-    input.dtype = PaddleDType::INT64;
-    // LoD --
-    input.lod = std::vector<std::vector<size_t>>({{0, 3}});
+struct DataReader {
+  DataReader(const std::string &path) : file(new std::ifstream(path)) {}
+
+  bool NextBatch(PaddleTensor *tensor, int batch_size) {
+    PADDLE_ENFORCE_EQ(batch_size, 1);
+    std::string line;
+    tensor->lod.clear();
+    tensor->lod.emplace_back(std::vector<size_t>({0}));
+    std::vector<int64_t> data;
+
+    for (int i = 0; i < batch_size; i++) {
+      if (!std::getline(*file, line)) return false;
+      inference::split_to_int64(line, ' ', &data);
+    }
+    tensor->lod.front().push_back(data.size());
+
+    tensor->data.Resize(data.size() * sizeof(int64_t));
+    memcpy(tensor->data.data(), data.data(), data.size() * sizeof(int64_t));
+    tensor->shape.clear();
+    tensor->shape.push_back(data.size());
+    tensor->shape.push_back(1);
+    return true;
   }
+  std::unique_ptr<std::ifstream> file;
+};
+
+void Main(int batch_size) {
   // shape --
   // Create Predictor --
   AnalysisConfig config;
   config.model_dir = FLAGS_infer_model;
   config.use_gpu = false;
   config.enable_ir_optim = true;
-  config.ir_passes.push_back("fc_lstm_fuse_pass");
   auto predictor =
       CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
           config);
 
+  std::vector<PaddleTensor> input_slots(1);
+  // one batch starts
+  // data --
+  auto &input = input_slots[0];
+  input.dtype = PaddleDType::INT64;
+
   inference::Timer timer;
   double sum = 0;
   std::vector<PaddleTensor> output_slots;
-  for (int i = 0; i < FLAGS_repeat; i++) {
-    timer.tic();
-    CHECK(predictor->Run(input_slots, &output_slots));
-    sum += timer.toc();
+
+  int num_batches = 0;
+  for (int t = 0; t < FLAGS_repeat; t++) {
+    DataReader reader(FLAGS_infer_data);
+    while (reader.NextBatch(&input, FLAGS_batch_size)) {
+      if (FLAGS_topn > 0 && num_batches > FLAGS_topn) break;
+      timer.tic();
+      CHECK(predictor->Run(input_slots, &output_slots));
+      sum += timer.toc();
+      ++num_batches;
+    }
   }
-  PrintTime(sum, batch_size, FLAGS_repeat);
+
+  PrintTime(sum, batch_size, num_batches);
 
   // Get output
   LOG(INFO) << "get outputs " << output_slots.size();
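
Note for reviewers: the new `DataReader` assumes the downloaded `data.txt` holds one sample per line, each line being space-separated int64 word ids (this is what it feeds to `inference::split_to_int64`), and it emits a single-sequence LoD tensor of shape `{num_ids, 1}` with `lod = {{0, num_ids}}` and `dtype = INT64`, with `batch_size` enforced to 1. The standalone sketch below only illustrates that assumed file format; `ParseLine` and the sample line are hypothetical stand-ins written for this note, not Paddle's `split_to_int64` itself or real data.

```cpp
// Minimal sketch of the data format the new DataReader expects:
// one sample per line, space-separated int64 word ids, batch_size == 1.
// ParseLine is a hypothetical stand-in for inference::split_to_int64.
#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

std::vector<int64_t> ParseLine(const std::string &line, char sep) {
  std::vector<int64_t> ids;
  std::stringstream ss(line);
  std::string field;
  while (std::getline(ss, field, sep)) {
    if (!field.empty()) ids.push_back(std::stoll(field));
  }
  return ids;
}

int main() {
  // Illustrative sample line; real ids come from data.txt.
  std::string line = "101 7 2048 33 9";
  std::vector<int64_t> ids = ParseLine(line, ' ');
  // The tester would wrap this into one PaddleTensor with
  //   shape = {ids.size(), 1}, lod = {{0, ids.size()}}, dtype = INT64.
  std::cout << "sequence length: " << ids.size() << "\n";
  return 0;
}
```

With the CMake change above, the test is run as `test_text_classification --infer_model=.../text-classification-Senta --infer_data=.../data.txt --topn=1`, so CI only times the first batches of the dataset instead of the whole file.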