From 668563088e57523d97479d257fd005bbfaee2403 Mon Sep 17 00:00:00 2001
From: Tao Luo
Date: Thu, 17 Jan 2019 16:35:25 +0800
Subject: [PATCH] add pyramid_dnn c++ inference test

test=develop
---
 .../fluid/inference/tests/api/CMakeLists.txt  |   5 +
 .../tests/api/analyzer_pyramid_dnn_tester.cc  | 182 ++++++++++++++++++
 2 files changed, 187 insertions(+)
 create mode 100644 paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc

diff --git a/paddle/fluid/inference/tests/api/CMakeLists.txt b/paddle/fluid/inference/tests/api/CMakeLists.txt
index 0f6706588..e85e03dd3 100644
--- a/paddle/fluid/inference/tests/api/CMakeLists.txt
+++ b/paddle/fluid/inference/tests/api/CMakeLists.txt
@@ -86,6 +86,11 @@ set(MM_DNN_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/mm_dnn")
 download_model_and_data(${MM_DNN_INSTALL_DIR} "MM_DNN_model.tar.gz" "MM_DNN_data.txt.tar.gz")
 inference_analysis_api_test(test_analyzer_mm_dnn ${MM_DNN_INSTALL_DIR} analyzer_mm_dnn_tester.cc)
 
+# Pyramid DNN
+set(PYRAMID_DNN_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/pyramid_dnn")
+download_model_and_data(${PYRAMID_DNN_INSTALL_DIR} "PyramidDNN_model.tar.gz" "PyramidDNN_data.txt.tar.gz")
+inference_analysis_api_test(test_analyzer_pyramid_dnn ${PYRAMID_DNN_INSTALL_DIR} analyzer_pyramid_dnn_tester.cc)
+
 # text_classification
 set(TEXT_CLASSIFICATION_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/text_classification")
 download_model_and_data(${TEXT_CLASSIFICATION_INSTALL_DIR} "text-classification-Senta.tar.gz" "text_classification_data.txt.tar.gz")
diff --git a/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc b/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc
new file mode 100644
index 000000000..ad2c46e48
--- /dev/null
+++ b/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc
@@ -0,0 +1,182 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/inference/tests/api/tester_helper.h"
+
+namespace paddle {
+namespace inference {
+using contrib::AnalysisConfig;
+
+struct DataRecord {
+  std::vector<std::vector<int64_t>> query_basic, query_phrase, title_basic,
+      title_phrase;
+  std::vector<size_t> lod1, lod2, lod3, lod4;
+  size_t batch_iter{0}, batch_size{1}, num_samples;  // total number of samples
+  DataRecord() = default;
+  explicit DataRecord(const std::string &path, int batch_size = 1)
+      : batch_size(batch_size) {
+    Load(path);
+  }
+  DataRecord NextBatch() {
+    DataRecord data;
+    size_t batch_end = batch_iter + batch_size;
+    // NOTE: skip the final batch if not enough data is provided.
+    if (batch_end <= query_basic.size()) {
+      GetInputPerBatch(query_basic, &data.query_basic, &data.lod1, batch_iter,
+                       batch_end);
+      GetInputPerBatch(query_phrase, &data.query_phrase, &data.lod2, batch_iter,
+                       batch_end);
+      GetInputPerBatch(title_basic, &data.title_basic, &data.lod3, batch_iter,
+                       batch_end);
+      GetInputPerBatch(title_phrase, &data.title_phrase, &data.lod4, batch_iter,
+                       batch_end);
+    }
+    batch_iter += batch_size;
+    return data;
+  }
+  void Load(const std::string &path) {
+    std::ifstream file(path);
+    std::string line;
+    int num_lines = 0;
+    while (std::getline(file, line)) {
+      std::vector<std::string> data;
+      split(line, ';', &data);
+      // load query data
+      std::vector<int64_t> query_basic_data;
+      split_to_int64(data[1], ' ', &query_basic_data);
+      std::vector<int64_t> query_phrase_data;
+      split_to_int64(data[2], ' ', &query_phrase_data);
+      // load title data
+      std::vector<int64_t> title_basic_data;
+      split_to_int64(data[3], ' ', &title_basic_data);
+      std::vector<int64_t> title_phrase_data;
+      split_to_int64(data[4], ' ', &title_phrase_data);
+      // filter out the empty data
+      bool flag =
+          data[1].size() && data[2].size() && data[3].size() && data[4].size();
+      if (flag) {
+        query_basic.push_back(std::move(query_basic_data));
+        query_phrase.push_back(std::move(query_phrase_data));
+        title_basic.push_back(std::move(title_basic_data));
+        title_phrase.push_back(std::move(title_phrase_data));
+        num_lines++;
+      }
+    }
+    num_samples = num_lines;
+  }
+};
+
+void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
+                   int batch_size) {
+  PaddleTensor query_basic_tensor, query_phrase_tensor, title_basic_tensor,
+      title_phrase_tensor;
+  query_basic_tensor.name = "query_basic";
+  query_phrase_tensor.name = "query_phrase";
+  title_basic_tensor.name = "pos_title_basic";
+  title_phrase_tensor.name = "pos_title_phrase";
+  auto one_batch = data->NextBatch();
+  // assign data
+  TensorAssignData<int64_t>(&query_basic_tensor, one_batch.query_basic,
+                            one_batch.lod1);
+  TensorAssignData<int64_t>(&query_phrase_tensor, one_batch.query_phrase,
+                            one_batch.lod2);
+  TensorAssignData<int64_t>(&title_basic_tensor, one_batch.title_basic,
+                            one_batch.lod3);
+  TensorAssignData<int64_t>(&title_phrase_tensor, one_batch.title_phrase,
+                            one_batch.lod4);
+  // Set inputs.
+  input_slots->assign({query_basic_tensor, query_phrase_tensor,
+                       title_basic_tensor, title_phrase_tensor});
+  for (auto &tensor : *input_slots) {
+    tensor.dtype = PaddleDType::INT64;
+  }
+}
+
+void SetConfig(contrib::AnalysisConfig *cfg) {
+  cfg->SetModel(FLAGS_infer_model);
+  cfg->DisableGpu();
+  cfg->SwitchSpecifyInputNames();
+  cfg->SwitchIrOptim();
+}
+
+void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
+  DataRecord data(FLAGS_infer_data, FLAGS_batch_size);
+  std::vector<PaddleTensor> input_slots;
+  int epoch = FLAGS_test_all_data ? data.num_samples / FLAGS_batch_size : 1;
+  LOG(INFO) << "number of samples: " << epoch * FLAGS_batch_size;
+  for (int bid = 0; bid < epoch; ++bid) {
+    PrepareInputs(&input_slots, &data, FLAGS_batch_size);
+    (*inputs).emplace_back(input_slots);
+  }
+}
+
+// Easy to profile independently.
+TEST(Analyzer_Pyramid_DNN, profile) {
+  contrib::AnalysisConfig cfg;
+  SetConfig(&cfg);
+  std::vector<PaddleTensor> outputs;
+
+  std::vector<std::vector<PaddleTensor>> input_slots_all;
+  SetInput(&input_slots_all);
+  TestPrediction(reinterpret_cast<const PaddlePredictor::Config *>(&cfg),
+                 input_slots_all, &outputs, FLAGS_num_threads);
+
+  if (FLAGS_num_threads == 1 && !FLAGS_test_all_data) {
+    PADDLE_ENFORCE_EQ(outputs.size(), 1UL);
+    size_t size = GetSize(outputs[0]);
+    PADDLE_ENFORCE_GT(size, 0);
+    float *result = static_cast<float *>(outputs[0].data.data());
+    // the output is a probability, which lies in (0, 1).
+    for (size_t i = 0; i < size; i++) {
+      EXPECT_GT(result[i], 0);
+      EXPECT_LT(result[i], 1);
+    }
+  }
+}
+
+// Check the fuse status
+TEST(Analyzer_Pyramid_DNN, fuse_statis) {
+  contrib::AnalysisConfig cfg;
+  SetConfig(&cfg);
+
+  int num_ops;
+  auto predictor = CreatePaddlePredictor<AnalysisConfig>(cfg);
+  auto fuse_statis = GetFuseStatis(
+      static_cast<AnalysisPredictor *>(predictor.get()), &num_ops);
+}
+
+// Compare results of NativeConfig and AnalysisConfig
+TEST(Analyzer_Pyramid_DNN, compare) {
+  contrib::AnalysisConfig cfg;
+  SetConfig(&cfg);
+
+  std::vector<std::vector<PaddleTensor>> input_slots_all;
+  SetInput(&input_slots_all);
+  CompareNativeAndAnalysis(
+      reinterpret_cast<const PaddlePredictor::Config *>(&cfg), input_slots_all);
+}
+
+// Compare deterministic results
+TEST(Analyzer_Pyramid_DNN, compare_determine) {
+  AnalysisConfig cfg;
+  SetConfig(&cfg);
+
+  std::vector<std::vector<PaddleTensor>> input_slots_all;
+  SetInput(&input_slots_all);
+  CompareDeterministic(reinterpret_cast<const AnalysisConfig *>(&cfg),
+                       input_slots_all);
+}
+
+}  // namespace inference
+}  // namespace paddle
-- 
GitLab
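
Note (not part of the patch): once built with testing enabled, the registered target can be run through ctest (e.g. ctest -R test_analyzer_pyramid_dnn), and the tester binary reads the --infer_model, --infer_data, --batch_size, --num_threads, and --test_all_data gflags used above. The sketch below shows how the same SetConfig() settings could drive a single standalone prediction through the public C++ API, outside the test harness. It is a minimal illustration under stated assumptions: the model path and token ids are placeholders, and MakeIdTensor is a hypothetical helper standing in for the tester_helper.h utility TensorAssignData.

#include <iostream>
#include <string>
#include <vector>

#include "paddle/fluid/inference/api/paddle_inference_api.h"

namespace {

// Hypothetical helper: build one LoD tensor holding a single sequence of
// int64 word ids, mirroring what TensorAssignData does in the tester.
paddle::PaddleTensor MakeIdTensor(const std::string &name,
                                  std::vector<int64_t> *ids) {
  paddle::PaddleTensor tensor;
  tensor.name = name;
  tensor.shape = {static_cast<int>(ids->size()), 1};
  tensor.dtype = paddle::PaddleDType::INT64;
  // Share the caller's buffer; this PaddleBuf constructor does not copy,
  // so *ids must outlive the Run() call.
  tensor.data = paddle::PaddleBuf(ids->data(), ids->size() * sizeof(int64_t));
  tensor.lod = {{0, ids->size()}};  // one sequence covering all ids
  return tensor;
}

}  // namespace

int main() {
  // Mirror SetConfig() from the tester: CPU execution, inputs fed by name,
  // IR optimization passes enabled.
  paddle::contrib::AnalysisConfig config;
  config.SetModel("/path/to/pyramid_dnn/model");  // placeholder path
  config.DisableGpu();
  config.SwitchSpecifyInputNames();
  config.SwitchIrOptim();

  auto predictor =
      paddle::CreatePaddlePredictor<paddle::contrib::AnalysisConfig>(config);

  // Placeholder ids; the real data file stores ';'-separated fields of
  // space-separated int64 tokens, parsed by DataRecord::Load above.
  std::vector<int64_t> ids = {1, 2, 3};
  std::vector<paddle::PaddleTensor> inputs = {
      MakeIdTensor("query_basic", &ids), MakeIdTensor("query_phrase", &ids),
      MakeIdTensor("pos_title_basic", &ids),
      MakeIdTensor("pos_title_phrase", &ids)};

  std::vector<paddle::PaddleTensor> outputs;
  if (predictor->Run(inputs, &outputs)) {
    // The profile test asserts each output value is a probability in (0, 1).
    auto *result = static_cast<float *>(outputs[0].data.data());
    std::cout << "first output: " << result[0] << std::endl;
  }
  return 0;
}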