From c1df7048c78b6265b87fbe7e2a72fc4e0fcf9e91 Mon Sep 17 00:00:00 2001
From: Tao Luo
Date: Fri, 8 May 2020 11:33:35 +0800
Subject: [PATCH] add UT for mkldnn_cache_capacity (#24336)

* add UT for mkldnn_cache_capacity

test=develop

* fix comparison of integer expressions of different signedness

test=develop
---
 .../fluid/inference/api/analysis_predictor.cc |   8 ++
 .../fluid/inference/tests/api/CMakeLists.txt  |   9 ++
 .../tests/api/analyzer_detect_tester.cc       | 129 ++++++++++++++++++
 3 files changed, 146 insertions(+)
 create mode 100644 paddle/fluid/inference/tests/api/analyzer_detect_tester.cc

diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index 5220388a277..56eb65178d3 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -269,6 +269,14 @@ void AnalysisPredictor::MkldnnPostReset() {
 #ifdef PADDLE_WITH_MKLDNN
   // In cache clearing mode.
   if (config_.mkldnn_cache_capacity_ > 0) {
+    if (VLOG_IS_ON(2)) {
+      auto shape_blob_size = static_cast<platform::MKLDNNDeviceContext *>(
+                                 (&platform::DeviceContextPool::Instance())
+                                     ->Get(platform::CPUPlace()))
+                                 ->GetShapeBlobSize();
+      CHECK_LE(shape_blob_size,
+               static_cast<size_t>(config_.mkldnn_cache_capacity_));
+    }
     paddle::platform::set_cur_mkldnn_session_id(
         platform::kMKLDNNSessionID_Default);
     platform::set_cur_input_shape_cache_capacity(0);
diff --git a/paddle/fluid/inference/tests/api/CMakeLists.txt b/paddle/fluid/inference/tests/api/CMakeLists.txt
index ab11c7d8215..04876898151 100644
--- a/paddle/fluid/inference/tests/api/CMakeLists.txt
+++ b/paddle/fluid/inference/tests/api/CMakeLists.txt
@@ -195,6 +195,15 @@ if (NOT EXISTS ${OCR_INSTALL_DIR})
 endif()
 inference_analysis_api_test(test_analyzer_ocr ${OCR_INSTALL_DIR} analyzer_vis_tester.cc)
 
+# densebox
+set(DENSEBOX_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/densebox")
+download_data(${DENSEBOX_INSTALL_DIR} "densebox.tar.gz")
+inference_analysis_test(test_analyzer_detect SRCS analyzer_detect_tester.cc
+        EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
+        ARGS --infer_model=${DENSEBOX_INSTALL_DIR}/model --infer_data=${DENSEBOX_INSTALL_DIR}/detect_input_50.txt
+        --infer_shape=${DENSEBOX_INSTALL_DIR}/shape_50.txt)
+set_property(TEST test_analyzer_detect PROPERTY ENVIRONMENT GLOG_vmodule=analysis_predictor=2)
+
 # mobilenet with transpose op
 set(MOBILENET_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/mobilenet")
 if (NOT EXISTS ${MOBILENET_INSTALL_DIR})
diff --git a/paddle/fluid/inference/tests/api/analyzer_detect_tester.cc b/paddle/fluid/inference/tests/api/analyzer_detect_tester.cc
new file mode 100644
index 00000000000..e2cc1f44999
--- /dev/null
+++ b/paddle/fluid/inference/tests/api/analyzer_detect_tester.cc
@@ -0,0 +1,129 @@
+/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include <gtest/gtest.h>
+#include <fstream>
+#include <iostream>
+#include "paddle/fluid/inference/tests/api/tester_helper.h"
+
+DEFINE_string(infer_shape, "", "data shape file");
+DEFINE_int32(sample, 20, "number of sample");
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+struct Record {
+  std::vector<float> data;
+  std::vector<int32_t> shape;
+};
+
+Record ProcessALine(const std::string &line, const std::string &shape_line) {
+  VLOG(3) << "process a line";
+  std::vector<std::string> columns;
+
+  Record record;
+  std::vector<std::string> data_strs;
+  split(line, ' ', &data_strs);
+  for (auto &d : data_strs) {
+    record.data.push_back(std::stof(d));
+  }
+
+  std::vector<std::string> shape_strs;
+  split(shape_line, ' ', &shape_strs);
+  for (auto &s : shape_strs) {
+    record.shape.push_back(std::stoi(s));
+  }
+  return record;
+}
+
+void SetConfig(AnalysisConfig *cfg) {
+  cfg->SetModel(FLAGS_infer_model + "/model", FLAGS_infer_model + "/params");
+  cfg->DisableGpu();
+  cfg->SwitchIrDebug();
+  cfg->SwitchSpecifyInputNames(false);
+  cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
+}
+
+void SetInput(std::vector<std::vector<PaddleTensor>> *inputs,
+              const std::string &line, const std::string &shape_line) {
+  auto record = ProcessALine(line, shape_line);
+
+  PaddleTensor input;
+  input.shape = record.shape;
+  input.dtype = PaddleDType::FLOAT32;
+  size_t input_size = record.data.size() * sizeof(float);
+  input.data.Resize(input_size);
+  memcpy(input.data.data(), record.data.data(), input_size);
+  std::vector<PaddleTensor> input_slots;
+  input_slots.assign({input});
+  (*inputs).emplace_back(input_slots);
+}
+
+void profile(int cache_capacity = 1) {
+  AnalysisConfig cfg;
+  SetConfig(&cfg);
+  cfg.EnableMKLDNN();
+  cfg.SetMkldnnCacheCapacity(cache_capacity);
+
+  std::vector<std::vector<PaddleTensor>> outputs;
+  std::vector<std::vector<PaddleTensor>> input_slots_all;
+
+  Timer run_timer;
+  double elapsed_time = 0;
+
+  int num_times = FLAGS_repeat;
+  int sample = FLAGS_sample;
+  auto predictor = CreatePaddlePredictor<AnalysisConfig>(cfg);
+  outputs.resize(sample);
+
+  std::vector<std::thread> threads;
+
+  std::ifstream file(FLAGS_infer_data);
+  std::ifstream infer_file(FLAGS_infer_shape);
+  std::string line;
+  std::string shape_line;
+
+  for (int i = 0; i < sample; i++) {
+    threads.emplace_back([&, i]() {
+      std::getline(file, line);
+      std::getline(infer_file, shape_line);
+      SetInput(&input_slots_all, line, shape_line);
+
+      run_timer.tic();
+      predictor->Run(input_slots_all[0], &outputs[0], FLAGS_batch_size);
+      elapsed_time += run_timer.toc();
+    });
+    threads[0].join();
+    threads.clear();
+    std::vector<std::vector<PaddleTensor>>().swap(input_slots_all);
+  }
+  file.close();
+  infer_file.close();
+
+  auto batch_latency = elapsed_time / (sample * num_times);
+  PrintTime(FLAGS_batch_size, num_times, FLAGS_num_threads, 0, batch_latency,
+            sample, VarType::FP32);
+}
+
+#ifdef PADDLE_WITH_MKLDNN
+TEST(Analyzer_detect, profile_mkldnn) {
+  profile(5 /* cache_capacity */);
+  profile(10 /* cache_capacity */);
+}
+#endif
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
-- 
GitLab
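
Note (not part of the patch): a minimal sketch of the user-facing flow this test exercises. EnableMKLDNN() plus SetMkldnnCacheCapacity(n) bounds the per-shape MKL-DNN object cache, and the CHECK_LE added to MkldnnPostReset() verifies that bound whenever VLOG level 2 is active. The model paths and input shape below are placeholders, not values from the patch.

// sketch.cc -- hedged example against the 2020-era C++ inference API
#include <vector>
#include "paddle/fluid/inference/api/paddle_inference_api.h"

int main() {
  paddle::AnalysisConfig cfg;
  cfg.SetModel("model_dir/model", "model_dir/params");  // placeholder paths
  cfg.DisableGpu();
  cfg.EnableMKLDNN();
  // Cache MKL-DNN objects for at most 10 distinct input shapes; the
  // CHECK_LE in MkldnnPostReset() asserts this bound under VLOG(2).
  cfg.SetMkldnnCacheCapacity(10);

  auto predictor = paddle::CreatePaddlePredictor<paddle::AnalysisConfig>(cfg);

  paddle::PaddleTensor input;
  input.shape = {1, 3, 224, 224};  // placeholder; the test varies this per sample
  input.dtype = paddle::PaddleDType::FLOAT32;
  input.data.Resize(1 * 3 * 224 * 224 * sizeof(float));  // left unfilled for brevity

  std::vector<paddle::PaddleTensor> outputs;
  predictor->Run({input}, &outputs);
  return 0;
}

Because the check only runs under VLOG level 2, the CMake stanza sets GLOG_vmodule=analysis_predictor=2 in the test environment; running test_analyzer_detect without that variable skips the cache-size assertion.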