From 83a693ee557f796b0801e6e9e59bfca2dca87308 Mon Sep 17 00:00:00 2001
From: Jacek Czaja
Date: Fri, 11 Dec 2020 03:33:59 +0100
Subject: [PATCH] [oneDNN] Added Unit Test for Multiple instances prediction
 (#29501)

* - Added infrastructure for new test

- Added UT for Multiple models prediction

- cosmetic fixes

- lint

- lint fixes

* - Removed timeout for MMP test
---
 .../fluid/inference/tests/api/CMakeLists.txt  |  11 ++
 .../tests/api/analyzer_mmp_tester.cc          | 111 ++++++++++++++++++
 2 files changed, 122 insertions(+)
 create mode 100644 paddle/fluid/inference/tests/api/analyzer_mmp_tester.cc

diff --git a/paddle/fluid/inference/tests/api/CMakeLists.txt b/paddle/fluid/inference/tests/api/CMakeLists.txt
index ba207109afd..56b222c75ce 100644
--- a/paddle/fluid/inference/tests/api/CMakeLists.txt
+++ b/paddle/fluid/inference/tests/api/CMakeLists.txt
@@ -54,6 +54,12 @@ function(inference_analysis_api_test target install_dir filename)
           ARGS --infer_model=${install_dir}/model --infer_data=${install_dir}/data.txt --refer_result=${install_dir}/result.txt)
 endfunction()
 
+function(inference_multiple_models_analysis_api_test target install_dir filename)
+  inference_analysis_test(${target} SRCS ${filename}
+          EXTRA_DEPS ${INFERENCE_EXTRA_DEPS}
+          ARGS --infer_model=${install_dir}/mobilenet_v2_models/1 --infer_model2=${install_dir}/mobilenet_v2_models/xx --infer_model3=${install_dir}/mobilenet_v2_models/3)
+endfunction()
+
 function(inference_analysis_api_test_build TARGET_NAME filename)
   inference_analysis_test_build(${TARGET_NAME} SRCS ${filename}
           EXTRA_DEPS ${INFERENCE_EXTRA_DEPS})
@@ -462,6 +468,11 @@ set(BERT_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/bert_emb128")
 download_model_and_data(${BERT_INSTALL_DIR} "bert_emb128_model.tar.gz" "bert_data_len20.txt.tar.gz")
 inference_analysis_api_test(test_analyzer_bert ${BERT_INSTALL_DIR} analyzer_bert_tester.cc)
 
+# multiple models prediction
+set(MMP_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/multi_model_prediction")
+download_data(${MMP_INSTALL_DIR} PaddleInference/mobilenet_v2_models.tar.gz)
+inference_multiple_models_analysis_api_test(test_analyzer_multi_model_prediction ${MMP_INSTALL_DIR} analyzer_mmp_tester.cc)
+
 if(WITH_GPU AND TENSORRT_FOUND)
   set(TRT_MODEL_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/trt_models")
   if (NOT EXISTS ${TRT_MODEL_INSTALL_DIR}/trt_inference_test_models.tar.gz)
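The extra ARGS wired up above reach the tester through gflags: each --infer_modelN value lands in a matching FLAGS_infer_modelN string once the test binary parses its command line (the shared harness in tester_helper.h does this for the real test). A minimal standalone sketch of that plumbing, assuming only a stock gflags installation; the main() below is illustrative and not part of the patch:

#include <cstdio>
#include <string>

#include <gflags/gflags.h>

// Same flag names as the tester; the help strings here are ours.
DEFINE_string(infer_model2, "", "second model path");
DEFINE_string(infer_model3, "", "third model path");

int main(int argc, char** argv) {
  // Consumes e.g. --infer_model2=/data/mobilenet_v2_models/xx
  gflags::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true);
  std::printf("model2: %s\nmodel3: %s\n", FLAGS_infer_model2.c_str(),
              FLAGS_infer_model3.c_str());
  return 0;
}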
+ +#include "paddle/fluid/framework/transfer_scope_cache.h" +#include "paddle/fluid/inference/tests/api/tester_helper.h" + +#include + +// Here add missing commands +DEFINE_string(infer_model2, "", "model path"); +DEFINE_string(infer_model3, "", "model path"); + +namespace paddle { +namespace inference { + +// Shape of Input to models +const int N = 1, C = 3, H = 224, W = 224; + +void SetConfig(AnalysisConfig* config, const std::string& infer_model) { + config->SetModel(infer_model + "/__model__", infer_model + "/__params__"); + config->DisableFCPadding(); + config->SwitchUseFeedFetchOps(false); + config->SwitchSpecifyInputNames(true); +} + +std::unique_ptr InitializePredictor( + const std::string& infer_model, std::vector& data, bool use_mkldnn) { + AnalysisConfig cfg; + SetConfig(&cfg, infer_model); + if (use_mkldnn) { + cfg.EnableMKLDNN(); + } + + auto predictor = ::paddle::CreatePaddlePredictor(cfg); + auto input_name = predictor->GetInputNames()[0]; + auto input = predictor->GetInputTensor(input_name); + std::vector shape{N, C, H, W}; + input->Reshape(std::move(shape)); + input->copy_from_cpu(data.data()); + + return predictor; +} + +// Compare result of NativeConfig and AnalysisConfig +void compare(bool use_mkldnn = false) { + // Create Input to models + std::vector data(N * C * H * W); + std::default_random_engine re{1234}; + std::uniform_real_distribution sampler{0.0, 1.0}; + for (auto& v : data) { + v = sampler(re); + } + + // Initialize Models predictors + auto predictor_1 = InitializePredictor(FLAGS_infer_model, data, use_mkldnn); + auto predictor_xx = InitializePredictor(FLAGS_infer_model2, data, use_mkldnn); + auto predictor_3 = InitializePredictor(FLAGS_infer_model3, data, use_mkldnn); + + // Run single xx model + predictor_xx->ZeroCopyRun(); + auto output = + predictor_xx->GetOutputTensor(predictor_xx->GetOutputNames()[0]); + auto output_shape = output->shape(); + int numel = std::accumulate(output_shape.begin(), output_shape.end(), 1, + std::multiplies()); + std::vector xx_output(numel); + output->copy_to_cpu(xx_output.data()); + + // Initialize xx model's predictor to trigger oneDNN cache clearing + predictor_xx = + std::move(InitializePredictor(FLAGS_infer_model2, data, use_mkldnn)); + + // Run sequence of models + predictor_1->ZeroCopyRun(); + predictor_xx->ZeroCopyRun(); + predictor_3->ZeroCopyRun(); + + // Get again output of xx model , but when all three models were executed + std::vector xx2_output(numel); + output = predictor_xx->GetOutputTensor(predictor_xx->GetOutputNames()[0]); + output->copy_to_cpu(xx2_output.data()); + + // compare results + auto result = std::equal( + xx_output.begin(), xx_output.end(), xx2_output.begin(), + [](const float& l, const float& r) { return fabs(l - r) < 1e-4; }); + + PADDLE_ENFORCE_EQ(result, true, paddle::platform::errors::Fatal( + "Results of model run independently " + "differs from results of the same model " + "run as a sequence of models")); +} + +TEST(Analyzer_mmp, compare) { compare(); } +#ifdef PADDLE_WITH_MKLDNN +TEST(Analyzer_mmp, compare_mkldnn) { compare(true /* use_mkldnn */); } +#endif + +} // namespace inference +} // namespace paddle -- GitLab