From a60957f3861aa7d9477c07abe8ae7c556621a72c Mon Sep 17 00:00:00 2001
From: Sylwester Fraczek
Date: Wed, 7 Nov 2018 13:10:12 +0100
Subject: [PATCH] add test_analyzer_mobilenet

---
 paddle/fluid/inference/analysis/analyzer.h       |   6 +-
 .../fluid/inference/tests/api/CMakeLists.txt     |   8 ++
 .../tests/api/analyzer_mobilenet_tester.cc       | 108 ++++++++++++++++++
 3 files changed, 120 insertions(+), 2 deletions(-)
 create mode 100644 paddle/fluid/inference/tests/api/analyzer_mobilenet_tester.cc

diff --git a/paddle/fluid/inference/analysis/analyzer.h b/paddle/fluid/inference/analysis/analyzer.h
index 3af1d572d..b5dc1fbbe 100644
--- a/paddle/fluid/inference/analysis/analyzer.h
+++ b/paddle/fluid/inference/analysis/analyzer.h
@@ -66,7 +66,10 @@ class Analyzer : public OrderedRegistry<PassManager> {
   // merged in a larger fuse op. The small fusion will not break the pattern of
   // larger fusion.
   const std::vector<std::string> all_ir_passes_{{
-      // Manual update the passes here.
+// Manually update the passes here.
+#ifdef PADDLE_WITH_MKLDNN
+      "depthwise_conv_mkldnn_pass",        //
+#endif
       "attention_lstm_fuse_pass",          //
       "seqconv_eltadd_relu_fuse_pass",     //
       "embedding_fc_lstm_fuse_pass",       //
@@ -79,7 +82,6 @@ class Analyzer : public OrderedRegistry<PassManager> {
       "conv_bn_fuse_pass",                 //
       "conv_eltwiseadd_bn_fuse_pass",      //
 #ifdef PADDLE_WITH_MKLDNN
-      "depthwise_conv_mkldnn_pass",        //
       "conv_bias_mkldnn_fuse_pass",        //
       "conv_relu_mkldnn_fuse_pass",        //
       "conv_elementwise_add_mkldnn_fuse_pass",  //
diff --git a/paddle/fluid/inference/tests/api/CMakeLists.txt b/paddle/fluid/inference/tests/api/CMakeLists.txt
index 2ca84c800..10ad25230 100644
--- a/paddle/fluid/inference/tests/api/CMakeLists.txt
+++ b/paddle/fluid/inference/tests/api/CMakeLists.txt
@@ -82,6 +82,14 @@ inference_analysis_api_test(test_analyzer_ocr ${OCR_INSTALL_DIR} analyzer_vis_te
 inference_analysis_api_test_with_fake_data(test_analyzer_resnet50
     "${INFERENCE_DEMO_INSTALL_DIR}/resnet50" analyzer_resnet50_tester.cc "resnet50_model.tar.gz")
 
+# mobilenet
+set(MOBILENET_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/mobilenet")
+if (NOT EXISTS ${MOBILENET_INSTALL_DIR})
+  inference_download_and_uncompress(${MOBILENET_INSTALL_DIR} "http://paddle-inference-dist.bj.bcebos.com/tensorrt_test" "mobilenet.tar.gz")
+endif()
+inference_analysis_test(test_analyzer_mobilenet SRCS analyzer_mobilenet_tester.cc
+                        EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} ARGS --infer_model=${MOBILENET_INSTALL_DIR}/mobilenet)
+
 # anakin
 if (WITH_ANAKIN AND WITH_MKL) # only needed in CI
     # anakin rnn1
diff --git a/paddle/fluid/inference/tests/api/analyzer_mobilenet_tester.cc b/paddle/fluid/inference/tests/api/analyzer_mobilenet_tester.cc
new file mode 100644
index 000000000..94ded50e6
--- /dev/null
+++ b/paddle/fluid/inference/tests/api/analyzer_mobilenet_tester.cc
@@ -0,0 +1,108 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+#include <fstream>
+#include <iostream>
+#include "paddle/fluid/inference/tests/api/tester_helper.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+void SetConfig(AnalysisConfig *cfg) {
+  cfg->model_dir = FLAGS_infer_model;
+  cfg->use_gpu = false;
+  cfg->device = 0;
+  cfg->enable_ir_optim = true;
+  cfg->specify_input_name = true;
+}
+
+void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
+  PADDLE_ENFORCE_EQ(FLAGS_test_all_data, 0, "Only have single batch of data.");
+
+  PaddleTensor input;
+  // channel=3, height/width=318
+  std::vector<int> shape({FLAGS_batch_size, 3, 318, 318});
+  input.shape = shape;
+  input.dtype = PaddleDType::FLOAT32;
+
+  // Fill the input with deterministic data; random data would make
+  // profiling runs harder to compare.
+  size_t size = FLAGS_batch_size * 3 * 318 * 318;
+  input.data.Resize(size * sizeof(float));
+  float *input_data = static_cast<float *>(input.data.data());
+  for (size_t i = 0; i < size; i++) {
+    *(input_data + i) = static_cast<float>(i) / size;
+  }
+
+  std::vector<PaddleTensor> input_slots;
+  input_slots.assign({input});
+  (*inputs).emplace_back(input_slots);
+}
+
+// Easy to profile independently.
+void profile(bool use_mkldnn = false) {
+  AnalysisConfig cfg;
+  SetConfig(&cfg);
+  cfg._use_mkldnn = use_mkldnn;
+  std::vector<PaddleTensor> outputs;
+
+  std::vector<std::vector<PaddleTensor>> input_slots_all;
+  SetInput(&input_slots_all);
+  TestPrediction(cfg, input_slots_all, &outputs, FLAGS_num_threads);
+
+  if (FLAGS_num_threads == 1 && !FLAGS_test_all_data) {
+    PADDLE_ENFORCE_EQ(outputs.size(), 1UL);
+    size_t size = GetSize(outputs[0]);
+    // The output is a 1000-dimensional feature.
+    EXPECT_EQ(size, 1000 * FLAGS_batch_size);
+  }
+}
+
+TEST(Analyzer_mobilenet, profile) { profile(); }
+#ifdef PADDLE_WITH_MKLDNN
+TEST(Analyzer_mobilenet, profile_mkldnn) { profile(true /* use_mkldnn */); }
+#endif
+
+// Check the depthwise_conv pass statistics.
+TEST(Analyzer_mobilenet, depthwise_conv_statis) {
+  AnalysisConfig cfg;
+  SetConfig(&cfg);
+  cfg._use_mkldnn = true;
+  int num_ops;
+  auto predictor = CreatePaddlePredictor<AnalysisConfig>(cfg);
+  auto fuse_statis = GetFuseStatis(
+      static_cast<AnalysisPredictor *>(predictor.get()), &num_ops);
+  ASSERT_TRUE(fuse_statis.count("depthwise_conv_mkldnn_pass"));
+  EXPECT_EQ(fuse_statis.at("depthwise_conv_mkldnn_pass"), 13);
+}
+
+// Compare results of NativeConfig and AnalysisConfig.
+void compare(bool use_mkldnn = false) {
+  AnalysisConfig cfg;
+  SetConfig(&cfg);
+  cfg._use_mkldnn = use_mkldnn;
+
+  std::vector<std::vector<PaddleTensor>> input_slots_all;
+  SetInput(&input_slots_all);
+  CompareNativeAndAnalysis(cfg, input_slots_all);
+}
+
+TEST(Analyzer_mobilenet, compare) { compare(); }
+#ifdef PADDLE_WITH_MKLDNN
+TEST(Analyzer_mobilenet, compare_mkldnn) { compare(true /* use_mkldnn */); }
+#endif
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
-- 
GitLab
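
Note: the standalone sketch below (not part of the patch) illustrates how the AnalysisConfig / PaddlePredictor API exercised by this tester is typically driven outside the gtest harness. It assumes the 2018-era C++ inference API, including the paddle::contrib namespace for AnalysisConfig, and "path/to/mobilenet" is a placeholder model directory, not a path from this patch.

#include <algorithm>
#include <vector>
#include "paddle/fluid/inference/api/paddle_inference_api.h"

int main() {
  // Mirror SetConfig() from the tester above.
  paddle::contrib::AnalysisConfig cfg;
  cfg.model_dir = "path/to/mobilenet";  // placeholder path (assumption)
  cfg.use_gpu = false;
  cfg.enable_ir_optim = true;

  auto predictor =
      paddle::CreatePaddlePredictor<paddle::contrib::AnalysisConfig>(cfg);

  // Build one NCHW float input, as SetInput() does (batch=1, 3x318x318).
  paddle::PaddleTensor input;
  input.shape = {1, 3, 318, 318};
  input.dtype = paddle::PaddleDType::FLOAT32;
  size_t numel = 1 * 3 * 318 * 318;
  input.data.Resize(numel * sizeof(float));
  // Zero-fill for a smoke run; the tester uses a deterministic ramp instead.
  std::fill_n(static_cast<float *>(input.data.data()), numel, 0.f);

  // Run once; the tester expects a 1000-dimensional output per batch item.
  std::vector<paddle::PaddleTensor> outputs;
  predictor->Run({input}, &outputs);
  return 0;
}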