From 6313bdc35f99e553b1f1315c8436eb351875397d Mon Sep 17 00:00:00 2001
From: Luo Tao
Date: Mon, 16 Jul 2018 17:26:59 +0800
Subject: [PATCH] add independent vis_demo for inference

---
 paddle/contrib/inference/CMakeLists.txt      |  4 --
 paddle/contrib/inference/demo/CMakeLists.txt | 59 ------------------
 paddle/contrib/inference/demo/README.md      | 36 -----------
 paddle/contrib/inference/demo_ci/.gitignore  |  1 +
 paddle/contrib/inference/demo_ci/README.md   | 26 ++++++++
 paddle/contrib/inference/demo_ci/run.sh      | 43 ++++++++++++-
 .../inference/{demo => demo_ci}/utils.h      |  2 +-
 .../inference/{demo => demo_ci}/vis_demo.cc  | 62 ++++++++++---------
 8 files changed, 104 insertions(+), 129 deletions(-)
 delete mode 100644 paddle/contrib/inference/demo/CMakeLists.txt
 delete mode 100644 paddle/contrib/inference/demo/README.md
 create mode 100644 paddle/contrib/inference/demo_ci/.gitignore
 create mode 100644 paddle/contrib/inference/demo_ci/README.md
 rename paddle/contrib/inference/{demo => demo_ci}/utils.h (96%)
 rename paddle/contrib/inference/{demo => demo_ci}/vis_demo.cc (75%)

diff --git a/paddle/contrib/inference/CMakeLists.txt b/paddle/contrib/inference/CMakeLists.txt
index 87173fc42a4..df470a07521 100644
--- a/paddle/contrib/inference/CMakeLists.txt
+++ b/paddle/contrib/inference/CMakeLists.txt
@@ -104,7 +104,3 @@ if (WITH_ANAKIN) # only needed in CI
     target_compile_options(inference_anakin_test BEFORE PUBLIC ${ANAKIN_COMPILE_EXTRA_FLAGS})
   endif(WITH_TESTING)
 endif()
-
-if(WITH_TESTING)
-  add_subdirectory(demo)
-endif()
diff --git a/paddle/contrib/inference/demo/CMakeLists.txt b/paddle/contrib/inference/demo/CMakeLists.txt
deleted file mode 100644
index 2d501bf0085..00000000000
--- a/paddle/contrib/inference/demo/CMakeLists.txt
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-option(WITH_INFERENCE_DEMO "Compile with Inference demo" OFF)
-if(NOT WITH_INFERENCE_DEMO)
-  return()
-endif()
-
-set(DEMO_INSTALL_DIR "${PADDLE_BINARY_DIR}/inference_demo")
-set(URL_ROOT http://paddlemodels.bj.bcebos.com/inference-vis-demos%2F)
-
-function(inference_download_test_demo TARGET)
-  if (NOT WITH_TESTING)
-    return()
-  endif()
-  set(options "")
-  set(oneValueArgs URL)
-  set(multiValueArgs SRCS)
-  cmake_parse_arguments(tests "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
-
-  set(test_dir "${DEMO_INSTALL_DIR}/${TARGET}")
-  message(STATUS "inference demo ${test_dir}")
-
-  if(NOT EXISTS "${test_dir}")
-    message(STATUS "Download ${TARGET} model from ${tests_URL}")
-    execute_process(COMMAND bash -c "mkdir -p ${test_dir}")
-    execute_process(COMMAND bash -c "cd ${test_dir}; wget -q ${tests_URL}")
-    execute_process(COMMAND bash -c "cd ${test_dir}; tar xzf *.tar.gz")
-  endif()
-
-  cc_test(${TARGET} SRCS "${tests_SRCS}"
-          DEPS paddle_inference_api paddle_fluid
-          ARGS --data=${test_dir}/data.txt
-               --modeldir=${test_dir}/model
-               --refer=${test_dir}/result.txt)
-endfunction()
-
-# disable mobilenet test
-#inference_download_test_demo(mobilenet_inference_demo
-#  SRCS vis_demo.cc
-#  URL ${URL_ROOT}mobilenet.tar.gz)
-inference_download_test_demo(se_resnext50_inference_demo
-  SRCS vis_demo.cc
-  URL ${URL_ROOT}se_resnext50.tar.gz)
-inference_download_test_demo(ocr_inference_demo
-  SRCS vis_demo.cc
-  URL ${URL_ROOT}ocr.tar.gz)
diff --git a/paddle/contrib/inference/demo/README.md b/paddle/contrib/inference/demo/README.md
deleted file mode 100644
index f1d25666029..00000000000
--- a/paddle/contrib/inference/demo/README.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# Infernce Demos
-
-Input data format:
-
-- Each line contains a single record
-- Each record's format is
-
-```
-<space splitted floats as data>\t<space splitted ints as shape>
-```
-
-Follow the C++ codes in `vis_demo.cc`.
-
-## MobileNet
-
-To execute the demo, simply run
-
-```sh
-./mobilenet_inference_demo --modeldir <model> --data <datafile>
-```
-
-## SE-ResNeXt-50
-
-To execute the demo, simply run
-
-```sh
-./se_resnext50_inference_demo --modeldir <model> --data <datafile>
-```
-
-## OCR
-
-To execute the demo, simply run
-
-```sh
-./ocr_inference_demo --modeldir <model> --data <datafile>
-```
diff --git a/paddle/contrib/inference/demo_ci/.gitignore b/paddle/contrib/inference/demo_ci/.gitignore
new file mode 100644
index 00000000000..1269488f7fb
--- /dev/null
+++ b/paddle/contrib/inference/demo_ci/.gitignore
@@ -0,0 +1 @@
+data
diff --git a/paddle/contrib/inference/demo_ci/README.md b/paddle/contrib/inference/demo_ci/README.md
new file mode 100644
index 00000000000..7f013da7f30
--- /dev/null
+++ b/paddle/contrib/inference/demo_ci/README.md
@@ -0,0 +1,26 @@
+# Inference Demos
+
+There are several demos:
+
+- simple_on_word2vec:
+  - The C++ code is in `simple_on_word2vec.cc`.
+  - It demonstrates inference on a word2vec model.
+- vis_demo:
+  - The C++ code is in `vis_demo.cc`.
+  - It works with three vision models: mobilenet, se_resnext50, and ocr.
+  - Input data format (see the example record at the end of this file):
+    - Each line contains a single record.
+    - Each record's format is
+    ```
+    <space splitted floats as data>\t<space splitted ints as shape>
+    ```
+
+To build and execute the demos, simply run
+```
+./run.sh $PADDLE_ROOT $TURN_ON_MKL $TEST_GPU_CPU
+```
+- It builds and runs the demos against both the static and the shared library.
+- `$PADDLE_ROOT`: the path of the paddle library
+- `$TURN_ON_MKL`: use MKL or OpenBLAS
+- `$TEST_GPU_CPU`: test both GPU/CPU modes or only CPU mode
+- NOTE: for simple_on_word2vec, run `ctest -R test_word2vec` first to obtain the word2vec model.
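+
+For example, a record for a hypothetical input of shape `1 3 2 2` carries 12
+space-separated floats (one per element; the values below are made up, and
+`\t` stands for a literal tab character in the data file):
+```
+0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1.0 1.1 1.2\t1 3 2 2
+```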
diff --git a/paddle/contrib/inference/demo_ci/run.sh b/paddle/contrib/inference/demo_ci/run.sh
index 4f5b8b52ef3..45075887b2a 100755
--- a/paddle/contrib/inference/demo_ci/run.sh
+++ b/paddle/contrib/inference/demo_ci/run.sh
@@ -13,10 +13,30 @@ else
   use_gpu_list='false'
 fi
 
+# download vis_demo data
+function download() {
+  dir_name=$1
+  mkdir -p $dir_name
+  cd $dir_name
+  wget -q ${URL_ROOT}$dir_name.tar.gz
+  tar xzf *.tar.gz
+  cd ..
+}
+URL_ROOT=http://paddlemodels.bj.bcebos.com/inference-vis-demos%2F
+mkdir -p data
+cd data
+vis_demo_list='se_resnext50 ocr mobilenet'
+for vis_demo_name in $vis_demo_list; do
+  download $vis_demo_name
+done
+cd ..
+
+# compile and test the demo
 mkdir -p build
 cd build
 
 for WITH_STATIC_LIB in ON OFF; do
+  # -----simple_on_word2vec-----
   rm -rf *
   cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
     -DWITH_MKL=$TURN_ON_MKL \
@@ -29,9 +49,30 @@ for WITH_STATIC_LIB in ON OFF; do
       --dirname=${PADDLE_ROOT}/build/python/paddle/fluid/tests/book/word2vec.inference.model \
       --use_gpu=$use_gpu
     if [ $? -ne 0 ]; then
-      echo "inference demo runs fail."
+      echo "simple_on_word2vec demo fails to run."
       exit 1
     fi
   done
+  # ---------vis_demo---------
+  rm -rf *
+  cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
+    -DWITH_MKL=$TURN_ON_MKL \
+    -DDEMO_NAME=vis_demo \
+    -DWITH_GPU=$TEST_GPU_CPU \
+    -DWITH_STATIC_LIB=$WITH_STATIC_LIB
+  make -j
+  for use_gpu in false; do
+    for vis_demo_name in $vis_demo_list; do
+      ./vis_demo \
+        --modeldir=../data/$vis_demo_name/model \
+        --data=../data/$vis_demo_name/data.txt \
+        --refer=../data/$vis_demo_name/result.txt \
+        --use_gpu=$use_gpu
+      if [ $? -ne 0 ]; then
+        echo "vis demo $vis_demo_name fails to run."
+        exit 1
+      fi
+    done
+  done
 done
 set +x
diff --git a/paddle/contrib/inference/demo/utils.h b/paddle/contrib/inference/demo_ci/utils.h
similarity index 96%
rename from paddle/contrib/inference/demo/utils.h
rename to paddle/contrib/inference/demo_ci/utils.h
index b5330d8d9d8..017b39edaf1 100644
--- a/paddle/contrib/inference/demo/utils.h
+++ b/paddle/contrib/inference/demo_ci/utils.h
@@ -16,7 +16,7 @@
 #include <string>
 #include <vector>
 
-#include "paddle/contrib/inference/paddle_inference_api.h"
+#include "contrib/inference/paddle_inference_api.h"
 
 namespace paddle {
 namespace demo {
diff --git a/paddle/contrib/inference/demo/vis_demo.cc b/paddle/contrib/inference/demo_ci/vis_demo.cc
similarity index 75%
rename from paddle/contrib/inference/demo/vis_demo.cc
rename to paddle/contrib/inference/demo_ci/vis_demo.cc
index 45575f9a862..1c570a9f658 100644
--- a/paddle/contrib/inference/demo/vis_demo.cc
+++ b/paddle/contrib/inference/demo_ci/vis_demo.cc
@@ -18,19 +18,14 @@ limitations under the License. */
 #include <gflags/gflags.h>
 #include <glog/logging.h>  // use glog instead of PADDLE_ENFORCE to avoid importing other paddle
                            // header files.
-#include <gtest/gtest.h>
 #include <fstream>
 #include <iostream>
-#include "paddle/contrib/inference/demo/utils.h"
-#include "paddle/contrib/inference/paddle_inference_api.h"
+#include "paddle/fluid/platform/enforce.h"
+#include "utils.h"
 
 #ifdef PADDLE_WITH_CUDA
 DECLARE_double(fraction_of_gpu_memory_to_use);
 #endif
-
-namespace paddle {
-namespace demo {
-
 DEFINE_string(modeldir, "", "Directory of the inference model.");
 DEFINE_string(refer, "", "path to reference result for comparison.");
 DEFINE_string(
@@ -38,6 +33,10 @@ DEFINE_string(
     "",
     "path of data; each line is a record, format is "
     "'<space splitted floats as data>\t<space splitted ints as shape>'");
+DEFINE_bool(use_gpu, false, "Whether use gpu.");
+
+namespace paddle {
+namespace demo {
 
 struct Record {
   std::vector<float> data;
@@ -47,7 +46,7 @@ struct Record {
 void split(const std::string& str, char sep, std::vector<std::string>* pieces);
 
 Record ProcessALine(const std::string& line) {
-  LOG(INFO) << "process a line";
+  VLOG(3) << "process a line";
   std::vector<std::string> columns;
   split(line, '\t', &columns);
   CHECK_EQ(columns.size(), 2UL)
@@ -65,8 +64,8 @@ Record ProcessALine(const std::string& line) {
   for (auto& s : shape_strs) {
     record.shape.push_back(std::stoi(s));
   }
-  LOG(INFO) << "data size " << record.data.size();
-  LOG(INFO) << "data shape size " << record.shape.size();
+  VLOG(3) << "data size " << record.data.size();
+  VLOG(3) << "data shape size " << record.shape.size();
   return record;
 }
 
@@ -78,20 +77,22 @@ void CheckOutput(const std::string& referfile, const PaddleTensor& output) {
   file.close();
 
   size_t numel = output.data.length() / PaddleDtypeSize(output.dtype);
-  LOG(INFO) << "predictor output numel " << numel;
-  LOG(INFO) << "reference output numel " << refer.data.size();
-  EXPECT_EQ(numel, refer.data.size());
+  VLOG(3) << "predictor output numel " << numel;
+  VLOG(3) << "reference output numel " << refer.data.size();
+  PADDLE_ENFORCE_EQ(numel, refer.data.size());
   switch (output.dtype) {
     case PaddleDType::INT64: {
       for (size_t i = 0; i < numel; ++i) {
-        EXPECT_EQ(static_cast<int64_t*>(output.data.data())[i], refer.data[i]);
+        PADDLE_ENFORCE_EQ(static_cast<int64_t*>(output.data.data())[i],
+                          refer.data[i]);
       }
       break;
     }
    case PaddleDType::FLOAT32:
      for (size_t i = 0; i < numel; ++i) {
-        EXPECT_NEAR(
-            static_cast<float*>(output.data.data())[i], refer.data[i], 1e-5);
+        PADDLE_ENFORCE_LT(
+            fabs(static_cast<float*>(output.data.data())[i] - refer.data[i]),
+            1e-5);
      }
      break;
  }
 }
 
@@ -106,15 +107,15 @@ void Main(bool use_gpu) {
   config.prog_file = FLAGS_modeldir + "/__model__";
   config.use_gpu = use_gpu;
   config.device = 0;
-#ifdef PADDLE_WITH_CUDA
-  config.fraction_of_gpu_memory = FLAGS_fraction_of_gpu_memory_to_use;
-#endif
+  if (FLAGS_use_gpu) {
+    config.fraction_of_gpu_memory = 0.1;  // set by yourself
+  }
 
-  LOG(INFO) << "init predictor";
+  VLOG(3) << "init predictor";
   auto predictor =
       CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
 
-  LOG(INFO) << "begin to process data";
+  VLOG(3) << "begin to process data";
   // Just a single batch of data.
 std::string line;
 std::ifstream file(FLAGS_data);
@@ -129,21 +130,26 @@ void Main(bool use_gpu) {
      .data = PaddleBuf(record.data.data(), record.data.size() * sizeof(float)),
      .dtype = PaddleDType::FLOAT32};
 
-  LOG(INFO) << "run executor";
+  VLOG(3) << "run executor";
   std::vector<PaddleTensor> output;
   predictor->Run({input}, &output);
 
-  LOG(INFO) << "output.size " << output.size();
+  VLOG(3) << "output.size " << output.size();
   auto& tensor = output.front();
-  LOG(INFO) << "output: " << SummaryTensor(tensor);
+  VLOG(3) << "output: " << SummaryTensor(tensor);
 
   // compare with reference result
   CheckOutput(FLAGS_refer, tensor);
 }
 
-TEST(demo, vis_demo_cpu) { Main(false /*use_gpu*/); }
-#ifdef PADDLE_WITH_CUDA
-TEST(demo, vis_demo_gpu) { Main(true /*use_gpu*/); }
-#endif
 }  // namespace demo
 }  // namespace paddle
+
+int main(int argc, char** argv) {
+  google::ParseCommandLineFlags(&argc, &argv, true);
+  paddle::demo::Main(false /*use_gpu*/);
+  if (FLAGS_use_gpu) {
+    paddle::demo::Main(true /*use_gpu*/);
+  }
+  return 0;
+}
-- 
GitLab
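For reference, the sketch below shows how the new `run.sh` is meant to be invoked once this patch is applied. The checkout path `~/Paddle` is hypothetical; the script expects `${PADDLE_ROOT}/build/fluid_install_dir/` to exist, and the second and third arguments correspond to `$TURN_ON_MKL` and `$TEST_GPU_CPU` as described in the README above.

```sh
# Build and run both demos against a local Paddle build (path is hypothetical),
# with MKL enabled and GPU testing disabled.
cd paddle/contrib/inference/demo_ci
./run.sh ~/Paddle ON OFF

# run.sh downloads the models into data/ and runs vis_demo per model from
# build/; the equivalent manual invocation for one model looks like this:
./build/vis_demo \
  --modeldir=data/ocr/model \
  --data=data/ocr/data.txt \
  --refer=data/ocr/result.txt \
  --use_gpu=false
```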