From f8a74ccc7b9005d9a73e806d91241137563301de Mon Sep 17 00:00:00 2001
From: Luo Tao
Date: Tue, 10 Jul 2018 17:48:01 +0800
Subject: [PATCH] add shared library test

---
 .../contrib/inference/demo_ci/CMakeLists.txt  | 28 ++++++++++-------
 paddle/contrib/inference/demo_ci/run.sh       | 31 ++++++++++---------
 .../inference/demo_ci/simple_on_word2vec.cc   | 18 +++++++----
 3 files changed, 45 insertions(+), 32 deletions(-)

diff --git a/paddle/contrib/inference/demo_ci/CMakeLists.txt b/paddle/contrib/inference/demo_ci/CMakeLists.txt
index 0d175b840d8..09aace2d8a4 100644
--- a/paddle/contrib/inference/demo_ci/CMakeLists.txt
+++ b/paddle/contrib/inference/demo_ci/CMakeLists.txt
@@ -11,9 +11,9 @@ if(NOT DEFINED DEMO_NAME)
   message(FATAL_ERROR "please set DEMO_NAME with -DDEMO_NAME=demo_name")
 endif()
 
-option(WITH_MKLDNN "Compile PaddlePaddle with MKLDNN" OFF)
-option(WITH_MKL "Compile PaddlePaddle with MKL support, default use openblas." ON)
-option(WITH_GPU "Compile PaddlePaddle with GPU, default use CPU." OFF)
+option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON)
+option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." OFF)
+option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." ON)
 
 if(WITH_GPU)
   set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library")
@@ -52,17 +52,21 @@ else()
   set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/libopenblas.a)
 endif()
 
-set(ARCHIVE_START "-Wl,--whole-archive")
-set(ARCHIVE_END "-Wl,--no-whole-archive")
+if(WITH_STATIC_LIB)
+  set(DEPS
+      "-Wl,--whole-archive"
+      ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a
+      "-Wl,--no-whole-archive"
+      ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.a)
+else()
+  set(DEPS
+      ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.so
+      ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.so)
+endif()
 set(EXTERNAL_LIB "-lrt -ldl -lpthread")
-set(DEPS
-    ${ARCHIVE_START}
-    ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a
-    ${ARCHIVE_END}
-    ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.a
-    ${MATH_LIB}
-    ${MKLDNN_LIB}
+set(DEPS ${DEPS}
+    ${MATH_LIB} ${MKLDNN_LIB}
     glog gflags protobuf snappystream snappy z
     ${EXTERNAL_LIB})
 
 if(WITH_GPU)
diff --git a/paddle/contrib/inference/demo_ci/run.sh b/paddle/contrib/inference/demo_ci/run.sh
index ad79bce450d..e33e939463b 100755
--- a/paddle/contrib/inference/demo_ci/run.sh
+++ b/paddle/contrib/inference/demo_ci/run.sh
@@ -2,25 +2,28 @@ set -x
 PADDLE_ROOT=$1
 WITH_MKL=$2
 WITH_GPU=$3
-
-mkdir -p build
-cd build
-rm -rf *
-
-cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
-  -DWITH_MKL=$WITH_MKL \
-  -DDEMO_NAME=simple_on_word2vec \
-  -DWITH_GPU=$WITH_GPU
-make
 if [ $3 == "ON" ]; then
   use_gpu_list='true false'
 else
   use_gpu_list='false'
 fi
-for use_gpu in $use_gpu_list; do
-  ./simple_on_word2vec \
-    --dirname=${PADDLE_ROOT}/build/python/paddle/fluid/tests/book/word2vec.inference.model \
-    --use_gpu=$use_gpu
+
+mkdir -p build
+cd build
+
+for WITH_STATIC_LIB in true false; do
+  rm -rf *
+  cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
+    -DWITH_MKL=$WITH_MKL \
+    -DDEMO_NAME=simple_on_word2vec \
+    -DWITH_GPU=$WITH_GPU \
+    -DWITH_STATIC_LIB=$WITH_STATIC_LIB
+  make
+  for use_gpu in $use_gpu_list; do
+    ./simple_on_word2vec \
+      --dirname=${PADDLE_ROOT}/build/python/paddle/fluid/tests/book/word2vec.inference.model \
+      --use_gpu=$use_gpu
+  done
 done
 if [ $? -eq 0 ]; then
   exit 0
diff --git a/paddle/contrib/inference/demo_ci/simple_on_word2vec.cc b/paddle/contrib/inference/demo_ci/simple_on_word2vec.cc
index b3970e389e1..9713837f86d 100644
--- a/paddle/contrib/inference/demo_ci/simple_on_word2vec.cc
+++ b/paddle/contrib/inference/demo_ci/simple_on_word2vec.cc
@@ -61,11 +61,15 @@ void Main(bool use_gpu) {
     //# 4. Get output.
     PADDLE_ENFORCE(outputs.size(), 1UL);
-    LOG(INFO) << "output buffer size: " << outputs.front().data.length();
+    // Check the output buffer size and result of each tid.
+    PADDLE_ENFORCE(outputs.front().data.length(), 33168UL);
+    float result[5] = {
+        0.00129761, 0.00151112, 0.000423564, 0.00108815, 0.000932706};
     const size_t num_elements = outputs.front().data.length() / sizeof(float);
 
     // The outputs' buffers are in CPU memory.
     for (size_t i = 0; i < std::min(5UL, num_elements); i++) {
-      LOG(INFO) << static_cast<float*>(outputs.front().data.data())[i];
+      PADDLE_ENFORCE(static_cast<float*>(outputs.front().data.data())[i],
+                     result[i]);
     }
   }
 }
@@ -101,13 +105,16 @@ void MainThreads(int num_threads, bool use_gpu) {
         // 4. Get output.
         PADDLE_ENFORCE(outputs.size(), 1UL);
-        LOG(INFO) << "TID: " << tid << ", "
-                  << "output buffer size: " << outputs.front().data.length();
+        // Check the output buffer size and result of each tid.
+        PADDLE_ENFORCE(outputs.front().data.length(), 33168UL);
+        float result[5] = {
+            0.00129761, 0.00151112, 0.000423564, 0.00108815, 0.000932706};
         const size_t num_elements =
             outputs.front().data.length() / sizeof(float);
 
         // The outputs' buffers are in CPU memory.
         for (size_t i = 0; i < std::min(5UL, num_elements); i++) {
-          LOG(INFO) << static_cast<float*>(outputs.front().data.data())[i];
+          PADDLE_ENFORCE(static_cast<float*>(outputs.front().data.data())[i],
+                         result[i]);
         }
       }
     });
@@ -126,7 +133,6 @@ int main(int argc, char** argv) {
   paddle::demo::MainThreads(1, false /* use_gpu*/);
   paddle::demo::MainThreads(4, false /* use_gpu*/);
   if (FLAGS_use_gpu) {
-    LOG(INFO) << "use_gpu=true";
     paddle::demo::Main(true /*use_gpu*/);
     paddle::demo::MainThreads(1, true /*use_gpu*/);
     paddle::demo::MainThreads(4, true /*use_gpu*/);
-- 
GitLab