diff --git a/paddle/contrib/inference/demo_ci/CMakeLists.txt b/paddle/contrib/inference/demo_ci/CMakeLists.txt
index 0d175b840d862376e59ce5ffd71622bd4cd8dc0c..09aace2d8a4b46291bd26fa387775293f643039f 100644
--- a/paddle/contrib/inference/demo_ci/CMakeLists.txt
+++ b/paddle/contrib/inference/demo_ci/CMakeLists.txt
@@ -11,9 +11,9 @@ if(NOT DEFINED DEMO_NAME)
   message(FATAL_ERROR "please set DEMO_NAME with -DDEMO_NAME=demo_name")
 endif()
 
-option(WITH_MKLDNN "Compile PaddlePaddle with MKLDNN" OFF)
-option(WITH_MKL "Compile PaddlePaddle with MKL support, default use openblas." ON)
-option(WITH_GPU "Compile PaddlePaddle with GPU, default use CPU." OFF)
+option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON)
+option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." OFF)
+option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." ON)
 
 if(WITH_GPU)
   set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library")
@@ -52,17 +52,21 @@ else()
   set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/libopenblas.a)
 endif()
 
-set(ARCHIVE_START "-Wl,--whole-archive")
-set(ARCHIVE_END "-Wl,--no-whole-archive")
+if(WITH_STATIC_LIB)
+  set(DEPS
+      "-Wl,--whole-archive"
+      ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a
+      "-Wl,--no-whole-archive"
+      ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.a)
+else()
+  set(DEPS
+      ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.so
+      ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.so)
+endif()
 set(EXTERNAL_LIB "-lrt -ldl -lpthread")
 
-set(DEPS
-    ${ARCHIVE_START}
-    ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a
-    ${ARCHIVE_END}
-    ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.a
-    ${MATH_LIB}
-    ${MKLDNN_LIB}
+set(DEPS ${DEPS}
+    ${MATH_LIB} ${MKLDNN_LIB}
     glog gflags protobuf snappystream snappy z
     ${EXTERNAL_LIB})
 
 if(WITH_GPU)
diff --git a/paddle/contrib/inference/demo_ci/run.sh b/paddle/contrib/inference/demo_ci/run.sh
index ad79bce450d28bbea6c9b2ad39616f7496444586..e33e939463bf64e945740c7da22dd4397e24b85c 100755
--- a/paddle/contrib/inference/demo_ci/run.sh
+++ b/paddle/contrib/inference/demo_ci/run.sh
@@ -2,25 +2,28 @@ set -x
 PADDLE_ROOT=$1
 WITH_MKL=$2
 WITH_GPU=$3
-
-mkdir -p build
-cd build
-rm -rf *
-
-cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
-  -DWITH_MKL=$WITH_MKL \
-  -DDEMO_NAME=simple_on_word2vec \
-  -DWITH_GPU=$WITH_GPU
-make
 if [ $3 == "ON" ]; then
   use_gpu_list='true false'
 else
   use_gpu_list='false'
 fi
-for use_gpu in $use_gpu_list; do
-  ./simple_on_word2vec \
-    --dirname=${PADDLE_ROOT}/build/python/paddle/fluid/tests/book/word2vec.inference.model \
-    --use_gpu=$use_gpu
+
+mkdir -p build
+cd build
+
+for WITH_STATIC_LIB in true false; do
+  rm -rf *
+  cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
+    -DWITH_MKL=$WITH_MKL \
+    -DDEMO_NAME=simple_on_word2vec \
+    -DWITH_GPU=$WITH_GPU \
+    -DWITH_STATIC_LIB=$WITH_STATIC_LIB
+  make
+  for use_gpu in $use_gpu_list; do
+    ./simple_on_word2vec \
+      --dirname=${PADDLE_ROOT}/build/python/paddle/fluid/tests/book/word2vec.inference.model \
+      --use_gpu=$use_gpu
+  done
 done
 if [ $? -eq 0 ]; then
   exit 0
diff --git a/paddle/contrib/inference/demo_ci/simple_on_word2vec.cc b/paddle/contrib/inference/demo_ci/simple_on_word2vec.cc
index b3970e389e150206c9beb9ecc106ceb19c1bcdb0..9713837f86d40383da946af1681e1945c84336b0 100644
--- a/paddle/contrib/inference/demo_ci/simple_on_word2vec.cc
+++ b/paddle/contrib/inference/demo_ci/simple_on_word2vec.cc
@@ -61,11 +61,15 @@ void Main(bool use_gpu) {
     //# 4. Get output.
     PADDLE_ENFORCE(outputs.size(), 1UL);
-    LOG(INFO) << "output buffer size: " << outputs.front().data.length();
+    // Check the output buffer size and result of each tid.
+    PADDLE_ENFORCE(outputs.front().data.length(), 33168UL);
+    float result[5] = {
+        0.00129761, 0.00151112, 0.000423564, 0.00108815, 0.000932706};
     const size_t num_elements = outputs.front().data.length() / sizeof(float);
 
     // The outputs' buffers are in CPU memory.
     for (size_t i = 0; i < std::min(5UL, num_elements); i++) {
-      LOG(INFO) << static_cast<float*>(outputs.front().data.data())[i];
+      PADDLE_ENFORCE(static_cast<float*>(outputs.front().data.data())[i],
+                     result[i]);
     }
   }
 }
@@ -101,13 +105,16 @@ void MainThreads(int num_threads, bool use_gpu) {
         // 4. Get output.
         PADDLE_ENFORCE(outputs.size(), 1UL);
-        LOG(INFO) << "TID: " << tid << ", "
-                  << "output buffer size: " << outputs.front().data.length();
+        // Check the output buffer size and result of each tid.
+        PADDLE_ENFORCE(outputs.front().data.length(), 33168UL);
+        float result[5] = {
+            0.00129761, 0.00151112, 0.000423564, 0.00108815, 0.000932706};
         const size_t num_elements =
             outputs.front().data.length() / sizeof(float);
 
         // The outputs' buffers are in CPU memory.
         for (size_t i = 0; i < std::min(5UL, num_elements); i++) {
-          LOG(INFO) << static_cast<float*>(outputs.front().data.data())[i];
+          PADDLE_ENFORCE(static_cast<float*>(outputs.front().data.data())[i],
+                         result[i]);
         }
       }
     });
@@ -126,7 +133,6 @@ int main(int argc, char** argv) {
   paddle::demo::MainThreads(1, false /* use_gpu*/);
   paddle::demo::MainThreads(4, false /* use_gpu*/);
   if (FLAGS_use_gpu) {
-    LOG(INFO) << "use_gpu=true";
     paddle::demo::Main(true /*use_gpu*/);
     paddle::demo::MainThreads(1, true /*use_gpu*/);
     paddle::demo::MainThreads(4, true /*use_gpu*/);
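
For reference, a minimal sketch of how the updated demo can be driven by hand, assuming a Paddle tree at /path/to/Paddle (the paths below are placeholders, not values taken from the patch):

# Let run.sh iterate over static and shared linking itself
# (positional args: PADDLE_ROOT WITH_MKL WITH_GPU).
bash run.sh /path/to/Paddle ON OFF

# Or configure a single build manually, choosing shared linking explicitly.
mkdir -p build && cd build
cmake .. -DPADDLE_LIB=/path/to/Paddle/build/fluid_install_dir/ \
    -DWITH_MKL=ON \
    -DDEMO_NAME=simple_on_word2vec \
    -DWITH_GPU=OFF \
    -DWITH_STATIC_LIB=OFF   # OFF links the .so libraries; ON (default) links the static archives
make
./simple_on_word2vec \
    --dirname=/path/to/Paddle/build/python/paddle/fluid/tests/book/word2vec.inference.model \
    --use_gpu=false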