diff --git a/paddle/contrib/inference/demo_ci/CMakeLists.txt b/paddle/contrib/inference/demo_ci/CMakeLists.txt
index 789bff7f23cd89bfaeba180efa95972cef6fc58c..33a0dfaf88b692f5d290920d97a650ad67f3dbca 100644
--- a/paddle/contrib/inference/demo_ci/CMakeLists.txt
+++ b/paddle/contrib/inference/demo_ci/CMakeLists.txt
@@ -52,14 +52,12 @@ else()
   set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/libopenblas.a)
 endif()
 
+# Note: libpaddle_inference_api.so/.a must be listed before libpaddle_fluid.so/.a
 if(WITH_STATIC_LIB)
   set(DEPS
-      "-Wl,--whole-archive"
-      ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a
-      "-Wl,--no-whole-archive"
-      ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.a)
+      ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.a
+      ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a)
 else()
-  # Note: libpaddle_inference_api.so must put before libpaddle_fluid.so
   set(DEPS
       ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.so
       ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.so)
diff --git a/paddle/contrib/inference/demo_ci/run.sh b/paddle/contrib/inference/demo_ci/run.sh
index e3a7269af795b05c296423cb2dc92b753397c6b3..4f5b8b52ef33d5258f966f2d85e39aea12760211 100755
--- a/paddle/contrib/inference/demo_ci/run.sh
+++ b/paddle/contrib/inference/demo_ci/run.sh
@@ -1,8 +1,13 @@
 set -x
 PADDLE_ROOT=$1
-WITH_MKL=$2
-WITH_GPU=$3
-if [ $3 == "ON" ]; then
+TURN_ON_MKL=$2 # use MKL or OpenBLAS
+TEST_GPU_CPU=$3 # test both GPU/CPU modes or only CPU mode
+if [ $2 == ON ]; then
+  # Export LD_LIBRARY_PATH yourself if you move the install path
+  MKL_LIB=${PADDLE_ROOT}/build/fluid_install_dir/third_party/install/mklml/lib
+  export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${MKL_LIB}
+fi
+if [ $3 == ON ]; then
   use_gpu_list='true false'
 else
   use_gpu_list='false'
@@ -11,24 +16,22 @@ fi
 mkdir -p build
 cd build
 
-for WITH_STATIC_LIB in false; do
+for WITH_STATIC_LIB in ON OFF; do
   rm -rf *
   cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
-    -DWITH_MKL=$WITH_MKL \
+    -DWITH_MKL=$TURN_ON_MKL \
     -DDEMO_NAME=simple_on_word2vec \
-    -DWITH_GPU=$WITH_GPU \
+    -DWITH_GPU=$TEST_GPU_CPU \
     -DWITH_STATIC_LIB=$WITH_STATIC_LIB
-  make
+  make -j
   for use_gpu in $use_gpu_list; do
     ./simple_on_word2vec \
       --dirname=${PADDLE_ROOT}/build/python/paddle/fluid/tests/book/word2vec.inference.model \
       --use_gpu=$use_gpu
+    if [ $? -ne 0 ]; then
+      echo "inference demo failed."
+      exit 1
+    fi
   done
 done
-if [ $? -eq 0 ]; then
-  exit 0
-else
-  echo "inference demo runs fail."
-  exit 1
-fi
 set +x
diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt
index b1c33c3415f49f9b1160655034350087432d0cb0..86643b9aa111a9d73c184097e0c43bbec30c6b31 100644
--- a/paddle/fluid/inference/CMakeLists.txt
+++ b/paddle/fluid/inference/CMakeLists.txt
@@ -1,3 +1,10 @@
+# analysis and tensorrt must be added before creating the static library;
+# otherwise there would be undefined references to them in the static library.
+add_subdirectory(analysis)
+if (TENSORRT_FOUND)
+  add_subdirectory(tensorrt)
+endif()
+
 set(FLUID_CORE_MODULES proto_desc memory lod_tensor executor )
 
 # TODO(panyx0718): Should this be called paddle_fluid_inference_api_internal?
@@ -7,10 +14,6 @@ cc_library(paddle_fluid_api
 
 get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES)
 
-if(WITH_CONTRIB)
-  set(fluid_modules "${fluid_modules}" paddle_inference_api)
-endif()
-
 # Create static library
 cc_library(paddle_fluid DEPS ${fluid_modules} paddle_fluid_api)
 if(NOT APPLE)
@@ -35,9 +38,3 @@ if(WITH_TESTING)
   # both tests/book and analysis depends the models that generated by python/paddle/fluid/tests/book
   add_subdirectory(tests/book)
 endif()
-
-add_subdirectory(analysis)
-
-if (TENSORRT_FOUND)
-  add_subdirectory(tensorrt)
-endif()
diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh
index f21fd5cd6ec1a7c526694bfc3e7fc574cbbc365d..4bdd86990f82be96085dca640cd52ffb843b2c32 100755
--- a/paddle/scripts/paddle_build.sh
+++ b/paddle/scripts/paddle_build.sh
@@ -516,6 +516,7 @@ function gen_fluid_inference_lib() {
     Deploying fluid inference library ...
     ========================================
 EOF
+        cmake .. -DWITH_DISTRIBUTE=OFF
         make -j `nproc` inference_lib_dist
         cd ${PADDLE_ROOT}/build
         cp -r fluid_install_dir fluid
@@ -531,7 +532,7 @@ function test_fluid_inference_lib() {
     ========================================
 EOF
         cd ${PADDLE_ROOT}/paddle/contrib/inference/demo_ci
-        sh run.sh ${PADDLE_ROOT} ${WITH_MKL:-ON} ${WITH_GPU:-OFF}
+        ./run.sh ${PADDLE_ROOT} ${WITH_MKL:-ON} ${WITH_GPU:-OFF}
     fi
 }
 
@@ -577,6 +578,7 @@ function main() {
     fluid_inference_lib)
       cmake_gen ${PYTHON_ABI:-""}
       gen_fluid_inference_lib
+      test_fluid_inference_lib
       ;;
     check_style)
      check_style
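
Why the reordering in demo_ci/CMakeLists.txt works (commentary, not part of the patch): GNU ld scans static archives left to right and an archive can only satisfy undefined symbols introduced by the objects and archives listed before it. Since libpaddle_inference_api.a calls into libpaddle_fluid.a, the API archive must come first, which also lets the old `--whole-archive` workaround be dropped. A minimal sketch of the equivalent link line, with `demo.o` as a hypothetical object file and the archive paths taken from the patch:

```bash
# Hypothetical link line; demo.o is illustrative. The archive that *uses*
# symbols (libpaddle_inference_api.a) must precede the archive that
# *defines* them (libpaddle_fluid.a), because ld resolves left to right.
g++ demo.o \
    ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.a \
    ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a \
    -o demo
```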
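For reference, after this change the `fluid_inference_lib` target in paddle_build.sh both builds and smoke-tests the library, and the demo can also be driven by hand. A sketch assuming `fluid_install_dir` has already been produced by `make inference_lib_dist`; the ON/OFF values here are illustrative:

```bash
# $1 = PADDLE_ROOT, $2 = TURN_ON_MKL (ON/OFF), $3 = TEST_GPU_CPU (ON/OFF)
cd ${PADDLE_ROOT}/paddle/contrib/inference/demo_ci
./run.sh ${PADDLE_ROOT} ON OFF   # MKL math library, CPU-only run
```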