Unverified commit a7c1ed12, authored by Tao Luo, committed by GitHub

Merge pull request #12115 from luotao1/demo

add an independent demo for testing the static fluid library
@@ -52,14 +52,12 @@ else()
   set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/libopenblas.a)
 endif()
+# Note: libpaddle_inference_api.so/a must put before libpaddle_fluid.so/a
 if(WITH_STATIC_LIB)
   set(DEPS
-       "-Wl,--whole-archive"
-       ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a
-       "-Wl,--no-whole-archive"
-       ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.a)
+       ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.a
+       ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a)
 else()
-  # Note: libpaddle_inference_api.so must put before libpaddle_fluid.so
   set(DEPS
        ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.so
        ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.so)
...
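Why the reordering: a Unix linker scans static archives left to right and only extracts members that satisfy symbols that are undefined at that point, so the API archive has to precede the core archive it depends on; with the order fixed, the old "-Wl,--whole-archive" wrapper is presumably no longer needed. A minimal sketch of the equivalent raw link line (hypothetical object and output names; library paths as in the CMake file above):

    # demo.o needs symbols from the API archive, and the API archive needs
    # symbols from libpaddle_fluid.a, so the linker must see them in that order.
    g++ demo.o \
        ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.a \
        ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a \
        -o simple_on_word2vec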
 set -x
 PADDLE_ROOT=$1
-WITH_MKL=$2
-WITH_GPU=$3
-if [ $3 == "ON" ]; then
+TURN_ON_MKL=$2 # use MKL or Openblas
+TEST_GPU_CPU=$3 # test both GPU/CPU mode or only CPU mode
+if [ $2 == ON ]; then
+  # You can export yourself if move the install path
+  MKL_LIB=${PADDLE_ROOT}/build/fluid_install_dir/third_party/install/mklml/lib
+  export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${MKL_LIB}
+fi
+if [ $3 == ON ]; then
   use_gpu_list='true false'
 else
   use_gpu_list='false'
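The MKL_LIB path above assumes the default fluid_install_dir layout produced by the build; as the new comment notes, if the install tree is moved you can export the loader path yourself. A hedged example with a hypothetical relocated path:

    # Make the MKLML runtime (libmklml_intel.so, libiomp5.so) visible to the
    # demo binary after relocating the install directory (path is hypothetical).
    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/paddle/fluid_install_dir/third_party/install/mklml/lib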
@@ -11,24 +16,22 @@ fi
 mkdir -p build
 cd build
-for WITH_STATIC_LIB in false; do
+for WITH_STATIC_LIB in ON OFF; do
   rm -rf *
   cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
-    -DWITH_MKL=$WITH_MKL \
+    -DWITH_MKL=$TURN_ON_MKL \
     -DDEMO_NAME=simple_on_word2vec \
-    -DWITH_GPU=$WITH_GPU \
+    -DWITH_GPU=$TEST_GPU_CPU \
     -DWITH_STATIC_LIB=$WITH_STATIC_LIB
-  make
+  make -j
   for use_gpu in $use_gpu_list; do
     ./simple_on_word2vec \
       --dirname=${PADDLE_ROOT}/build/python/paddle/fluid/tests/book/word2vec.inference.model \
       --use_gpu=$use_gpu
-  done
-done
-if [ $? -eq 0 ]; then
-  exit 0
-else
+    if [ $? -ne 0 ]; then
       echo "inference demo runs fail."
       exit 1
     fi
+  done
+done
 set +x
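With these changes the script builds the demo twice (static and shared linking) and, when GPU testing is enabled, runs each build in both CPU and GPU mode, exiting non-zero on the first failure instead of only checking the status of the last run. A sketch of a manual invocation, assuming the inference library has already been packaged under ${PADDLE_ROOT}/build/fluid_install_dir:

    cd /path/to/Paddle/paddle/contrib/inference/demo_ci
    # $1 = Paddle source root, $2 = use MKL (ON/OFF), $3 = also test GPU (ON/OFF)
    ./run.sh /path/to/Paddle ON OFF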
+# analysis and tensorrt must be added before creating static library,
+# otherwise, there would be undefined reference to them in static library.
+add_subdirectory(analysis)
+if (TENSORRT_FOUND)
+  add_subdirectory(tensorrt)
+endif()
 set(FLUID_CORE_MODULES proto_desc memory lod_tensor executor )
 # TODO(panyx0718): Should this be called paddle_fluid_inference_api_internal?
@@ -7,10 +14,6 @@ cc_library(paddle_fluid_api
 get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES)
-if(WITH_CONTRIB)
-  set(fluid_modules "${fluid_modules}" paddle_inference_api)
-endif()
 # Create static library
 cc_library(paddle_fluid DEPS ${fluid_modules} paddle_fluid_api)
 if(NOT APPLE)
@@ -35,9 +38,3 @@ if(WITH_TESTING)
   # both tests/book and analysis depends the models that generated by python/paddle/fluid/tests/book
   add_subdirectory(tests/book)
 endif()
-add_subdirectory(analysis)
-if (TENSORRT_FOUND)
-  add_subdirectory(tensorrt)
-endif()
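Moving add_subdirectory(analysis) and the tensorrt block ahead of cc_library(paddle_fluid ...) ensures those modules are registered in the FLUID_MODULES global property before the merged static archive is assembled. A hedged sanity check, assuming GNU binutils and the default build layout:

    # After rebuilding, the analysis code should be packed into the archive,
    # i.e. its (mangled) symbols appear as defined in some archive member
    # rather than only as undefined references. Path is the assumed default.
    nm --defined-only build/paddle/fluid/inference/libpaddle_fluid.a | grep -i analysis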
@@ -516,6 +516,7 @@ function gen_fluid_inference_lib() {
     Deploying fluid inference library ...
     ========================================
 EOF
+        cmake .. -DWITH_DISTRIBUTE=OFF
         make -j `nproc` inference_lib_dist
         cd ${PADDLE_ROOT}/build
         cp -r fluid_install_dir fluid
@@ -531,7 +532,7 @@ function test_fluid_inference_lib() {
     ========================================
 EOF
         cd ${PADDLE_ROOT}/paddle/contrib/inference/demo_ci
-        sh run.sh ${PADDLE_ROOT} ${WITH_MKL:-ON} ${WITH_GPU:-OFF}
+        ./run.sh ${PADDLE_ROOT} ${WITH_MKL:-ON} ${WITH_GPU:-OFF}
     fi
 }
@@ -577,6 +578,7 @@ function main() {
       fluid_inference_lib)
         cmake_gen ${PYTHON_ABI:-""}
         gen_fluid_inference_lib
+        test_fluid_inference_lib
         ;;
       check_style)
         check_style
...
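Wiring test_fluid_inference_lib into the fluid_inference_lib case means a single build-script invocation now both packages and smoke-tests the library. A sketch, assuming the script is paddle/scripts/paddle_build.sh as its function names suggest:

    cd /path/to/Paddle
    # WITH_MKL / WITH_GPU default to ON / OFF inside the script if unset.
    WITH_MKL=ON WITH_GPU=OFF ./paddle/scripts/paddle_build.sh fluid_inference_lib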