Commit 24ced1d0 authored by Luo Tao

add independent demo for test static fluid library

Parent: 092d6201
...
@@ -54,9 +54,7 @@ endif()
 if(WITH_STATIC_LIB)
   set(DEPS
-      "-Wl,--whole-archive"
       ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a
-      "-Wl,--no-whole-archive"
       ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.a)
 else()
   # Note: libpaddle_inference_api.so must put before libpaddle_fluid.so
...
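For context on the flags removed above: -Wl,--whole-archive tells the linker to pull every object file out of the archive that follows it, instead of only the objects needed to resolve outstanding references. Presumably it is safe to drop here because analysis and tensorrt are now archived into libpaddle_fluid.a before the static library is created (see the comment in the CMake change further down). A minimal sketch of the two link styles, using the ${PADDLE_LIB} layout above and a hypothetical demo.cc:

    # Old style: force-include every object from libpaddle_fluid.a.
    g++ demo.cc \
        -Wl,--whole-archive \
        ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a \
        -Wl,--no-whole-archive \
        ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.a \
        -o demo
    # New style: ordinary archive semantics; the linker extracts only
    # the objects that demo.cc actually references.
    g++ demo.cc \
        ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a \
        ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.a \
        -o demo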
...
@@ -11,24 +11,22 @@ fi
 mkdir -p build
 cd build
-for WITH_STATIC_LIB in false; do
+for WITH_STATIC_LIB in ON OFF; do
   rm -rf *
   cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
     -DWITH_MKL=$WITH_MKL \
     -DDEMO_NAME=simple_on_word2vec \
     -DWITH_GPU=$WITH_GPU \
     -DWITH_STATIC_LIB=$WITH_STATIC_LIB
-  make
+  make -j
   for use_gpu in $use_gpu_list; do
     ./simple_on_word2vec \
       --dirname=${PADDLE_ROOT}/build/python/paddle/fluid/tests/book/word2vec.inference.model \
       --use_gpu=$use_gpu
+    if [ $? -ne 0 ]; then
+      echo "inference demo runs fail."
+      exit 1
+    fi
   done
 done
-if [ $? -eq 0 ]; then
-  exit 0
-else
-  echo "inference demo runs fail."
-  exit 1
-fi
 set +x
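Moving the error check inside the loop matters because $? only holds the exit status of the most recent command; inspecting it after both loops finish can only ever see the final demo run, so earlier failures were silently dropped. A minimal sketch of the pitfall:

    # $? after a loop reflects only the loop's final command:
    false                        # a failing command ...
    for i in 1 2; do :; done     # ... followed by a loop that succeeds
    echo $?                      # prints 0; the earlier failure is lost
    # Checking inside the loop, as run.sh now does, catches every failure:
    for i in 1 2; do
        false
        if [ $? -ne 0 ]; then echo "iteration $i failed"; fi
    done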
...
+# analysis and tensorrt must be added before creating static library,
+# otherwise, there would be undefined reference to them in static library.
+add_subdirectory(analysis)
+if (TENSORRT_FOUND)
+  add_subdirectory(tensorrt)
+endif()
 set(FLUID_CORE_MODULES proto_desc memory lod_tensor executor )
 # TODO(panyx0718): Should this be called paddle_fluid_inference_api_internal?
@@ -35,9 +42,3 @@ if(WITH_TESTING)
   # both tests/book and analysis depends the models that generated by python/paddle/fluid/tests/book
   add_subdirectory(tests/book)
 endif()
-add_subdirectory(analysis)
-if (TENSORRT_FOUND)
-  add_subdirectory(tensorrt)
-endif()
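The ordering constraint stated in the new comment follows from how static libraries are built: ar archives whatever object files exist when the archive is created, so modules compiled after that point contribute nothing to it, and consumers of the library hit undefined references. A self-contained sketch with hypothetical stand-in files:

    # Hypothetical stand-ins for the analysis symbols and their consumer.
    printf 'int analysis_entry(void) { return 0; }\n' > analysis.c
    printf 'int analysis_entry(void); int main(void) { return analysis_entry(); }\n' > main.c
    cc -c analysis.c -o analysis.o
    ar rcs libfluid.a               # archive sealed before analysis.o is added
    cc main.c libfluid.a -o app     # fails: undefined reference to analysis_entry
    ar rcs libfluid.a analysis.o    # add the object first ...
    cc main.c libfluid.a -o app     # ... and the link succeeds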
...
@@ -508,6 +508,7 @@ function gen_fluid_inference_lib() {
     Deploying fluid inference library ...
     ========================================
 EOF
+    cmake .. -DWITH_DISTRIBUTE=OFF
     make -j `nproc` inference_lib_dist
     cd ${PADDLE_ROOT}/build
     cp -r fluid_install_dir fluid
@@ -569,6 +570,7 @@ function main() {
     fluid_inference_lib)
       cmake_gen ${PYTHON_ABI:-""}
       gen_fluid_inference_lib
+      test_fluid_inference_lib
       ;;
     check_style)
       check_style
...
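The definition of test_fluid_inference_lib is not shown in this excerpt; judging from the run.sh changes above, it presumably builds and runs the standalone demo against the freshly deployed library. A purely hypothetical sketch of such a step (the demo path and argument order are assumptions, not taken from this diff):

    function test_fluid_inference_lib() {
        # Hypothetical: drive the demo's run.sh against the deployed library.
        cd ${PADDLE_ROOT}/paddle/contrib/inference/demo
        sh run.sh ${PADDLE_ROOT} ${WITH_MKL:-ON} ${WITH_GPU:-OFF}
    }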