diff --git a/paddle/contrib/inference/demo/CMakeLists.txt b/paddle/contrib/inference/demo/CMakeLists.txt
index ecece6fe3471ad7b89c84c3e2b67af4ae9eb3c36..2d501bf0085b1bd4c39ee1a6dfaaa9622fd72ce1 100644
--- a/paddle/contrib/inference/demo/CMakeLists.txt
+++ b/paddle/contrib/inference/demo/CMakeLists.txt
@@ -13,8 +13,6 @@
 # limitations under the License.
 #
 
-inference_api_test(simple_on_word2vec ARGS test_word2vec)
-
 option(WITH_INFERENCE_DEMO "Compile with Inference demo" OFF)
 if(NOT WITH_INFERENCE_DEMO)
   return()
diff --git a/paddle/contrib/inference/demo_ci/CMakeLists.txt b/paddle/contrib/inference/demo_ci/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0d175b840d862376e59ce5ffd71622bd4cd8dc0c
--- /dev/null
+++ b/paddle/contrib/inference/demo_ci/CMakeLists.txt
@@ -0,0 +1,72 @@
+cmake_minimum_required(VERSION 3.0)
+
+project(cpp_inference_demo CXX C)
+
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
+
+if(NOT DEFINED PADDLE_LIB)
+  message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/path/paddle/lib")
+endif()
+if(NOT DEFINED DEMO_NAME)
+  message(FATAL_ERROR "please set DEMO_NAME with -DDEMO_NAME=demo_name")
+endif()
+
+option(WITH_MKLDNN "Compile PaddlePaddle with MKLDNN" OFF)
+option(WITH_MKL "Compile PaddlePaddle with MKL support, default use openblas." ON)
+option(WITH_GPU "Compile PaddlePaddle with GPU, default use CPU." OFF)
+
+if(WITH_GPU)
+  set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library")
+endif()
+
+include_directories("${PADDLE_LIB}")
+include_directories("${PADDLE_LIB}/third_party/install/protobuf/include")
+include_directories("${PADDLE_LIB}/third_party/install/glog/include")
+include_directories("${PADDLE_LIB}/third_party/install/gflags/include")
+include_directories("${PADDLE_LIB}/third_party/install/snappy/include")
+include_directories("${PADDLE_LIB}/third_party/install/snappystream/include")
+include_directories("${PADDLE_LIB}/third_party/install/zlib/include")
+
+include_directories("${PADDLE_LIB}/third_party/boost")
+include_directories("${PADDLE_LIB}/third_party/eigen3")
+
+link_directories("${PADDLE_LIB}/third_party/install/snappy/lib")
+link_directories("${PADDLE_LIB}/third_party/install/snappystream/lib")
+link_directories("${PADDLE_LIB}/third_party/install/protobuf/lib")
+link_directories("${PADDLE_LIB}/third_party/install/glog/lib")
+link_directories("${PADDLE_LIB}/third_party/install/gflags/lib")
+link_directories("${PADDLE_LIB}/third_party/install/zlib/lib")
+
+add_executable(${DEMO_NAME} ${DEMO_NAME}.cc)
+
+if(WITH_MKL)
+  include_directories("${PADDLE_LIB}/third_party/install/mklml/include")
+  set(MATH_LIB ${PADDLE_LIB}/third_party/install/mklml/lib/libmklml_intel.so
+    ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5.so)
+  set(MKLDNN_PATH "${PADDLE_LIB}/third_party/install/mkldnn")
+  if(EXISTS ${MKLDNN_PATH})
+    include_directories("${MKLDNN_PATH}/include")
+    set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0)
+  endif()
+else()
+  set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/libopenblas.a)
+endif()
+
+set(ARCHIVE_START "-Wl,--whole-archive")
+set(ARCHIVE_END "-Wl,--no-whole-archive")
+set(EXTERNAL_LIB "-lrt -ldl -lpthread")
+
+set(DEPS
+  ${ARCHIVE_START}
+  ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a
+  ${ARCHIVE_END}
+  ${PADDLE_LIB}/contrib/inference/libpaddle_inference_api.a
+  ${MATH_LIB}
+  ${MKLDNN_LIB}
+  glog gflags protobuf snappystream snappy z
+  ${EXTERNAL_LIB})
+if(WITH_GPU)
+  set(DEPS ${DEPS} ${CUDA_LIB}/libcudart.so)
+endif()
+
+target_link_libraries(${DEMO_NAME} ${DEPS})
diff --git a/paddle/contrib/inference/demo_ci/run.sh b/paddle/contrib/inference/demo_ci/run.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ad79bce450d28bbea6c9b2ad39616f7496444586
--- /dev/null
+++ b/paddle/contrib/inference/demo_ci/run.sh
@@ -0,0 +1,31 @@
+set -x
+PADDLE_ROOT=$1
+WITH_MKL=$2
+WITH_GPU=$3
+
+mkdir -p build
+cd build
+rm -rf *
+
+cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \
+  -DWITH_MKL=$WITH_MKL \
+  -DDEMO_NAME=simple_on_word2vec \
+  -DWITH_GPU=$WITH_GPU
+make
+if [ $3 == "ON" ]; then
+  use_gpu_list='true false'
+else
+  use_gpu_list='false'
+fi
+for use_gpu in $use_gpu_list; do
+  ./simple_on_word2vec \
+    --dirname=${PADDLE_ROOT}/build/python/paddle/fluid/tests/book/word2vec.inference.model \
+    --use_gpu=$use_gpu
+done
+if [ $? -eq 0 ]; then
+  exit 0
+else
+  echo "inference demo runs fail."
+  exit 1
+fi
+set +x
diff --git a/paddle/contrib/inference/demo/simple_on_word2vec.cc b/paddle/contrib/inference/demo_ci/simple_on_word2vec.cc
similarity index 80%
rename from paddle/contrib/inference/demo/simple_on_word2vec.cc
rename to paddle/contrib/inference/demo_ci/simple_on_word2vec.cc
index c253014642f39a042430992548a285cc7078a959..b3970e389e150206c9beb9ecc106ceb19c1bcdb0 100644
--- a/paddle/contrib/inference/demo/simple_on_word2vec.cc
+++ b/paddle/contrib/inference/demo_ci/simple_on_word2vec.cc
@@ -16,21 +16,27 @@ limitations under the License. */
  * This file contains a simple demo for how to take a model for inference.
  */
+#include <gflags/gflags.h>
 #include <glog/logging.h>
-#include <gtest/gtest.h>
 #include <memory>
 #include <thread>
 
-#include "paddle/contrib/inference/paddle_inference_api.h"
+#include "contrib/inference/paddle_inference_api.h"
+#include "paddle/fluid/platform/enforce.h"
+
+DEFINE_string(dirname, "", "Directory of the inference model.");
+DEFINE_bool(use_gpu, false, "Whether use gpu.");
 
 namespace paddle {
 namespace demo {
 
-DEFINE_string(dirname, "", "Directory of the inference model.");
-
 void Main(bool use_gpu) {
   //# 1. Create PaddlePredictor with a config.
   NativeConfig config;
-  config.model_dir = FLAGS_dirname + "word2vec.inference.model";
+  if (FLAGS_dirname.empty()) {
+    LOG(INFO) << "Usage: ./simple_on_word2vec --dirname=path/to/your/model";
+    exit(1);
+  }
+  config.model_dir = FLAGS_dirname;
   config.use_gpu = use_gpu;
   config.fraction_of_gpu_memory = 0.15;
   config.device = 0;
@@ -54,7 +60,7 @@ void Main(bool use_gpu) {
     CHECK(predictor->Run(slots, &outputs));
 
     //# 4. Get output.
-    ASSERT_EQ(outputs.size(), 1UL);
+    PADDLE_ENFORCE(outputs.size(), 1UL);
     LOG(INFO) << "output buffer size: " << outputs.front().data.length();
     const size_t num_elements = outputs.front().data.length() / sizeof(float);
     // The outputs' buffers are in CPU memory.
@@ -68,7 +74,7 @@ void MainThreads(int num_threads, bool use_gpu) {
   // Multi-threads only support on CPU
   // 0. Create PaddlePredictor with a config.
   NativeConfig config;
-  config.model_dir = FLAGS_dirname + "word2vec.inference.model";
+  config.model_dir = FLAGS_dirname;
   config.use_gpu = use_gpu;
   config.fraction_of_gpu_memory = 0.15;
   config.device = 0;
@@ -94,7 +100,7 @@ void MainThreads(int num_threads, bool use_gpu) {
       CHECK(predictor->Run(inputs, &outputs));
 
       // 4. Get output.
-      ASSERT_EQ(outputs.size(), 1UL);
+      PADDLE_ENFORCE(outputs.size(), 1UL);
       LOG(INFO) << "TID: " << tid << ", "
                 << "output buffer size: " << outputs.front().data.length();
       const size_t num_elements =
@@ -111,15 +117,19 @@ void MainThreads(int num_threads, bool use_gpu) {
   }
 }
 
-TEST(demo, word2vec_cpu) { Main(false /*use_gpu*/); }
-TEST(demo_multi_threads, word2vec_cpu_1) { MainThreads(1, false /*use_gpu*/); }
-TEST(demo_multi_threads, word2vec_cpu_4) { MainThreads(4, false /*use_gpu*/); }
-
-#ifdef PADDLE_WITH_CUDA
-TEST(demo, word2vec_gpu) { Main(true /*use_gpu*/); }
-TEST(demo_multi_threads, word2vec_gpu_1) { MainThreads(1, true /*use_gpu*/); }
-TEST(demo_multi_threads, word2vec_gpu_4) { MainThreads(4, true /*use_gpu*/); }
-#endif
-
 }  // namespace demo
 }  // namespace paddle
+
+int main(int argc, char** argv) {
+  google::ParseCommandLineFlags(&argc, &argv, true);
+  paddle::demo::Main(false /* use_gpu*/);
+  paddle::demo::MainThreads(1, false /* use_gpu*/);
+  paddle::demo::MainThreads(4, false /* use_gpu*/);
+  if (FLAGS_use_gpu) {
+    LOG(INFO) << "use_gpu=true";
+    paddle::demo::Main(true /*use_gpu*/);
+    paddle::demo::MainThreads(1, true /*use_gpu*/);
+    paddle::demo::MainThreads(4, true /*use_gpu*/);
+  }
+  return 0;
+}
diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh
index d173b41e86f61954954b6a5ea9957d2e172deca0..bf45c11a9de53a109c72ff7a89b807bc80feb7c8 100755
--- a/paddle/scripts/paddle_build.sh
+++ b/paddle/scripts/paddle_build.sh
@@ -510,11 +510,23 @@ function gen_fluid_inference_lib() {
 EOF
         make -j `nproc` inference_lib_dist
         cd ${PADDLE_ROOT}/build
-        mv fluid_install_dir fluid
+        cp -r fluid_install_dir fluid
         tar -cf fluid.tgz fluid
     fi
 }
 
+function test_fluid_inference_lib() {
+    if [ ${WITH_C_API:-OFF} == "OFF" ] ; then
+        cat <
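
Usage note (not part of the patch): a minimal sketch of how the new demo_ci directory is driven, assuming a Paddle tree that has already produced build/fluid_install_dir via `make inference_lib_dist` and the word2vec test model under build/python/paddle/fluid/tests/book/; the /path/to/Paddle placeholder is illustrative, and run.sh above wraps exactly these steps.

    # hypothetical manual invocation of the standalone demo
    cd paddle/contrib/inference/demo_ci
    sh run.sh /path/to/Paddle ON OFF    # args: PADDLE_ROOT WITH_MKL WITH_GPU

    # equivalent direct cmake build against the installed inference library
    mkdir -p build && cd build
    cmake .. -DPADDLE_LIB=/path/to/Paddle/build/fluid_install_dir/ \
             -DDEMO_NAME=simple_on_word2vec -DWITH_MKL=ON -DWITH_GPU=OFF
    make
    ./simple_on_word2vec \
        --dirname=/path/to/Paddle/build/python/paddle/fluid/tests/book/word2vec.inference.model \
        --use_gpu=false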