diff --git a/lite/CMakeLists.txt b/lite/CMakeLists.txt index 5b8a420b2a6b127ebbd6ce4005a426b03b527c0c..c4dd769b4c9dbda8379aef631b6f44ce3aea9d22 100644 --- a/lite/CMakeLists.txt +++ b/lite/CMakeLists.txt @@ -108,59 +108,53 @@ if (LITE_WITH_PYTHON) add_dependencies(publish_inference publish_inference_python_light_demo) endif() +if (LITE_WITH_CUDA OR LITE_WITH_X86) + add_custom_target(publish_inference_cxx_lib ${TARGET} + COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/cxx/lib" + COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/bin" + COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/cxx/include" + COMMAND cp "${CMAKE_SOURCE_DIR}/lite/api/paddle_*.h" "${INFER_LITE_PUBLISH_ROOT}/cxx/include" + COMMAND cp "${CMAKE_BINARY_DIR}/libpaddle_api_full_bundled.a" "${INFER_LITE_PUBLISH_ROOT}/cxx/lib" + COMMAND cp "${CMAKE_BINARY_DIR}/libpaddle_api_light_bundled.a" "${INFER_LITE_PUBLISH_ROOT}/cxx/lib" + COMMAND cp "${CMAKE_BINARY_DIR}/lite/api/*.so" "${INFER_LITE_PUBLISH_ROOT}/cxx/lib" + ) + add_custom_target(publish_inference_third_party ${TARGET} + COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/third_party" + COMMAND cp -r "${CMAKE_BINARY_DIR}/third_party/install/*" "${INFER_LITE_PUBLISH_ROOT}/third_party") + add_dependencies(publish_inference_cxx_lib bundle_full_api) + add_dependencies(publish_inference_cxx_lib bundle_light_api) + add_dependencies(publish_inference_cxx_lib paddle_full_api_shared) + add_dependencies(publish_inference_cxx_lib paddle_light_api_shared) + add_dependencies(publish_inference publish_inference_cxx_lib) + add_dependencies(publish_inference publish_inference_third_party) +endif() + if (LITE_WITH_X86) add_custom_target(publish_inference_x86_cxx_lib ${TARGET} - COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/cxx/lib" COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/bin" - COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/cxx/include" - COMMAND cp "${CMAKE_SOURCE_DIR}/lite/api/paddle_*.h" "${INFER_LITE_PUBLISH_ROOT}/cxx/include" - COMMAND cp 
"${CMAKE_BINARY_DIR}/libpaddle_api_full_bundled.a" "${INFER_LITE_PUBLISH_ROOT}/cxx/lib" - COMMAND cp "${CMAKE_BINARY_DIR}/libpaddle_api_light_bundled.a" "${INFER_LITE_PUBLISH_ROOT}/cxx/lib" - COMMAND cp "${CMAKE_BINARY_DIR}/lite/api/*.so" "${INFER_LITE_PUBLISH_ROOT}/cxx/lib" COMMAND cp "${CMAKE_BINARY_DIR}/lite/api/test_model_bin" "${INFER_LITE_PUBLISH_ROOT}/bin" ) - add_dependencies(publish_inference_x86_cxx_lib bundle_full_api) - add_dependencies(publish_inference_x86_cxx_lib bundle_light_api) add_dependencies(publish_inference_x86_cxx_lib test_model_bin) - add_dependencies(publish_inference_x86_cxx_lib paddle_full_api_shared) - add_dependencies(publish_inference_x86_cxx_lib paddle_light_api_shared) - add_dependencies(publish_inference publish_inference_x86_cxx_lib) add_custom_target(publish_inference_x86_cxx_demos ${TARGET} COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/third_party" - COMMAND cp -r "${CMAKE_BINARY_DIR}/third_party/install/*" "${INFER_LITE_PUBLISH_ROOT}/third_party" COMMAND cp -r "${CMAKE_BINARY_DIR}/third_party/eigen3" "${INFER_LITE_PUBLISH_ROOT}/third_party" - COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/demo/cxx" ) add_dependencies(publish_inference_x86_cxx_lib publish_inference_x86_cxx_demos) add_dependencies(publish_inference_x86_cxx_demos paddle_full_api_shared eigen3) + add_dependencies(publish_inference publish_inference_x86_cxx_lib) + add_dependencies(publish_inference publish_inference_x86_cxx_demos) endif() if(LITE_WITH_CUDA) - add_custom_target(publish_inference_cuda_cxx_lib ${TARGET} - COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/cxx/lib" - COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/bin" - COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/cxx/include" - COMMAND cp "${CMAKE_SOURCE_DIR}/lite/api/paddle_*.h" "${INFER_LITE_PUBLISH_ROOT}/cxx/include" - COMMAND cp "${CMAKE_BINARY_DIR}/libpaddle_api_full_bundled.a" "${INFER_LITE_PUBLISH_ROOT}/cxx/lib" - COMMAND cp "${CMAKE_BINARY_DIR}/libpaddle_api_light_bundled.a" 
"${INFER_LITE_PUBLISH_ROOT}/cxx/lib" - COMMAND cp "${CMAKE_BINARY_DIR}/lite/api/*.so" "${INFER_LITE_PUBLISH_ROOT}/cxx/lib" - ) - add_dependencies(publish_inference_cuda_cxx_lib bundle_full_api) - add_dependencies(publish_inference_cuda_cxx_lib bundle_light_api) - add_dependencies(publish_inference_cuda_cxx_lib paddle_full_api_shared) - add_dependencies(publish_inference_cuda_cxx_lib paddle_light_api_shared) - add_dependencies(publish_inference publish_inference_cuda_cxx_lib) - add_custom_target(publish_inference_cuda_cxx_demos ${TARGET} - COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/third_party" - COMMAND cp -r "${CMAKE_BINARY_DIR}/third_party/install/*" "${INFER_LITE_PUBLISH_ROOT}/third_party" COMMAND mkdir -p "${INFER_LITE_PUBLISH_ROOT}/demo/cxx" COMMAND cp -r "${CMAKE_SOURCE_DIR}/lite/demo/cxx/cuda_demo/*" "${INFER_LITE_PUBLISH_ROOT}/demo/cxx" ) - add_dependencies(publish_inference_cuda_cxx_lib publish_inference_cuda_cxx_demos) add_dependencies(publish_inference_cuda_cxx_demos paddle_full_api_shared) -endif(LITE_WITH_CUDA) + add_dependencies(publish_inference publish_inference_cuda_cxx_demos) +endif(LITE_WITH_CUDA) + if (LITE_WITH_LIGHT_WEIGHT_FRAMEWORK AND LITE_WITH_ARM) if (NOT LITE_ON_TINY_PUBLISH) # add cxx lib diff --git a/lite/api/cxx_api_impl.cc b/lite/api/cxx_api_impl.cc index 972210c8f9ea05ba1b041382c43efad64aeacc1b..133b7f7ccf254ca13ab445b0116a684de610896b 100644 --- a/lite/api/cxx_api_impl.cc +++ b/lite/api/cxx_api_impl.cc @@ -31,10 +31,17 @@ namespace lite { void CxxPaddleApiImpl::Init(const lite_api::CxxConfig &config) { config_ = config; + auto places = config.valid_places(); #ifdef LITE_WITH_CUDA - Env::Init(); + // if kCUDA is included in valid places, it should be initialized first, + // otherwise skip this step. 
+ for (auto &p : places) { + if (p.target == TARGET(kCUDA)) { + Env<TARGET(kCUDA)>::Init(); + break; + } + } #endif - auto places = config.valid_places(); std::vector<std::string> passes{}; auto use_layout_preprocess_pass = config.model_dir().find("OPENCL_PRE_PRECESS"); diff --git a/lite/core/context.h b/lite/core/context.h index 88fe00d0f2aab41cfd3e5562d29f0a8a82598428..b22b59fbeb6a5e25547e18bcc4f62a263c4f165c 100644 --- a/lite/core/context.h +++ b/lite/core/context.h @@ -181,7 +181,11 @@ class Context<TargetType::kCUDA> { Env<TargetType::kCUDA>::Global(); // NOTE: InitOnce should only be used by ContextScheduler void InitOnce() { - cublas_fp32_ = std::make_shared<lite::cuda::Blas<float>>(); + if (devs.size() > 0) { + cublas_fp32_ = std::make_shared<lite::cuda::Blas<float>>(); + } else { + LOG(INFO) << "No cuda device(s) found, CUDAContext init failed."; + } } void Init(int dev_id, int exec_stream_id = 0, int io_stream_id = 0) { CHECK_GT(devs.size(), 0UL) diff --git a/lite/core/device_info.h b/lite/core/device_info.h index 1ff8b896a70dc538d2486a24db2625c7b62c600a..5727933f477ae76fbfa89c9aa3e03aec8763d445 100644 --- a/lite/core/device_info.h +++ b/lite/core/device_info.h @@ -142,7 +142,7 @@ class Env { // Get device count count = API::num_devices(); if (count == 0) { - CHECK(false) << "No device found!"; + LOG(INFO) << "No " << TargetToStr(Type) << " device(s) found!"; } else { LOG(INFO) << "Found " << count << " device(s)"; }