Unverified commit 172d4ecb authored by Chen Weihang, committed by GitHub

remove WITH_DSO compile option (#25444)

Parent bb45af02
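Context for the hunks below: `PADDLE_USE_DSO` was defined only when the `WITH_DSO` CMake option was ON, so deleting the option also deletes every `#ifdef PADDLE_USE_DSO` fallback in the sources, leaving the dynamic-loading branch as the sole code path. A minimal, hypothetical illustration (not Paddle code) of the guard pattern this commit eliminates:

```cpp
#include <cstdio>

// PADDLE_USE_DSO was injected by the now-removed WITH_DSO CMake option;
// after this commit, the dynamic-loading branch applies unconditionally.
#ifdef PADDLE_USE_DSO
static const char* kLinkMode = "dynamic (resolve via dlopen/dlsym at runtime)";
#else
static const char* kLinkMode = "static (call the directly linked symbol)";
#endif

int main() {
  std::printf("linking mode: %s\n", kLinkMode);
  return 0;
}
```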
......@@ -79,7 +79,6 @@ find_package(Threads REQUIRED)
 include(simd)
 ################################ Exposed Configurations #######################################
-option(WITH_DSO "Compile PaddlePaddle with dynamic linked CUDA" ON)
 option(WITH_AVX "Compile PaddlePaddle with AVX intrinsics" ${AVX_FOUND})
 option(WITH_PYTHON "Compile PaddlePaddle with python interpreter" ON)
 option(WITH_TESTING "Compile PaddlePaddle with unit testing" OFF)
......
......@@ -16,10 +16,6 @@ if(NOT WITH_PYTHON)
 add_definitions(-DPADDLE_NO_PYTHON)
 endif(NOT WITH_PYTHON)
-if(WITH_DSO)
-  add_definitions(-DPADDLE_USE_DSO)
-endif(WITH_DSO)
 if(WITH_TESTING)
   add_definitions(-DPADDLE_WITH_TESTING)
 endif(WITH_TESTING)
......
......@@ -188,12 +188,6 @@ endif()
add_definitions("-DPADDLE_CUDA_BINVER=\"${CUDA_VERSION_MAJOR}${CUDA_VERSION_MINOR}\"")
if(NOT WITH_DSO)
if(WIN32)
set_property(GLOBAL PROPERTY CUDA_MODULES ${CUDNN_LIBRARY} ${CUDA_CUBLAS_LIBRARIES} ${CUDA_curand_LIBRARY} ${CUDA_cusolver_LIBRARY})
endif(WIN32)
endif(NOT WITH_DSO)
# setting nvcc arch flags
select_nvcc_arch_flags(NVCC_FLAGS_EXTRA)
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} ${NVCC_FLAGS_EXTRA}")
......
......@@ -13,10 +13,6 @@ include_directories("/opt/rocm/thrust")
 set(HIP_HCC_FLAGS "${HIP_HCC_FLAGS} -fPIC -DPADDLE_WITH_HIP -std=c++11" )
-if(WITH_DSO)
-  set(HIP_HCC_FLAGS "${HIP_HCC_FLAGS} -DPADDLE_USE_DSO")
-endif(WITH_DSO)
 if(WITH_TESTING)
   set(HIP_HCC_FLAGS "${HIP_HCC_FLAGS} -DPADDLE_WITH_TESTING")
 endif(WITH_TESTING)
......
......@@ -37,16 +37,10 @@ find_library(TENSORRT_LIBRARY NAMES ${TR_INFER_LIB} ${TR_INFER_RT}
DOC "Path to TensorRT library.")
if(TENSORRT_INCLUDE_DIR AND TENSORRT_LIBRARY)
if(WITH_DSO)
set(TENSORRT_FOUND ON)
endif(WITH_DSO)
else()
set(TENSORRT_FOUND OFF)
if(WITH_DSO)
message(WARNING "TensorRT is NOT found when WITH_DSO is ON.")
else(WITH_DSO)
message(STATUS "TensorRT is disabled because WITH_DSO is OFF.")
endif(WITH_DSO)
message(STATUS "TensorRT is disabled.")
endif()
if(TENSORRT_FOUND)
......
......@@ -94,10 +94,10 @@ if(WITH_GPU)
 endif()
 if(WIN32)
-  if(WITH_GPU AND NOT WITH_DSO)
+  if(WITH_GPU)
     get_property(cuda_modules GLOBAL PROPERTY CUDA_MODULES)
     target_link_libraries(device_context ${cuda_modules})
-  endif(WITH_GPU AND NOT WITH_DSO)
+  endif(WITH_GPU)
 endif(WIN32)
 nv_test(device_context_test SRCS device_context_test.cu DEPS device_context gpu_info)
......
......@@ -36,7 +36,6 @@ extern void *cublas_dso_handle;
 *
 * note: default dynamic linked libs
 */
-#ifdef PADDLE_USE_DSO
 #define DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP(__name) \
   struct DynLoad__##__name { \
     using FUNC_TYPE = decltype(&::__name); \
......@@ -50,16 +49,6 @@ extern void *cublas_dso_handle;
     } \
   }; \
   extern DynLoad__##__name __name
-#else
-#define DECLARE_DYNAMIC_LOAD_CUBLAS_WRAP(__name) \
-  struct DynLoad__##__name { \
-    template <typename... Args> \
-    inline cublasStatus_t operator()(Args... args) { \
-      return ::__name(args...); \
-    } \
-  }; \
-  extern DynLoad__##__name __name
-#endif
 #define CUBLAS_BLAS_ROUTINE_EACH(__macro) \
   __macro(cublasSaxpy_v2); \
......
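The retained branch of each `DECLARE_DYNAMIC_LOAD_*_WRAP` macro resolves the symbol through a lazily opened DSO handle; the deleted `#else` branch simply forwarded to the statically linked `::__name`. A self-contained sketch of the retained pattern (Linux-only, compile with `-std=c++14`, link with `-ldl` on older glibc; it wraps libc's `getpid` instead of a cuBLAS call, and all names here are illustrative, not Paddle's):

```cpp
#include <dlfcn.h>   // dlopen, dlsym
#include <unistd.h>  // declares getpid, giving us its signature
#include <cstdio>
#include <mutex>

static std::once_flag dso_flag;
static void* dso_handle = nullptr;

// Open the "library" exactly once; dlopen(nullptr, ...) searches the process
// itself here, standing in for GetCublasDsoHandle() in the real code.
static void* GetDsoHandle() {
  std::call_once(dso_flag, [] { dso_handle = dlopen(nullptr, RTLD_LAZY); });
  return dso_handle;
}

// The surviving wrapper shape: deduce the signature from the declaration,
// resolve the symbol once via dlsym, then forward the arguments.
#define DECLARE_DYNAMIC_LOAD_WRAP(__name)                   \
  struct DynLoad__##__name {                                \
    using FUNC_TYPE = decltype(&::__name);                  \
    template <typename... Args>                             \
    auto operator()(Args... args) {                         \
      static void* p_func = dlsym(GetDsoHandle(), #__name); \
      return reinterpret_cast<FUNC_TYPE>(p_func)(args...);  \
    }                                                       \
  };                                                        \
  static DynLoad__##__name dyn_##__name

DECLARE_DYNAMIC_LOAD_WRAP(getpid);

int main() {
  std::printf("pid via dlsym: %d\n", static_cast<int>(dyn_getpid()));
  return 0;
}
```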
......@@ -25,14 +25,10 @@ void* cuda_dso_handle = nullptr;
 CUDA_ROUTINE_EACH(DEFINE_WRAP);
-#ifdef PADDLE_USE_DSO
 bool HasCUDADriver() {
   std::call_once(cuda_dso_flag, []() { cuda_dso_handle = GetCUDADsoHandle(); });
   return cuda_dso_handle != nullptr;
 }
-#else
-bool HasCUDADriver() { return false; }
-#endif
 }  // namespace dynload
 }  // namespace platform
......
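With the static fallback gone, `HasCUDADriver()` (and the analogous `HasCUDNN()` / `HasNVRTC()` below) becomes a genuine runtime probe: it reports whether the driver library could actually be opened, rather than returning a compile-time constant. A standalone sketch of the probe (assumes Linux and the usual `libcuda.so.1` soname; the real `GetCUDADsoHandle` also handles search paths):

```cpp
#include <dlfcn.h>
#include <cstdio>
#include <mutex>

static std::once_flag cuda_dso_flag;
static void* cuda_dso_handle = nullptr;

// True iff the CUDA driver library can be dlopen'ed; computed once and
// cached via std::call_once, exactly as in the hunk above.
bool HasCUDADriver() {
  std::call_once(cuda_dso_flag,
                 [] { cuda_dso_handle = dlopen("libcuda.so.1", RTLD_LAZY); });
  return cuda_dso_handle != nullptr;
}

int main() {
  std::printf("CUDA driver %s\n", HasCUDADriver() ? "available" : "missing");
  return 0;
}
```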
......@@ -27,8 +27,6 @@ extern std::once_flag cuda_dso_flag;
 extern void* cuda_dso_handle;
 extern bool HasCUDADriver();
-#ifdef PADDLE_USE_DSO
 #define DECLARE_DYNAMIC_LOAD_CUDA_WRAP(__name) \
   struct DynLoad__##__name { \
     template <typename... Args> \
......@@ -43,19 +41,6 @@ extern bool HasCUDADriver();
   }; \
   extern struct DynLoad__##__name __name
-#else
-#define DECLARE_DYNAMIC_LOAD_CUDA_WRAP(__name) \
-  struct DynLoad__##__name { \
-    template <typename... Args> \
-    inline auto operator()(Args... args) { \
-      return ::__name(args...); \
-    } \
-  }; \
-  extern DynLoad__##__name __name
-#endif
 /**
  * include all needed cuda driver functions
  **/
......
......@@ -50,7 +50,6 @@ CUDNN_DNN_ROUTINE_EACH_R7(DEFINE_WRAP);
 CUDNN_DNN_ROUTINE_EACH_AFTER_R7(DEFINE_WRAP);
 #endif
-#ifdef PADDLE_USE_DSO
 bool HasCUDNN() {
   std::call_once(cudnn_dso_flag,
                  []() { cudnn_dso_handle = GetCUDNNDsoHandle(); });
......@@ -64,9 +63,6 @@ void EnforceCUDNNLoaded(const char* fn_name) {
"Cannot load cudnn shared library. Cannot invoke method %s.",
fn_name));
}
#else
bool HasCUDNN() { return true; }
#endif
} // namespace dynload
} // namespace platform
......
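Since callers can no longer be statically linked against cuDNN, the wrappers must fail loudly when the library is absent; that is what the retained `EnforceCUDNNLoaded` does. A hedged sketch of the idea, with plain `fprintf`/`abort` standing in for Paddle's enforce machinery:

```cpp
#include <cstdio>
#include <cstdlib>

// Stand-in for the real handle set up by GetCUDNNDsoHandle().
static void* cudnn_dso_handle = nullptr;

// Abort with a descriptive message if a wrapper is invoked while the shared
// library failed to load (the removed #else path would instead have called a
// statically linked symbol).
void EnforceCUDNNLoaded(const char* fn_name) {
  if (cudnn_dso_handle == nullptr) {
    std::fprintf(stderr,
                 "Cannot load cudnn shared library. Cannot invoke method %s.\n",
                 fn_name);
    std::abort();
  }
}

int main() {
  EnforceCUDNNLoaded("cudnnCreate");  // aborts: the handle above is null
  return 0;
}
```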
......@@ -28,8 +28,6 @@ extern std::once_flag cudnn_dso_flag;
 extern void* cudnn_dso_handle;
 extern bool HasCUDNN();
-#ifdef PADDLE_USE_DSO
 extern void EnforceCUDNNLoaded(const char* fn_name);
 #define DECLARE_DYNAMIC_LOAD_CUDNN_WRAP(__name) \
   struct DynLoad__##__name { \
......@@ -46,19 +44,6 @@ extern void EnforceCUDNNLoaded(const char* fn_name);
   }; \
   extern struct DynLoad__##__name __name
-#else
-#define DECLARE_DYNAMIC_LOAD_CUDNN_WRAP(__name) \
-  struct DynLoad__##__name { \
-    template <typename... Args> \
-    inline auto operator()(Args... args) { \
-      return ::__name(args...); \
-    } \
-  }; \
-  extern DynLoad__##__name __name
-#endif
 /**
  * include all needed cudnn functions in HPPL
  * different cudnn version has different interfaces
......
......@@ -36,7 +36,6 @@ extern void *cupti_dso_handle;
 *
 * note: default dynamic linked libs
 */
-#ifdef PADDLE_USE_DSO
 #define DECLARE_DYNAMIC_LOAD_CUPTI_WRAP(__name) \
   struct DynLoad__##__name { \
     template <typename... Args> \
......@@ -50,16 +49,6 @@ extern void *cupti_dso_handle;
     } \
   }; \
   extern DynLoad__##__name __name
-#else
-#define DECLARE_DYNAMIC_LOAD_CUPTI_WRAP(__name) \
-  struct DynLoad__##__name { \
-    template <typename... Args> \
-    inline CUptiResult CUPTIAPI operator()(Args... args) { \
-      return __name(args...); \
-    } \
-  }; \
-  extern DynLoad__##__name __name
-#endif
 #define CUPTI_ROUTINE_EACH(__macro) \
   __macro(cuptiActivityEnable); \
......
......@@ -25,7 +25,7 @@ namespace platform {
 namespace dynload {
 extern std::once_flag curand_dso_flag;
 extern void *curand_dso_handle;
-#ifdef PADDLE_USE_DSO
 #define DECLARE_DYNAMIC_LOAD_CURAND_WRAP(__name) \
   struct DynLoad__##__name { \
     template <typename... Args> \
......@@ -39,16 +39,6 @@ extern void *curand_dso_handle;
     } \
   }; \
   extern DynLoad__##__name __name
-#else
-#define DECLARE_DYNAMIC_LOAD_CURAND_WRAP(__name) \
-  struct DynLoad__##__name { \
-    template <typename... Args> \
-    curandStatus_t operator()(Args... args) { \
-      return ::__name(args...); \
-    } \
-  }; \
-  extern DynLoad__##__name __name
-#endif
 #define CURAND_RAND_ROUTINE_EACH(__macro) \
   __macro(curandCreateGenerator); \
......
......@@ -26,7 +26,7 @@ namespace platform {
 namespace dynload {
 extern std::once_flag cusolver_dso_flag;
 extern void *cusolver_dso_handle;
-#ifdef PADDLE_USE_DSO
 #define DECLARE_DYNAMIC_LOAD_CUSOLVER_WRAP(__name) \
   struct DynLoad__##__name { \
     template <typename... Args> \
......@@ -41,16 +41,6 @@ extern void *cusolver_dso_handle;
     } \
   }; \
   extern DynLoad__##__name __name
-#else
-#define DECLARE_DYNAMIC_LOAD_CUSOLVER_WRAP(__name) \
-  struct DynLoad__##__name { \
-    template <typename... Args> \
-    cusolverStatus_t operator()(Args... args) { \
-      return ::__name(args...); \
-    } \
-  }; \
-  extern DynLoad__##__name __name
-#endif
 #define CUSOLVER_ROUTINE_EACH(__macro) \
   __macro(cusolverDnCreate); \
......
......@@ -26,8 +26,6 @@ namespace dynload {
 extern std::once_flag nccl_dso_flag;
 extern void* nccl_dso_handle;
-#ifdef PADDLE_USE_DSO
 #define DECLARE_DYNAMIC_LOAD_NCCL_WRAP(__name) \
   struct DynLoad__##__name { \
     template <typename... Args> \
......@@ -41,16 +39,6 @@ extern void* nccl_dso_handle;
     } \
   }; \
   extern DynLoad__##__name __name
-#else
-#define DECLARE_DYNAMIC_LOAD_NCCL_WRAP(__name) \
-  struct DynLoad__##__name { \
-    template <typename... Args> \
-    ncclResult_t operator()(Args... args) { \
-      return __name(args...); \
-    } \
-  }; \
-  extern DynLoad__##__name __name
-#endif
 #define NCCL_RAND_ROUTINE_EACH(__macro) \
   __macro(ncclCommInitAll); \
......
......@@ -25,15 +25,11 @@ void* nvrtc_dso_handle = nullptr;
 NVRTC_ROUTINE_EACH(DEFINE_WRAP);
-#ifdef PADDLE_USE_DSO
 bool HasNVRTC() {
   std::call_once(nvrtc_dso_flag,
                  []() { nvrtc_dso_handle = GetNVRTCDsoHandle(); });
   return nvrtc_dso_handle != nullptr;
 }
-#else
-bool HasNVRTC() { return false; }
-#endif
 }  // namespace dynload
 }  // namespace platform
......
......@@ -27,8 +27,6 @@ extern std::once_flag nvrtc_dso_flag;
 extern void* nvrtc_dso_handle;
 extern bool HasNVRTC();
-#ifdef PADDLE_USE_DSO
 #define DECLARE_DYNAMIC_LOAD_NVRTC_WRAP(__name) \
   struct DynLoad__##__name { \
     template <typename... Args> \
......@@ -43,19 +41,6 @@ extern bool HasNVRTC();
   }; \
   extern struct DynLoad__##__name __name
-#else
-#define DECLARE_DYNAMIC_LOAD_NVRTC_WRAP(__name) \
-  struct DynLoad__##__name { \
-    template <typename... Args> \
-    inline auto operator()(Args... args) { \
-      return ::__name(args...); \
-    } \
-  }; \
-  extern DynLoad__##__name __name
-#endif
 /**
  * include all needed nvrtc functions
  **/
......
......@@ -30,8 +30,6 @@ namespace dynload {
 extern std::once_flag tensorrt_dso_flag;
 extern void* tensorrt_dso_handle;
-#ifdef PADDLE_USE_DSO
 #define DECLARE_DYNAMIC_LOAD_TENSORRT_WRAP(__name) \
   struct DynLoad__##__name { \
     template <typename... Args> \
......@@ -49,17 +47,6 @@ extern void* tensorrt_dso_handle;
   }; \
   extern DynLoad__##__name __name
-#else
-#define DECLARE_DYNAMIC_LOAD_TENSORRT_WRAP(__name) \
-  struct DynLoad__##__name { \
-    template <typename... Args> \
-    tensorrtResult_t operator()(Args... args) { \
-      return __name(args...); \
-    } \
-  }; \
-  extern DynLoad__##__name __name
-#endif
 #define TENSORRT_RAND_ROUTINE_EACH(__macro) \
   __macro(createInferBuilder_INTERNAL); \
   __macro(createInferRuntime_INTERNAL);
......
......@@ -193,7 +193,6 @@ function cmake_base() {
 Configuring cmake in /paddle/build ...
     -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-Release}
     ${PYTHON_FLAGS}
-    -DWITH_DSO=ON
     -DWITH_GPU=${WITH_GPU:-OFF}
     -DWITH_AMD_GPU=${WITH_AMD_GPU:-OFF}
     -DWITH_DISTRIBUTE=${distibuted_flag}
......@@ -222,7 +221,6 @@ EOF
     cmake .. \
         -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-Release} \
         ${PYTHON_FLAGS} \
-        -DWITH_DSO=ON \
         -DWITH_GPU=${WITH_GPU:-OFF} \
         -DWITH_AMD_GPU=${WITH_AMD_GPU:-OFF} \
         -DWITH_DISTRIBUTE=${distibuted_flag} \
......