Unverified commit 59f89236, authored by Wilber, committed by GitHub

fix cmake-lint problems. (#43406)

* cmake-lint

* update
Parent d74d1838
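The bulk of the diff below applies one mechanical cmake-lint fix: block closers such as endif(), else(), endforeach(), endfunction(), and endmacro() no longer repeat the arguments of the opening command. A minimal sketch of the pattern, using a made-up WITH_FOO option rather than any flag from this patch:

# Legacy style flagged by cmake-lint: closers echo the opening arguments.
if(WITH_FOO)
  add_definitions(-DPADDLE_WITH_FOO)
else(WITH_FOO)
  message(STATUS "building without FOO")
endif(WITH_FOO)

# Style applied throughout this patch: bare closers.
if(WITH_FOO)
  add_definitions(-DPADDLE_WITH_FOO)
else()
  message(STATUS "building without FOO")
endif()

The remaining hunks re-wrap over-long comments and quoted strings, apparently to satisfy the linter's line-length check.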
@@ -81,64 +81,3 @@ repos:
    - id: cmakelint
      args: [--config=./tools/codestyle/.cmakelintrc]
      # exclude files which need to be fixed
-     exclude: |
-       (?x)^(
-           CMakeLists.txt|
-           python/paddle/fluid/tests/unittests/CMakeLists.txt|
-           paddle/fluid/inference/tests/infer_ut/CMakeLists.txt|
-           cmake/configure.cmake|
-           paddle/fluid/inference/api/demo_ci/CMakeLists.txt|
-           cmake/flags.cmake|
-           cmake/inference_lib.cmake|
-           cmake/external/protobuf.cmake|
-           paddle/fluid/framework/fleet/CMakeLists.txt|
-           paddle/fluid/inference/CMakeLists.txt|
-           paddle/fluid/inference/tests/api/CMakeLists.txt|
-           paddle/fluid/operators/CMakeLists.txt|
-           cmake/external/lite.cmake|
-           cmake/external/poplar.cmake|
-           cmake/python_module.cmake|
-           python/paddle/fluid/tests/unittests/asp/CMakeLists.txt|
-           cmake/cuda.cmake|
-           cmake/FindNumPy.cmake|
-           cmake/coveralls.cmake|
-           cmake/external/glog.cmake|
-           cmake/external/onnxruntime.cmake|
-           cmake/external/openblas.cmake|
-           cmake/external/xpu.cmake|
-           cmake/hip.cmake|
-           paddle/fluid/inference/analysis/ir_passes/CMakeLists.txt|
-           paddle/fluid/inference/api/CMakeLists.txt|
-           paddle/fluid/operators/controlflow/CMakeLists.txt|
-           python/paddle/fluid/tests/unittests/distributed_passes/CMakeLists.txt|
-           cmake/operators.cmake|
-           cmake/tensorrt.cmake|
-           paddle/fluid/inference/api/details/CMakeLists.txt|
-           python/paddle/fluid/tests/unittests/xpu/CMakeLists.txt|
-           cmake/external/arm_brpc.cmake|
-           cmake/external/concurrentqueue.cmake|
-           cmake/external/eigen.cmake|
-           cmake/external/mklml.cmake|
-           cmake/external/paddle2onnx.cmake|
-           cmake/miopen.cmake|
-           cmake/nccl.cmake|
-           cmake/simd.cmake|
-           paddle/fluid/inference/analysis/CMakeLists.txt|
-           paddle/fluid/inference/tests/infer_ut/external-cmake/gtest-cpp.cmake|
-           paddle/fluid/memory/allocation/CMakeLists.txt|
-           paddle/fluid/memory/CMakeLists.txt|
-           paddle/fluid/operators/cinn/CMakeLists.txt|
-           paddle/infrt/external_kernels/CMakeLists.txt|
-           paddle/infrt/kernel/phi/CMakeLists.txt|
-           python/paddle/fluid/contrib/slim/tests/CMakeLists.txt|
-           python/paddle/fluid/tests/unittests/autograd/CMakeLists.txt|
-           python/paddle/fluid/tests/unittests/distribution/CMakeLists.txt|
-           python/paddle/fluid/tests/unittests/dygraph_to_static/CMakeLists.txt|
-           python/paddle/fluid/tests/unittests/fft/CMakeLists.txt|
-           python/paddle/fluid/tests/unittests/ipu/CMakeLists.txt|
-           python/paddle/fluid/tests/unittests/mkldnn/CMakeLists.txt|
-           python/paddle/fluid/tests/unittests/npu/CMakeLists.txt|
-           python/paddle/fluid/tests/unittests/ps/CMakeLists.txt|
-           python/paddle/fluid/tests/unittests/rnn/CMakeLists.txt|
-           python/paddle/fluid/tests/unittests/sequence/CMakeLists.txt
-       )$
@@ -16,10 +16,10 @@ if(APPLE AND WITH_ARM)
  # cmake 3.19.2 version starts to support M1
  cmake_minimum_required(VERSION 3.19.2)
  cmake_policy(VERSION 3.19.2)
- else(APPLE AND WITH_ARM)
+ else()
  cmake_minimum_required(VERSION 3.15)
  cmake_policy(VERSION 3.10)
- endif(APPLE AND WITH_ARM)
+ endif()
  # use to get_property location of static lib
  # https://cmake.org/cmake/help/v3.0/policy/CMP0026.html?highlight=cmp0026
  cmake_policy(SET CMP0026 OLD)
@@ -152,7 +152,7 @@ if(WIN32)
  if(${flag_var} MATCHES "/MD")
  string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
  endif()
- endforeach(flag_var)
+ endforeach()
  endif()
  # NOTE(zhouwei): msvc max/min macro conflict with std::min/max, define NOMINMAX globally
@@ -179,10 +179,10 @@ if(WIN32)
  math(EXPR PROCESS_MAX "${CPU_CORES} * 2 / 3")
  set(${flag_var} "${${flag_var}} /MP${PROCESS_MAX}")
  endif()
- endforeach(flag_var)
+ endforeach()
  foreach(flag_var CMAKE_CXX_FLAGS CMAKE_C_FLAGS)
  set(${flag_var} "${${flag_var}} /w")
- endforeach(flag_var)
+ endforeach()
  # Windows Remove /Zi, /ZI for Release, MinSizeRel builds
  foreach(flag_var
@@ -191,7 +191,7 @@ if(WIN32)
  if(${flag_var} MATCHES "/Z[iI]")
  string(REGEX REPLACE "/Z[iI]" "" ${flag_var} "${${flag_var}}")
  endif()
- endforeach(flag_var)
+ endforeach()
  set(CMAKE_C_FLAGS
  "${CMAKE_C_FLAGS} /wd4068 /wd4129 /wd4244 /wd4267 /wd4297 /wd4530 /wd4577 /wd4819 /wd4838"
@@ -207,7 +207,7 @@ if(WIN32)
  if(MSVC_STATIC_CRT)
  set(${flag_var} "${${flag_var}} /NODEFAULTLIB:MSVCRT.LIB")
  endif()
- endforeach(flag_var)
+ endforeach()
  if(WITH_WIN_DUMP_DBG)
  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /Zi")
@@ -216,16 +216,16 @@ if(WIN32)
  foreach(flag_var CMAKE_SHARED_LINKER_FLAGS CMAKE_STATIC_LINKER_FLAGS
  CMAKE_EXE_LINKER_FLAGS CMAKE_LINKER_FLAGS)
  set(${flag_var} "${${flag_var}} /DEBUG /OPT:REF /OPT:ICF")
- endforeach(flag_var)
+ endforeach()
  add_definitions("-DWITH_WIN_DUMP_DBG")
  endif()
- else(WIN32)
+ else()
  set(CMAKE_CXX_FLAGS
  "${CMAKE_CXX_FLAGS} -Wno-error=deprecated-declarations -Wno-deprecated-declarations"
  )
- endif(WIN32)
+ endif()
  find_package(Git REQUIRED)
@@ -430,7 +430,7 @@ endif()
  if(WITH_ROCM)
  include(hip)
  include(miopen) # set miopen libraries, must before configure
- endif(WITH_ROCM)
+ endif()
  if(WITH_XPU_KP)
  include(xpu_kp)
...
@@ -26,7 +26,7 @@ if(PYTHON_EXECUTABLE)
  OUTPUT_VARIABLE NUMPY_PATH)
  elseif(_numpy_out)
  message(STATUS "Python executable not found.")
- endif(PYTHON_EXECUTABLE)
+ endif()
  find_path(PYTHON_NUMPY_INCLUDE_DIR numpy/arrayobject.h
  HINTS "${NUMPY_PATH}" "${PYTHON_INCLUDE_PATH}")
@@ -35,7 +35,7 @@ if(PYTHON_NUMPY_INCLUDE_DIR)
  set(PYTHON_NUMPY_FOUND
  1
  CACHE INTERNAL "Python numpy found")
- endif(PYTHON_NUMPY_INCLUDE_DIR)
+ endif()
  include(FindPackageHandleStandardArgs)
  find_package_handle_standard_args(NumPy DEFAULT_MSG PYTHON_NUMPY_INCLUDE_DIR)
@@ -14,19 +14,19 @@
  if(NOT WITH_PYTHON)
  add_definitions(-DPADDLE_NO_PYTHON)
- endif(NOT WITH_PYTHON)
+ endif()
  if(WITH_TESTING)
  add_definitions(-DPADDLE_WITH_TESTING)
- endif(WITH_TESTING)
+ endif()
  if(WITH_INFERENCE_API_TEST)
  add_definitions(-DPADDLE_WITH_INFERENCE_API_TEST)
- endif(WITH_INFERENCE_API_TEST)
+ endif()
  if(NOT WITH_PROFILER)
  add_definitions(-DPADDLE_DISABLE_PROFILER)
- endif(NOT WITH_PROFILER)
+ endif()
  if(WITH_AVX AND AVX_FOUND)
  set(SIMD_FLAG ${AVX_FLAG})
@@ -60,8 +60,8 @@ if(WIN32)
  FATAL
  "Windows build only support msvc. Which was binded by the nvcc compiler of NVIDIA."
  )
- endif(NOT MSVC)
+ endif()
- endif(WIN32)
+ endif()
  if(WITH_MUSL)
  add_definitions(-DPADDLE_WITH_MUSL)
@@ -195,9 +195,9 @@ if(WITH_MKLML AND MKLML_IOMP_LIB)
  if(WIN32)
  # openmp not support well for now on windows
  set(OPENMP_FLAGS "")
- else(WIN32)
+ else()
  set(OPENMP_FLAGS "-fopenmp")
- endif(WIN32)
+ endif()
  set(CMAKE_C_CREATE_SHARED_LIBRARY_FORBIDDEN_FLAGS ${OPENMP_FLAGS})
  set(CMAKE_CXX_CREATE_SHARED_LIBRARY_FORBIDDEN_FLAGS ${OPENMP_FLAGS})
  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OPENMP_FLAGS}")
@@ -221,15 +221,15 @@ endif()
  if(WITH_BRPC_RDMA)
  add_definitions(-DPADDLE_WITH_BRPC_RDMA)
- endif(WITH_BRPC_RDMA)
+ endif()
  if(ON_INFER)
  add_definitions(-DPADDLE_ON_INFERENCE)
- endif(ON_INFER)
+ endif()
  if(WITH_CRYPTO)
  add_definitions(-DPADDLE_WITH_CRYPTO)
- endif(WITH_CRYPTO)
+ endif()
  if(WITH_CUSTOM_DEVICE AND NOT WIN32)
  add_definitions(-DPADDLE_WITH_CUSTOM_DEVICE)
...
@@ -96,7 +96,7 @@ if(WITH_COVERAGE)
  if(NOT ${EXCLUDE_DIR_FOUND} EQUAL -1)
  list(REMOVE_ITEM PADDLE_SOURCES ${TMP_PATH})
  endif()
- endforeach(TMP_PATH)
+ endforeach()
  endforeach()
  # convert to absolute path
...
@@ -274,7 +274,7 @@ set(CMAKE_CUDA_STANDARD 14)
  # So replace /W[1-4] with /W0
  if(WIN32)
  string(REGEX REPLACE "/W[1-4]" " /W0 " CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS}")
- endif(WIN32)
+ endif()
  # in cuda9, suppress cuda warning on eigen
  set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -w")
  # Set :expt-relaxed-constexpr to suppress Eigen warnings
@@ -293,7 +293,7 @@ if(WIN32)
  if(${flag_var} MATCHES "-MD")
  string(REGEX REPLACE "-MD" "-MT" ${flag_var} "${${flag_var}}")
  endif()
- endforeach(flag_var)
+ endforeach()
  endif()
  endif()
...
@@ -25,7 +25,8 @@ if(WIN32)
  elseif(LINUX)
  if(WITH_ROCM)
  # For HIPCC Eigen::internal::device::numeric_limits is not EIGEN_DEVICE_FUNC
- # which will cause compiler error of using __host__ funciont in __host__ __device__
+ # which will cause compiler error of using __host__ funciont
+ # in __host__ __device__
  file(TO_NATIVE_PATH ${PADDLE_SOURCE_DIR}/patches/eigen/Meta.h native_src)
  file(TO_NATIVE_PATH ${EIGEN_SOURCE_DIR}/Eigen/src/Core/util/Meta.h
  native_dst)
...
@@ -28,12 +28,12 @@ if(WIN32)
  CACHE FILEPATH "glog library." FORCE)
  set(GLOG_CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4267 /wd4530")
  add_definitions("/DGOOGLE_GLOG_DLL_DECL=")
- else(WIN32)
+ else()
  set(GLOG_LIBRARIES
  "${GLOG_INSTALL_DIR}/lib/libglog.a"
  CACHE FILEPATH "glog library." FORCE)
  set(GLOG_CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
- endif(WIN32)
+ endif()
  include_directories(${GLOG_INCLUDE_DIR})
...
@@ -32,7 +32,8 @@ if(WIN32)
  set(MKLML_SHARED_IOMP_LIB ${MKLML_LIB_DIR}/libiomp5md.dll)
  else()
  #TODO(intel-huying):
- # Now enable csrmm function in mklml library temporarily, it will be updated as offical version later.
+ # Now enable csrmm function in mklml library temporarily,
+ # it will be updated as offical version later.
  set(MKLML_VER
  "csrmm_mklml_lnx_2019.0.5"
  CACHE STRING "" FORCE)
@@ -51,8 +52,9 @@ message(STATUS "MKLML_VER: ${MKLML_VER}, MKLML_URL: ${MKLML_URL}")
  set(MKLML_PREFIX_DIR ${THIRD_PARTY_PATH}/mklml)
  set(MKLML_SOURCE_DIR ${THIRD_PARTY_PATH}/mklml/src/extern_mklml)
- # Ninja Generator can not establish the correct dependency relationship between the imported library with target,
- # the product file in the ExternalProject need to be specified manually, please refer to
+ # Ninja Generator can not establish the correct dependency relationship
+ # between the imported library with target, the product file
+ # in the ExternalProject need to be specified manually, please refer to
  # https://stackoverflow.com/questions/54866067/cmake-and-ninja-missing-and-no-known-rule-to-make-it
  # It is the same to all other ExternalProject.
  ExternalProject_Add(
...
@@ -58,7 +58,7 @@ if(NOT WIN32)
  UPDATE_COMMAND ""
  CONFIGURE_COMMAND ""
  BUILD_BYPRODUCTS ${CBLAS_LIBRARIES})
- else(NOT WIN32)
+ else()
  set(CBLAS_LIBRARIES
  "${CBLAS_INSTALL_DIR}/lib/openblas${CMAKE_STATIC_LIBRARY_SUFFIX}"
  CACHE FILEPATH "openblas library." FORCE)
@@ -92,4 +92,4 @@ else(NOT WIN32)
  BUILD_BYPRODUCTS ${CBLAS_LIBRARIES})
  set(OPENBLAS_SHARED_LIB
  ${CBLAS_INSTALL_DIR}/bin/openblas${CMAKE_SHARED_LIBRARY_SUFFIX})
- endif(NOT WIN32)
+ endif()
@@ -69,7 +69,7 @@ else()
  set(PADDLE2ONNX_COMPILE_LIB
  "${PADDLE2ONNX_INSTALL_DIR}/lib/libpaddle2onnx.so"
  CACHE FILEPATH "paddle2onnx compile library." FORCE)
- endif(WIN32)
+ endif()
  if(WIN32)
  set(PADDLE2ONNX_URL
...
@@ -16,7 +16,7 @@ include(ExternalProject)
  # Always invoke `FIND_PACKAGE(Protobuf)` for importing function protobuf_generate_cpp
  if(NOT WIN32)
  find_package(Protobuf QUIET)
- endif(NOT WIN32)
+ endif()
  unset_var(PROTOBUF_INCLUDE_DIR)
  unset_var(PROTOBUF_FOUND)
@@ -147,7 +147,7 @@ set(PROTOBUF_ROOT
  CACHE PATH "Folder contains protobuf")
  if(WIN32)
  set(PROTOBUF_ROOT ${THIRD_PARTY_PATH}/install/protobuf)
- endif(WIN32)
+ endif()
  if(NOT "${PROTOBUF_ROOT}" STREQUAL "")
  find_path(
@@ -349,4 +349,4 @@ if(NOT PROTOBUF_FOUND)
  # `protoc.exe` existed before calling it.
  set(EXTERN_PROTOBUF_DEPEND extern_protobuf)
  prompt_protobuf_lib(extern_protobuf)
- endif(NOT PROTOBUF_FOUND)
+ endif()
@@ -134,9 +134,9 @@ if(WITH_XPU_BKCL)
  set(XPU_BKCL_INC_DIR "${THIRD_PARTY_PATH}/install/xpu/include")
  include_directories(${XPU_BKCL_INC_DIR})
  target_link_libraries(xpulib ${XPU_API_LIB} ${XPU_RT_LIB} ${XPU_BKCL_LIB})
- else(WITH_XPU_BKCL)
+ else()
  target_link_libraries(xpulib ${XPU_API_LIB} ${XPU_RT_LIB})
- endif(WITH_XPU_BKCL)
+ endif()
  add_dependencies(xpulib ${XPU_PROJECT})
...
@@ -113,10 +113,10 @@ check_type_size(pthread_spinlock_t SPINLOCK_FOUND)
  check_type_size(pthread_barrier_t BARRIER_FOUND)
  if(SPINLOCK_FOUND)
  add_definitions(-DPADDLE_USE_PTHREAD_SPINLOCK)
- endif(SPINLOCK_FOUND)
+ endif()
  if(BARRIER_FOUND)
  add_definitions(-DPADDLE_USE_PTHREAD_BARRIER)
- endif(BARRIER_FOUND)
+ endif()
  set(CMAKE_EXTRA_INCLUDE_FILES "")
  # Only one sanitizer is allowed in compile time
@@ -180,7 +180,7 @@ if(NOT WIN32)
  -Wno-parentheses # Warning in Eigen gcc 8.3
  )
  endif()
- endif(NOT APPLE)
+ endif()
  set(GPU_COMMON_FLAGS
  -fPIC
@@ -200,21 +200,21 @@ if(NOT WIN32)
  AND NOT WITH_MIPS)
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -m64")
  endif()
- endif(NOT WIN32)
+ endif()
  if(APPLE)
  if(WITH_ARM)
  set(CMAKE_OSX_ARCHITECTURES
  "arm64"
  CACHE STRING "Build architectures for OSX" FORCE)
- else(WITH_ARM)
+ else()
  set(CMAKE_OSX_ARCHITECTURES
  "x86_64"
  CACHE STRING "Build architectures for OSX" FORCE)
- endif(WITH_ARM)
+ endif()
  # On Mac OS X register class specifier is deprecated and will cause warning error on latest clang 10.0
  set(COMMON_FLAGS -Wno-deprecated-register)
- endif(APPLE)
+ endif()
  if(WITH_HETERPS AND WITH_PSLIB)
  set(COMMON_FLAGS -D_GLIBCXX_USE_CXX11_ABI=0 ${COMMON_FLAGS})
@@ -224,7 +224,7 @@ endif()
  if(LINUX)
  set(GPU_COMMON_FLAGS -Wall -Wextra -Werror ${GPU_COMMON_FLAGS})
- endif(LINUX)
+ endif()
  foreach(flag ${COMMON_FLAGS})
  safe_set_cflag(CMAKE_C_FLAGS ${flag})
...
@@ -112,7 +112,7 @@ if(CMAKE_BUILD_TYPE MATCHES Debug)
  list(APPEND HIP_CXX_FLAGS -g2)
  list(APPEND HIP_CXX_FLAGS -O0)
  list(APPEND HIP_HIPCC_FLAGS -fdebug-info-for-profiling)
- endif(CMAKE_BUILD_TYPE MATCHES Debug)
+ endif()
  set(HIP_HCC_FLAGS ${HIP_CXX_FLAGS})
  set(HIP_CLANG_FLAGS ${HIP_CXX_FLAGS})
...
@@ -59,14 +59,14 @@ function(copy TARGET)
  POST_BUILD
  COMMAND ${PYTHON_EXECUTABLE} ${COPY_SCRIPT_DIR}/copyfile.py
  ${native_src} ${native_dst})
- else(WIN32) #not windows
+ else() #not windows
  add_custom_command(
  TARGET ${TARGET}
  POST_BUILD
  COMMAND mkdir -p "${dst}"
  COMMAND cp -r "${src}" "${dst}"
  COMMENT "copying ${src} -> ${dst}")
- endif(WIN32) # not windows
+ endif() # not windows
  endforeach()
  endfunction()
@@ -265,7 +265,7 @@ if(WIN32)
  DSTS ${PADDLE_INFERENCE_INSTALL_DIR}/paddle/include
  ${PADDLE_INFERENCE_INSTALL_DIR}/paddle/lib
  ${PADDLE_INFERENCE_INSTALL_DIR}/paddle/lib)
- else(WIN32)
+ else()
  set(paddle_inference_lib
  ${PADDLE_BINARY_DIR}/paddle/fluid/inference/libpaddle_inference.*)
  copy(
@@ -273,7 +273,7 @@ else(WIN32)
  SRCS ${src_dir}/inference/api/paddle_*.h ${paddle_inference_lib}
  DSTS ${PADDLE_INFERENCE_INSTALL_DIR}/paddle/include
  ${PADDLE_INFERENCE_INSTALL_DIR}/paddle/lib)
- endif(WIN32)
+ endif()
  copy(
  inference_lib_dist
@@ -350,11 +350,11 @@ set(src_dir "${PADDLE_SOURCE_DIR}/paddle/fluid")
  if(WIN32)
  set(paddle_inference_c_lib
  $<TARGET_FILE_DIR:paddle_inference_c>/paddle_inference_c.*)
- else(WIN32)
+ else()
  set(paddle_inference_c_lib
  ${PADDLE_BINARY_DIR}/paddle/fluid/inference/capi_exp/libpaddle_inference_c.*
  )
- endif(WIN32)
+ endif()
  copy(
  inference_lib_dist
@@ -436,7 +436,7 @@ set(module "platform")
  set(platform_lib_deps profiler_proto errors)
  if(WITH_GPU)
  set(platform_lib_deps ${platform_lib_deps} external_error_proto)
- endif(WITH_GPU)
+ endif()
  add_dependencies(fluid_lib_dist ${platform_lib_deps})
  copy(
...
@@ -65,10 +65,9 @@ macro(find_miopen_version miopen_header_file)
  math(EXPR MIOPEN_VERSION "${MIOPEN_MAJOR_VERSION} * 1000 +
  ${MIOPEN_MINOR_VERSION} * 10 + ${MIOPEN_PATCH_VERSION}")
  message(
- STATUS
- "Current MIOpen header is ${MIOPEN_INCLUDE_DIR}/miopen/miopen.h "
- "Current MIOpen version is v${MIOPEN_MAJOR_VERSION}.${MIOPEN_MINOR_VERSION}.${MIOPEN_PATCH_VERSION}. "
- )
+ STATUS "Current MIOpen header is ${MIOPEN_INCLUDE_DIR}/miopen/miopen.h "
+ "Current MIOpen version is v${MIOPEN_MAJOR_VERSION}.\
+ ${MIOPEN_MINOR_VERSION}.${MIOPEN_PATCH_VERSION}. ")
  endif()
  endmacro()
...
@@ -50,10 +50,8 @@ if(WITH_NCCL)
  endif()
  add_definitions("-DNCCL_VERSION_CODE=$NCCL_VERSION")
- message(
- STATUS
- "Current NCCL header is ${NCCL_INCLUDE_DIR}/nccl.h. "
- "Current NCCL version is v${NCCL_MAJOR_VERSION}.${NCCL_MINOR_VERSION}.${NCCL_PATCH_VERSION} "
- )
+ message(STATUS "Current NCCL header is ${NCCL_INCLUDE_DIR}/nccl.h. "
+ "Current NCCL version is \
+ v${NCCL_MAJOR_VERSION}.${NCCL_MINOR_VERSION}.${NCCL_PATCH_VERSION} ")
  endif()
  endif()
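The reflowed message() calls in the two hunks above rely on CMake's quoted-argument continuation: a backslash immediately before a newline inside a quoted argument removes both characters, so the logged string stays intact while each source line stays short. A small illustrative sketch (DEMO_VERSION is a placeholder variable, not from this patch):

set(DEMO_VERSION "1.2.3")
# The trailing backslash continues the quoted argument onto the next source
# line; the continuation line starts in column 0 so no extra spaces are added.
message(STATUS "Current demo version is \
v${DEMO_VERSION}")
# Logs: -- Current demo version is v1.2.3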
@@ -217,7 +217,7 @@ function(op_library TARGET)
  return()
  endif()
  endforeach()
- endif(WIN32)
+ endif()
  # Unity Build relies on global option `WITH_UNITY_BUILD` and local option `UNITY`.
  if(WITH_UNITY_BUILD AND op_library_UNITY)
...
@@ -22,8 +22,8 @@ function(find_python_module module)
  set(PY_${module_upper}
  ${_${module}_location}
  CACHE STRING "Location of Python module ${module}")
- endif(NOT _${module}_status)
+ endif()
- endif(NOT PY_${module_upper})
+ endif()
  find_package_handle_standard_args(PY_${module} DEFAULT_MSG PY_${module_upper})
  if(NOT PY_${module_upper}_FOUND AND ${module}_FIND_REQUIRED)
  message(FATAL_ERROR "python module ${module} is not found")
@@ -39,7 +39,7 @@ function(find_python_module module)
  set(PY_${module_upper}_VERSION
  ${_${module}_version}
  CACHE STRING "Version of Python module ${module}")
- endif(NOT _${module}_status)
+ endif()
  set(PY_${module_upper}_FOUND
  ${PY_${module_upper}_FOUND}
@@ -47,4 +47,4 @@ function(find_python_module module)
  set(PY_${module_upper}_VERSION
  ${PY_${module_upper}_VERSION}
  PARENT_SCOPE)
- endfunction(find_python_module)
+ endfunction()
@@ -81,10 +81,10 @@ check_cxx_source_runs(
  #include <immintrin.h>
  int main()
  {
- __m256 a = _mm256_set_ps (-1.0f, 2.0f, -3.0f, 4.0f, -1.0f, 2.0f, -3.0f, 4.0f);
+ __m256 a = _mm256_set_ps(-1.0f, 2.0f, -3.0f, 4.0f, -1.0f, 2.0f, -3.0f, 4.0f);
- __m256 b = _mm256_set_ps (1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f);
+ __m256 b = _mm256_set_ps(1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f);
- __m256 result = _mm256_add_ps (a, b);
+ __m256 result = _mm256_add_ps(a, b);
  return 0;
  }"
  AVX_FOUND)
...
@@ -5,7 +5,7 @@ if(WITH_PSLIB)
  if(NOT WITH_HETERPS)
  set(BRPC_DEPS brpc)
  endif()
- endif(WITH_PSLIB_BRPC)
+ endif()
  cc_library(
  fleet_wrapper
  SRCS fleet_wrapper.cc
@@ -21,7 +21,7 @@ else()
  fleet_wrapper
  SRCS fleet_wrapper.cc
  DEPS framework_proto variable_helper scope)
- endif(WITH_PSLIB)
+ endif()
  if(WITH_HETERPS)
  if(WITH_NCCL AND WITH_GPU)
@@ -48,7 +48,7 @@ else()
  ps_gpu_wrapper
  SRCS ps_gpu_wrapper.cc
  DEPS gloo_wrapper)
- endif(WITH_HETERPS)
+ endif()
  if(WITH_NCCL OR WITH_RCCL)
  cc_library(
@@ -74,7 +74,7 @@ else()
  box_wrapper
  SRCS box_wrapper.cc
  DEPS framework_proto lod_tensor)
- endif(WITH_BOX_PS)
+ endif()
  if(WITH_GLOO)
  cc_library(
@@ -94,7 +94,7 @@ else()
  metrics
  SRCS metrics.cc
  DEPS gloo_wrapper)
- endif(WITH_GLOO)
+ endif()
  if(WITH_PSLIB)
  set(DISTRIBUTE_COMPILE_FLAGS
...
@@ -97,7 +97,7 @@ set(SHARED_INFERENCE_DEPS ${fluid_modules} phi analysis_predictor
  if(WITH_CRYPTO)
  set(SHARED_INFERENCE_DEPS ${SHARED_INFERENCE_DEPS} paddle_crypto)
- endif(WITH_CRYPTO)
+ endif()
  if(WITH_PSCORE)
  set(SHARED_INFERENCE_DEPS ${SHARED_INFERENCE_DEPS} fleet ps_service
@@ -108,7 +108,7 @@ if(WITH_ONNXRUNTIME)
  set(SHARED_INFERENCE_SRCS
  ${SHARED_INFERENCE_SRCS}
  ${CMAKE_CURRENT_SOURCE_DIR}/api/onnxruntime_predictor.cc)
- endif(WITH_ONNXRUNTIME)
+ endif()
  # Create shared inference library
  cc_library(
...
@@ -87,7 +87,7 @@ function(inference_analysis_test TARGET)
  inference_base_test_run(${TARGET} COMMAND ${TARGET} ARGS
  ${analysis_test_ARGS})
  endif()
- endfunction(inference_analysis_test)
+ endfunction()
  if(NOT APPLE AND NOT WIN32)
  inference_analysis_test(
...
@@ -15,7 +15,7 @@
  if(APPLE)
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=pessimizing-move")
- endif(APPLE)
+ endif()
  add_subdirectory(details)
@@ -84,14 +84,14 @@ if(WITH_ONNXRUNTIME)
  infer_io_utils
  onnxruntime
  paddle2onnx)
- else(WITH_ONNXRUNTIME)
+ else()
  cc_library(
  analysis_predictor
  SRCS analysis_predictor.cc resource_manager.cc infer_context.cc
  ${mkldnn_quantizer_src}
  DEPS ${inference_deps} zero_copy_tensor ir_pass_manager op_compatible_info
  infer_io_utils)
- endif(WITH_ONNXRUNTIME)
+ endif()
  cc_test(
  test_paddle_inference_api
...
@@ -21,8 +21,8 @@ macro(safe_set_static_flag)
  CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
  if(${flag_var} MATCHES "/MD")
  string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
- endif(${flag_var} MATCHES "/MD")
+ endif()
- endforeach(flag_var)
+ endforeach()
  endmacro()
  if(NOT DEFINED PADDLE_LIB)
@@ -105,7 +105,7 @@ if(WITH_GPU)
  endif()
  endif()
  message(STATUS "Current CUDA lib path: ${CUDA_LIB}")
- endif(NOT WIN32)
+ endif()
  endif()
  if(USE_TENSORRT AND WITH_GPU)
@@ -157,9 +157,9 @@ if(WITH_MKL)
  include_directories("${MKLDNN_PATH}/include")
  if(WIN32)
  set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib)
- else(WIN32)
+ else()
  set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0)
- endif(WIN32)
+ endif()
  endif()
  else()
  set(OPENBLAS_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}openblas")
@@ -232,7 +232,7 @@ else()
  utf8proc_static
  ${EXTERNAL_LIB})
  set(DEPS ${DEPS} shlwapi.lib)
- endif(NOT WIN32)
+ endif()
  if(WITH_GPU)
  if(NOT WIN32)
...
@@ -26,13 +26,13 @@ if(WITH_ONNXRUNTIME)
  zero_copy_tensor_dummy
  SRCS zero_copy_tensor_dummy.cc
  DEPS onnxruntime)
- else(WITH_ONNXRUNTIME)
+ else()
  cc_library(
  zero_copy_tensor
  SRCS zero_copy_tensor.cc
  DEPS scope lod_tensor enforce)
  cc_library(zero_copy_tensor_dummy SRCS zero_copy_tensor_dummy.cc)
- endif(WITH_ONNXRUNTIME)
+ endif()
  cc_test(
  zero_copy_tensor_test
...
@@ -799,7 +799,7 @@ if(WITH_MKLDNN)
  if(NOT LINUX)
  download_quant_data_without_verify(${QUANT2_MobileNetV1_MODEL_DIR}
  "MobileNet_qat_perf.tar.gz")
- endif(NOT LINUX)
+ endif()
  download_quant_data_without_verify(${QUANT2_INT8_MobileNetV1_MODEL_DIR}
  "MobileNet_qat_perf_int8.tar.gz")
  inference_analysis_api_quant_test_run(
@@ -829,7 +829,7 @@ if(WITH_MKLDNN)
  download_quant_data_without_verify(
  ${QUANT2_RESNET50_CHANNELWISE_MODEL_DIR}
  ${QUANT2_RESNET50_CHANNELWISE_MODEL_ARCHIVE})
- endif(NOT LINUX)
+ endif()
  set(QUANT2_RESNET50_MODEL
  ${QUANT2_RESNET50_CHANNELWISE_MODEL_DIR}/ResNet50_qat_channelwise)
  inference_analysis_api_quant_test_run(
...
@@ -22,8 +22,8 @@ macro(safe_set_static_flag)
  CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
  if(${flag_var} MATCHES "/MD")
  string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
- endif(${flag_var} MATCHES "/MD")
+ endif()
- endforeach(flag_var)
+ endforeach()
  endmacro()
  if(NOT DEFINED PADDLE_LIB)
@@ -106,7 +106,7 @@ if(WITH_GPU)
  endif()
  endif()
  message(STATUS "Current CUDA lib path: ${CUDA_LIB}")
- endif(NOT WIN32)
+ endif()
  endif()
  if(USE_TENSORRT AND WITH_GPU)
@@ -182,9 +182,9 @@ if(WITH_MKL)
  include_directories("${MKLDNN_PATH}/include")
  if(WIN32)
  set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib)
- else(WIN32)
+ else()
  set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0)
- endif(WIN32)
+ endif()
  endif()
  else()
  set(OPENBLAS_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}openblas")
@@ -255,7 +255,7 @@ else()
  cryptopp-static
  ${EXTERNAL_LIB})
  set(DEPS ${DEPS} shlwapi.lib)
- endif(NOT WIN32)
+ endif()
  if(WITH_GPU)
  if(NOT WIN32)
@@ -302,7 +302,7 @@ if(WITH_GTEST)
  ${DEMO_NAME}
  ${GTEST_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/libgtest${CMAKE_STATIC_LIBRARY_SUFFIX}
  )
- endif(WIN32)
+ endif()
  endif()
  if(WIN32)
  if("${CMAKE_GENERATOR}" MATCHES "Ninja")
...
@@ -10,7 +10,8 @@ set(GTEST_REPOSITORY https://github.com/google/googletest.git)
  set(GTEST_TAG release-1.8.1)
  include_directories(${GTEST_INCLUDE_DIR})
  if(WIN32)
- # if use CMAKE_INSTALL_LIBDIR, the path of lib actually is install/gtest/lib/gtest.lib but GTEST_LIBRARIES
+ # if use CMAKE_INSTALL_LIBDIR, the path of lib actually is \
+ # install/gtest/lib/gtest.lib but GTEST_LIBRARIES
  # is install/gtest/gtest.lib
  set(GTEST_LIBRARIES
  "${GTEST_INSTALL_DIR}/lib/gtest.lib"
@@ -25,7 +26,7 @@ else()
  set(GTEST_MAIN_LIBRARIES
  "${GTEST_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/libgtest_main.a"
  CACHE FILEPATH "gtest main libraries." FORCE)
- endif(WIN32)
+ endif()
  ExternalProject_Add(
  extern_gtest
  PREFIX gtest
...
@@ -47,10 +47,8 @@ if(WITH_GPU)
  if(WITH_TESTING AND TEST stream_safe_cuda_alloc_test)
  set_tests_properties(
  stream_safe_cuda_alloc_test
- PROPERTIES
- ENVIRONMENT
- "FLAGS_use_stream_safe_cuda_allocator=true;FLAGS_allocator_strategy=auto_growth"
- )
+ PROPERTIES ENVIRONMENT "FLAGS_use_stream_safe_cuda_allocator=true; \
+ FLAGS_allocator_strategy=auto_growth")
  endif()
  endif()
...
@@ -261,4 +261,4 @@ if(NOT WIN32)
  SRCS cuda_ipc_allocator.cc
  DEPS allocator)
  endif()
- endif(NOT WIN32)
+ endif()
...
  include(operators)
  # solve "math constants not defined" problems caused by the order of inclusion
  # of <cmath> and the definition of macro _USE_MATH_DEFINES
  add_definitions(-D_USE_MATH_DEFINES)
...
@@ -10,6 +10,6 @@ message(STATUS "external_kernels_lib: ${external_kernels_lib}")
  add_test(
  NAME run_and_check_external_kernels
  COMMAND
- sh -c
- "${CMAKE_BINARY_DIR}/infrt/host_context/infrtexec -i ${basic_mlir} --shared_libs=${external_kernels_lib} | ${LLVM_PATH}/bin/FileCheck ${basic_mlir}"
- )
+ sh -c "${CMAKE_BINARY_DIR}/infrt/host_context/infrtexec -i ${basic_mlir} \
+ --shared_libs=${external_kernels_lib} | \
+ ${LLVM_PATH}/bin/FileCheck ${basic_mlir}")
@@ -102,7 +102,8 @@ function(inference_quant_int8_image_classification_test target quant_model_dir
  0.1)
  endfunction()
- # set batch_size 10 for UT only (avoid OOM). For whole dataset, use batch_size 25
+ # set batch_size 10 for UT only (avoid OOM).
+ # For whole dataset, use batch_size 25
  function(inference_quant2_int8_image_classification_test target quant_model_dir
  fp32_model_dir dataset_path)
  py_test(
@@ -127,7 +128,8 @@ function(inference_quant2_int8_image_classification_test target quant_model_dir
  0.1)
  endfunction()
- # set batch_size 10 for UT only (avoid OOM). For whole dataset, use batch_size 20
+ # set batch_size 10 for UT only (avoid OOM).
+ # For whole dataset, use batch_size 20
  function(
  inference_quant2_int8_nlp_test
  target
@@ -284,7 +286,10 @@ if(LINUX AND WITH_MKLDNN)
  download_quant_model(
  ${QUANT_RESNET101_MODEL_DIR} ${QUANT_RESNET101_MODEL_ARCHIVE}
  95c6d01e3aeba31c13efb2ba8057d558)
- # inference_quant_int8_image_classification_test(test_quant_int8_resnet101_mkldnn ${QUANT_RESNET101_MODEL_DIR}/model ${IMAGENET_DATA_PATH})
+ # inference_quant_int8_image_classification_test( \
+ # test_quant_int8_resnet101_mkldnn \
+ # ${QUANT_RESNET101_MODEL_DIR}/model \
+ # ${IMAGENET_DATA_PATH})
  # Quant GoogleNet
  set(QUANT_GOOGLENET_MODEL_DIR "${QUANT_INSTALL_DIR}/GoogleNet_quant")
@@ -321,18 +326,24 @@ if(LINUX AND WITH_MKLDNN)
  set(QUANT_VGG16_MODEL_ARCHIVE "VGG16_qat_model.tar.gz")
  download_quant_model(${QUANT_VGG16_MODEL_DIR} ${QUANT_VGG16_MODEL_ARCHIVE}
  c37e63ca82a102f47be266f8068b0b55)
- # inference_quant_int8_image_classification_test(test_quant_int8_vgg16_mkldnn ${QUANT_VGG16_MODEL_DIR}/model ${IMAGENET_DATA_PATH})
+ # inference_quant_int8_image_classification_test( \
+ # test_quant_int8_vgg16_mkldnn \
+ # ${QUANT_VGG16_MODEL_DIR}/model \
+ # ${IMAGENET_DATA_PATH})
  # Quant VGG19
  set(QUANT_VGG19_MODEL_DIR "${QUANT_INSTALL_DIR}/VGG19_quant")
  set(QUANT_VGG19_MODEL_ARCHIVE "VGG19_qat_model.tar.gz")
  download_quant_model(${QUANT_VGG19_MODEL_DIR} ${QUANT_VGG19_MODEL_ARCHIVE}
  62bcd4b6c3ca2af67e8251d1c96ea18f)
- # inference_quant_int8_image_classification_test(test_quant_int8_vgg19_mkldnn ${QUANT_VGG19_MODEL_DIR}/model ${IMAGENET_DATA_PATH})
+ # inference_quant_int8_image_classification_test( \
+ # test_quant_int8_vgg19_mkldnn ${QUANT_VGG19_MODEL_DIR}/model \
+ # ${IMAGENET_DATA_PATH})
  ### Quant2 for image classification
- # Quant2 ResNet50 with input/output scales in `fake_quantize_moving_average_abs_max` operators,
+ # Quant2 ResNet50 with input/output scales in
+ # `fake_quantize_moving_average_abs_max` operators,
  # with weight scales in `fake_dequantize_max_abs` operators
  set(QUANT2_RESNET50_MODEL_DIR "${QUANT_INSTALL_DIR}/ResNet50_quant2")
  set(QUANT2_RESNET50_MODEL_ARCHIVE "ResNet50_qat_perf.tar.gz")
@@ -345,7 +356,8 @@ if(LINUX AND WITH_MKLDNN)
  ${QUANT2_RESNET50_MODEL_DIR}/ResNet50_qat_perf/float
  ${FP32_RESNET50_MODEL_DIR}/model ${IMAGENET_DATA_PATH})
- # Quant2 ResNet50 with input/output scales in `fake_quantize_range_abs_max` operators and the `out_threshold` attributes,
+ # Quant2 ResNet50 with input/output scales in `fake_quantize_range_abs_max`
+ # operators and the `out_threshold` attributes,
  # with weight scales in `fake_dequantize_max_abs` operators
  set(QUANT2_RESNET50_RANGE_MODEL_DIR
  "${QUANT_INSTALL_DIR}/ResNet50_quant2_range")
@@ -358,7 +370,8 @@ if(LINUX AND WITH_MKLDNN)
  ${QUANT2_RESNET50_RANGE_MODEL_DIR}/ResNet50_qat_range
  ${FP32_RESNET50_MODEL_DIR}/model ${IMAGENET_DATA_PATH})
- # Quant2 ResNet50 with input/output scales in `fake_quantize_range_abs_max` operators and the `out_threshold` attributes,
+ # Quant2 ResNet50 with input/output scales in `fake_quantize_range_abs_max`
+ # operators and the `out_threshold` attributes,
  # with weight scales in `fake_channel_wise_dequantize_max_abs` operators
  set(QUANT2_RESNET50_CHANNELWISE_MODEL_DIR
  "${QUANT_INSTALL_DIR}/ResNet50_quant2_channelwise")
...
@@ -367,7 +367,7 @@ if(APPLE)
  if(NOT WITH_DISTRIBUTE)
  list(REMOVE_ITEM TEST_OPS test_desc_clone)
  list(REMOVE_ITEM TEST_OPS test_program_code)
- endif(NOT WITH_DISTRIBUTE)
+ endif()
  message(
  WARNING
  "These tests has been disabled in OSX before being fixed:\n test_fuse_elewise_add_act_pass \n test_detection_map_op \n test_dist_se_resnext_*"
@@ -683,7 +683,7 @@ endif()
  foreach(TEST_OP ${TEST_OPS})
  py_test_modules(${TEST_OP} MODULES ${TEST_OP})
- endforeach(TEST_OP)
+ endforeach()
  set_tests_properties(test_logcumsumexp_op PROPERTIES TIMEOUT 30)
  py_test_modules(test_adam_op_multi_thread MODULES test_adam_op ENVS
  FLAGS_inner_op_parallelism=4)
@@ -873,8 +873,8 @@ if(WITH_DISTRIBUTE)
  test_fleet_localsgd_meta_optimizer ENVS ${dist_ENVS})
  endif()
- endif(NOT WIN32)
+ endif()
- endif(NOT APPLE)
+ endif()
  if(WITH_DGC)
  # if with dgc, test all dgc tests.
  # NOTE. dist dgc tests is already in DIST_TEST_OPS
@@ -938,7 +938,7 @@ if(WITH_DISTRIBUTE)
  message(
  FATAL_ERROR "available ports have been exhausted:${dist_ut_port}")
  endif()
- endforeach(TEST_OP)
+ endforeach()
  # solve it later.
  bash_test_modules(
  test_fleet_launch_ps
@@ -974,7 +974,7 @@ if(WITH_DISTRIBUTE)
  "PADDLE_DIST_UT_PORT=${dist_ut_port}+20"
  PADDLE_BINARY_DIR=${PADDLE_BINARY_DIR})
  endif()
- endif(NOT APPLE)
+ endif()
  endif()
  py_test_modules(test_parallel_executor_crf MODULES test_parallel_executor_crf)
...
@@ -10,7 +10,7 @@ list(REMOVE_ITEM TEST_OPS "test_fleet_with_asp_sharding")
  foreach(TEST_OP ${TEST_OPS})
  py_test_modules(${TEST_OP} MODULES ${TEST_OP})
- endforeach(TEST_OP)
+ endforeach()
  if(WITH_DISTRIBUTE)
  if(WITH_GPU
...
@@ -7,7 +7,7 @@ set(GC_ENVS FLAGS_eager_delete_tensor_gb=0.0)
  foreach(TEST_OP ${TEST_OPS})
  py_test_modules(${TEST_OP} MODULES ${TEST_OP} ENVS ${GC_ENVS})
- endforeach(TEST_OP)
+ endforeach()
  set_tests_properties(test_autograd_functional_dynamic PROPERTIES TIMEOUT 160)
  set_tests_properties(test_autograd_functional_static PROPERTIES TIMEOUT 160)
...
@@ -27,4 +27,4 @@ foreach(TEST_OP ${TEST_OPS})
  list(APPEND DIST_TEST_OPS ${TEST_OP})
  set_tests_properties(${TEST_OP} PROPERTIES TIMEOUT 120)
  set_tests_properties(${TEST_OP} PROPERTIES LABELS "RUN_TYPE=DIST")
- endforeach(TEST_OP)
+ endforeach()
@@ -6,4 +6,4 @@ string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
  foreach(TEST_OP ${TEST_OPS})
  py_test_modules(${TEST_OP} MODULES ${TEST_OP})
- endforeach(TEST_OP)
+ endforeach()
@@ -32,8 +32,8 @@ set(TEST_EAGER_OPS
  test_simnet
  test_transformer)
  list(REMOVE_ITEM TEST_OPS test_lac)
- # NOTE(Aurelius84): In case of Windows CI, if open ON_INFER, RWLOCK of Scope will
- # be removed and will cause some random failed in multi-thread.
+ # NOTE(Aurelius84): In case of Windows CI, if open ON_INFER, RWLOCK of Scope
+ # will be removed and will cause some random failed in multi-thread.
  if(NOT ON_INFER)
  py_test_modules(test_lac MODULES test_lac ENVS FLAGS_enable_eager_mode=1)
  set_tests_properties(test_lac PROPERTIES TIMEOUT 120)
@@ -51,7 +51,7 @@ foreach(TEST_OP ${TEST_OPS})
  else()
  py_test_modules(${TEST_OP} MODULES ${TEST_OP} ENVS ${GC_ENVS})
  endif()
- endforeach(TEST_OP)
+ endforeach()
  set_tests_properties(test_se_resnet PROPERTIES TIMEOUT 900)
  set_tests_properties(test_yolov3 PROPERTIES TIMEOUT 900 LABELS
...
@@ -6,4 +6,4 @@ string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
  foreach(TEST_OP ${TEST_OPS})
  py_test_modules(${TEST_OP} MODULES ${TEST_OP})
- endforeach(TEST_OP)
+ endforeach()
@@ -9,7 +9,7 @@ if(WITH_IPU)
  py_test_modules(${TEST_OP} MODULES ${TEST_OP})
  # set all UTs timeout to 200s
  set_tests_properties(${TEST_OP} PROPERTIES TIMEOUT 200)
- endforeach(TEST_OP)
+ endforeach()
  set_tests_properties(test_conv_op_ipu PROPERTIES TIMEOUT 300)
  set_tests_properties(test_elemetwise_x_op_ipu PROPERTIES TIMEOUT 300)
...
@@ -6,7 +6,7 @@ string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
  foreach(TEST_OP ${TEST_OPS})
  py_test_modules(${TEST_OP} MODULES ${TEST_OP})
- endforeach(TEST_OP)
+ endforeach()
  set_tests_properties(test_concat_mkldnn_op PROPERTIES TIMEOUT 120)
  set_tests_properties(test_conv3d_mkldnn_op PROPERTIES TIMEOUT 120)
  set_tests_properties(test_flags_mkldnn_ops_on_off PROPERTIES TIMEOUT 120)
@@ -7,12 +7,13 @@ string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
  if(WITH_ASCEND_CL)
  foreach(TEST_OP ${TEST_OPS})
  py_test_modules(${TEST_OP} MODULES ${TEST_OP})
- endforeach(TEST_OP)
+ endforeach()
  # NOTE: NPU `get_float_status` read the value from register, During the test,
- # it is found that this register will be overwritten by any program on the card.
- # In order to prevent the interference of nan/inf in the other unittests, we
- # need to set the unittests related to `float_status` to exclusive.
+ # it is found that this register will be overwritten by any program on the
+ # card. In order to prevent the interference of nan/inf in the other
+ # unittests, we need to set the unittests related to `float_status` to
+ # exclusive.
  set_tests_properties(test_amp_check_finite_and_scale_op_npu
  PROPERTIES LABELS "RUN_TYPE=EXCLUSIVE")
  set_tests_properties(test_flags_check_nan_inf_npu
...
@@ -8,4 +8,4 @@ foreach(TEST_OP ${TEST_OPS})
  py_test_modules(${TEST_OP} MODULES ${TEST_OP})
  list(APPEND TEST_OPS ${TEST_OP})
  set_tests_properties(${TEST_OP} PROPERTIES TIMEOUT 50)
- endforeach(TEST_OP)
+ endforeach()
@@ -6,7 +6,7 @@ string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
  foreach(TEST_OP ${TEST_OPS})
  py_test_modules(${TEST_OP} MODULES ${TEST_OP})
- endforeach(TEST_OP)
+ endforeach()
  if(NOT WIN32)
  set_tests_properties(test_rnn_nets_static PROPERTIES TIMEOUT 120)
  set_tests_properties(test_rnn_nets PROPERTIES TIMEOUT 120)
...
@@ -6,7 +6,7 @@ string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
  foreach(TEST_OP ${TEST_OPS})
  py_test_modules(${TEST_OP} MODULES ${TEST_OP})
- endforeach(TEST_OP)
+ endforeach()
  set_tests_properties(test_sequence_conv PROPERTIES TIMEOUT 120)
  set_tests_properties(test_sequence_concat PROPERTIES TIMEOUT 120)
  set_tests_properties(test_sequence_pool PROPERTIES TIMEOUT 120)
@@ -21,11 +21,11 @@ list(REMOVE_ITEM TEST_OPS test_mean_op_xpu)
  foreach(TEST_OP ${TEST_OPS})
  py_test_modules(${TEST_OP} MODULES ${TEST_OP})
- endforeach(TEST_OP)
+ endforeach()
  foreach(TEST_OP ${DIST_TEST_OPS})
  py_test_modules(${TEST_OP} MODULES ${TEST_OP})
- endforeach(TEST_OP)
+ endforeach()
  set_tests_properties(test_mul_op_xpu PROPERTIES TIMEOUT 120)
  set_tests_properties(test_conv2d_op_xpu PROPERTIES TIMEOUT 120)