未验证 提交 59f89236 编写于 作者: W Wilber 提交者: GitHub

fix cmake-lint problems. (#43406)

* cmake-lint

* update
上级 d74d1838
......@@ -81,64 +81,3 @@ repos:
- id: cmakelint
args: [--config=./tools/codestyle/.cmakelintrc]
# exclude files which need to be fixed
exclude: |
(?x)^(
CMakeLists.txt|
python/paddle/fluid/tests/unittests/CMakeLists.txt|
paddle/fluid/inference/tests/infer_ut/CMakeLists.txt|
cmake/configure.cmake|
paddle/fluid/inference/api/demo_ci/CMakeLists.txt|
cmake/flags.cmake|
cmake/inference_lib.cmake|
cmake/external/protobuf.cmake|
paddle/fluid/framework/fleet/CMakeLists.txt|
paddle/fluid/inference/CMakeLists.txt|
paddle/fluid/inference/tests/api/CMakeLists.txt|
paddle/fluid/operators/CMakeLists.txt|
cmake/external/lite.cmake|
cmake/external/poplar.cmake|
cmake/python_module.cmake|
python/paddle/fluid/tests/unittests/asp/CMakeLists.txt|
cmake/cuda.cmake|
cmake/FindNumPy.cmake|
cmake/coveralls.cmake|
cmake/external/glog.cmake|
cmake/external/onnxruntime.cmake|
cmake/external/openblas.cmake|
cmake/external/xpu.cmake|
cmake/hip.cmake|
paddle/fluid/inference/analysis/ir_passes/CMakeLists.txt|
paddle/fluid/inference/api/CMakeLists.txt|
paddle/fluid/operators/controlflow/CMakeLists.txt|
python/paddle/fluid/tests/unittests/distributed_passes/CMakeLists.txt|
cmake/operators.cmake|
cmake/tensorrt.cmake|
paddle/fluid/inference/api/details/CMakeLists.txt|
python/paddle/fluid/tests/unittests/xpu/CMakeLists.txt|
cmake/external/arm_brpc.cmake|
cmake/external/concurrentqueue.cmake|
cmake/external/eigen.cmake|
cmake/external/mklml.cmake|
cmake/external/paddle2onnx.cmake|
cmake/miopen.cmake|
cmake/nccl.cmake|
cmake/simd.cmake|
paddle/fluid/inference/analysis/CMakeLists.txt|
paddle/fluid/inference/tests/infer_ut/external-cmake/gtest-cpp.cmake|
paddle/fluid/memory/allocation/CMakeLists.txt|
paddle/fluid/memory/CMakeLists.txt|
paddle/fluid/operators/cinn/CMakeLists.txt|
paddle/infrt/external_kernels/CMakeLists.txt|
paddle/infrt/kernel/phi/CMakeLists.txt|
python/paddle/fluid/contrib/slim/tests/CMakeLists.txt|
python/paddle/fluid/tests/unittests/autograd/CMakeLists.txt|
python/paddle/fluid/tests/unittests/distribution/CMakeLists.txt|
python/paddle/fluid/tests/unittests/dygraph_to_static/CMakeLists.txt|
python/paddle/fluid/tests/unittests/fft/CMakeLists.txt|
python/paddle/fluid/tests/unittests/ipu/CMakeLists.txt|
python/paddle/fluid/tests/unittests/mkldnn/CMakeLists.txt|
python/paddle/fluid/tests/unittests/npu/CMakeLists.txt|
python/paddle/fluid/tests/unittests/ps/CMakeLists.txt|
python/paddle/fluid/tests/unittests/rnn/CMakeLists.txt|
python/paddle/fluid/tests/unittests/sequence/CMakeLists.txt
)$
......@@ -16,10 +16,10 @@ if(APPLE AND WITH_ARM)
# cmake 3.19.2 version starts to support M1
cmake_minimum_required(VERSION 3.19.2)
cmake_policy(VERSION 3.19.2)
else(APPLE AND WITH_ARM)
else()
cmake_minimum_required(VERSION 3.15)
cmake_policy(VERSION 3.10)
endif(APPLE AND WITH_ARM)
endif()
# use to get_property location of static lib
# https://cmake.org/cmake/help/v3.0/policy/CMP0026.html?highlight=cmp0026
cmake_policy(SET CMP0026 OLD)
......@@ -152,7 +152,7 @@ if(WIN32)
if(${flag_var} MATCHES "/MD")
string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
endif()
endforeach(flag_var)
endforeach()
endif()
# NOTE(zhouwei): msvc max/min macro conflict with std::min/max, define NOMINMAX globally
......@@ -179,10 +179,10 @@ if(WIN32)
math(EXPR PROCESS_MAX "${CPU_CORES} * 2 / 3")
set(${flag_var} "${${flag_var}} /MP${PROCESS_MAX}")
endif()
endforeach(flag_var)
endforeach()
foreach(flag_var CMAKE_CXX_FLAGS CMAKE_C_FLAGS)
set(${flag_var} "${${flag_var}} /w")
endforeach(flag_var)
endforeach()
# Windows Remove /Zi, /ZI for Release, MinSizeRel builds
foreach(flag_var
......@@ -191,7 +191,7 @@ if(WIN32)
if(${flag_var} MATCHES "/Z[iI]")
string(REGEX REPLACE "/Z[iI]" "" ${flag_var} "${${flag_var}}")
endif()
endforeach(flag_var)
endforeach()
set(CMAKE_C_FLAGS
"${CMAKE_C_FLAGS} /wd4068 /wd4129 /wd4244 /wd4267 /wd4297 /wd4530 /wd4577 /wd4819 /wd4838"
......@@ -207,7 +207,7 @@ if(WIN32)
if(MSVC_STATIC_CRT)
set(${flag_var} "${${flag_var}} /NODEFAULTLIB:MSVCRT.LIB")
endif()
endforeach(flag_var)
endforeach()
if(WITH_WIN_DUMP_DBG)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /Zi")
......@@ -216,16 +216,16 @@ if(WIN32)
foreach(flag_var CMAKE_SHARED_LINKER_FLAGS CMAKE_STATIC_LINKER_FLAGS
CMAKE_EXE_LINKER_FLAGS CMAKE_LINKER_FLAGS)
set(${flag_var} "${${flag_var}} /DEBUG /OPT:REF /OPT:ICF")
endforeach(flag_var)
endforeach()
add_definitions("-DWITH_WIN_DUMP_DBG")
endif()
else(WIN32)
else()
set(CMAKE_CXX_FLAGS
"${CMAKE_CXX_FLAGS} -Wno-error=deprecated-declarations -Wno-deprecated-declarations"
)
endif(WIN32)
endif()
find_package(Git REQUIRED)
......@@ -430,7 +430,7 @@ endif()
if(WITH_ROCM)
include(hip)
include(miopen) # set miopen libraries, must before configure
endif(WITH_ROCM)
endif()
if(WITH_XPU_KP)
include(xpu_kp)
......
......@@ -26,7 +26,7 @@ if(PYTHON_EXECUTABLE)
OUTPUT_VARIABLE NUMPY_PATH)
elseif(_numpy_out)
message(STATUS "Python executable not found.")
endif(PYTHON_EXECUTABLE)
endif()
find_path(PYTHON_NUMPY_INCLUDE_DIR numpy/arrayobject.h
HINTS "${NUMPY_PATH}" "${PYTHON_INCLUDE_PATH}")
......@@ -35,7 +35,7 @@ if(PYTHON_NUMPY_INCLUDE_DIR)
set(PYTHON_NUMPY_FOUND
1
CACHE INTERNAL "Python numpy found")
endif(PYTHON_NUMPY_INCLUDE_DIR)
endif()
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(NumPy DEFAULT_MSG PYTHON_NUMPY_INCLUDE_DIR)
......@@ -14,19 +14,19 @@
if(NOT WITH_PYTHON)
add_definitions(-DPADDLE_NO_PYTHON)
endif(NOT WITH_PYTHON)
endif()
if(WITH_TESTING)
add_definitions(-DPADDLE_WITH_TESTING)
endif(WITH_TESTING)
endif()
if(WITH_INFERENCE_API_TEST)
add_definitions(-DPADDLE_WITH_INFERENCE_API_TEST)
endif(WITH_INFERENCE_API_TEST)
endif()
if(NOT WITH_PROFILER)
add_definitions(-DPADDLE_DISABLE_PROFILER)
endif(NOT WITH_PROFILER)
endif()
if(WITH_AVX AND AVX_FOUND)
set(SIMD_FLAG ${AVX_FLAG})
......@@ -60,8 +60,8 @@ if(WIN32)
FATAL
"Windows build only support msvc. Which was binded by the nvcc compiler of NVIDIA."
)
endif(NOT MSVC)
endif(WIN32)
endif()
endif()
if(WITH_MUSL)
add_definitions(-DPADDLE_WITH_MUSL)
......@@ -195,9 +195,9 @@ if(WITH_MKLML AND MKLML_IOMP_LIB)
if(WIN32)
# OpenMP is not well supported on Windows for now
set(OPENMP_FLAGS "")
else(WIN32)
else()
set(OPENMP_FLAGS "-fopenmp")
endif(WIN32)
endif()
set(CMAKE_C_CREATE_SHARED_LIBRARY_FORBIDDEN_FLAGS ${OPENMP_FLAGS})
set(CMAKE_CXX_CREATE_SHARED_LIBRARY_FORBIDDEN_FLAGS ${OPENMP_FLAGS})
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OPENMP_FLAGS}")
......@@ -221,15 +221,15 @@ endif()
if(WITH_BRPC_RDMA)
add_definitions(-DPADDLE_WITH_BRPC_RDMA)
endif(WITH_BRPC_RDMA)
endif()
if(ON_INFER)
add_definitions(-DPADDLE_ON_INFERENCE)
endif(ON_INFER)
endif()
if(WITH_CRYPTO)
add_definitions(-DPADDLE_WITH_CRYPTO)
endif(WITH_CRYPTO)
endif()
if(WITH_CUSTOM_DEVICE AND NOT WIN32)
add_definitions(-DPADDLE_WITH_CUSTOM_DEVICE)
......
......@@ -96,7 +96,7 @@ if(WITH_COVERAGE)
if(NOT ${EXCLUDE_DIR_FOUND} EQUAL -1)
list(REMOVE_ITEM PADDLE_SOURCES ${TMP_PATH})
endif()
endforeach(TMP_PATH)
endforeach()
endforeach()
# convert to absolute path
......
......@@ -274,7 +274,7 @@ set(CMAKE_CUDA_STANDARD 14)
# So replace /W[1-4] with /W0
if(WIN32)
string(REGEX REPLACE "/W[1-4]" " /W0 " CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS}")
endif(WIN32)
endif()
# in cuda9, suppress cuda warning on eigen
set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -w")
# Set :expt-relaxed-constexpr to suppress Eigen warnings
......@@ -293,7 +293,7 @@ if(WIN32)
if(${flag_var} MATCHES "-MD")
string(REGEX REPLACE "-MD" "-MT" ${flag_var} "${${flag_var}}")
endif()
endforeach(flag_var)
endforeach()
endif()
endif()
......
......@@ -25,7 +25,8 @@ if(WIN32)
elseif(LINUX)
if(WITH_ROCM)
# For HIPCC Eigen::internal::device::numeric_limits is not EIGEN_DEVICE_FUNC
# which will cause compiler error of using __host__ function in __host__ __device__
# which will cause compiler error of using __host__ function
# in __host__ __device__
file(TO_NATIVE_PATH ${PADDLE_SOURCE_DIR}/patches/eigen/Meta.h native_src)
file(TO_NATIVE_PATH ${EIGEN_SOURCE_DIR}/Eigen/src/Core/util/Meta.h
native_dst)
......
......@@ -28,12 +28,12 @@ if(WIN32)
CACHE FILEPATH "glog library." FORCE)
set(GLOG_CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4267 /wd4530")
add_definitions("/DGOOGLE_GLOG_DLL_DECL=")
else(WIN32)
else()
set(GLOG_LIBRARIES
"${GLOG_INSTALL_DIR}/lib/libglog.a"
CACHE FILEPATH "glog library." FORCE)
set(GLOG_CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
endif(WIN32)
endif()
include_directories(${GLOG_INCLUDE_DIR})
......
......@@ -32,7 +32,8 @@ if(WIN32)
set(MKLML_SHARED_IOMP_LIB ${MKLML_LIB_DIR}/libiomp5md.dll)
else()
#TODO(intel-huying):
# Now enable csrmm function in mklml library temporarily, it will be updated as official version later.
# Now enable csrmm function in mklml library temporarily,
# it will be updated as official version later.
set(MKLML_VER
"csrmm_mklml_lnx_2019.0.5"
CACHE STRING "" FORCE)
......@@ -51,8 +52,9 @@ message(STATUS "MKLML_VER: ${MKLML_VER}, MKLML_URL: ${MKLML_URL}")
set(MKLML_PREFIX_DIR ${THIRD_PARTY_PATH}/mklml)
set(MKLML_SOURCE_DIR ${THIRD_PARTY_PATH}/mklml/src/extern_mklml)
# Ninja Generator can not establish the correct dependency relationship between the imported library with target,
# the product file in the ExternalProject need to be specified manually, please refer to
# Ninja Generator can not establish the correct dependency relationship
# between the imported library with target, the product file
# in the ExternalProject need to be specified manually, please refer to
# https://stackoverflow.com/questions/54866067/cmake-and-ninja-missing-and-no-known-rule-to-make-it
# The same applies to all other ExternalProjects.
ExternalProject_Add(
......
......@@ -58,7 +58,7 @@ if(NOT WIN32)
UPDATE_COMMAND ""
CONFIGURE_COMMAND ""
BUILD_BYPRODUCTS ${CBLAS_LIBRARIES})
else(NOT WIN32)
else()
set(CBLAS_LIBRARIES
"${CBLAS_INSTALL_DIR}/lib/openblas${CMAKE_STATIC_LIBRARY_SUFFIX}"
CACHE FILEPATH "openblas library." FORCE)
......@@ -92,4 +92,4 @@ else(NOT WIN32)
BUILD_BYPRODUCTS ${CBLAS_LIBRARIES})
set(OPENBLAS_SHARED_LIB
${CBLAS_INSTALL_DIR}/bin/openblas${CMAKE_SHARED_LIBRARY_SUFFIX})
endif(NOT WIN32)
endif()
......@@ -69,7 +69,7 @@ else()
set(PADDLE2ONNX_COMPILE_LIB
"${PADDLE2ONNX_INSTALL_DIR}/lib/libpaddle2onnx.so"
CACHE FILEPATH "paddle2onnx compile library." FORCE)
endif(WIN32)
endif()
if(WIN32)
set(PADDLE2ONNX_URL
......
......@@ -16,7 +16,7 @@ include(ExternalProject)
# Always invoke `FIND_PACKAGE(Protobuf)` for importing function protobuf_generate_cpp
if(NOT WIN32)
find_package(Protobuf QUIET)
endif(NOT WIN32)
endif()
unset_var(PROTOBUF_INCLUDE_DIR)
unset_var(PROTOBUF_FOUND)
......@@ -147,7 +147,7 @@ set(PROTOBUF_ROOT
CACHE PATH "Folder contains protobuf")
if(WIN32)
set(PROTOBUF_ROOT ${THIRD_PARTY_PATH}/install/protobuf)
endif(WIN32)
endif()
if(NOT "${PROTOBUF_ROOT}" STREQUAL "")
find_path(
......@@ -349,4 +349,4 @@ if(NOT PROTOBUF_FOUND)
# `protoc.exe` existed before calling it.
set(EXTERN_PROTOBUF_DEPEND extern_protobuf)
prompt_protobuf_lib(extern_protobuf)
endif(NOT PROTOBUF_FOUND)
endif()
......@@ -134,9 +134,9 @@ if(WITH_XPU_BKCL)
set(XPU_BKCL_INC_DIR "${THIRD_PARTY_PATH}/install/xpu/include")
include_directories(${XPU_BKCL_INC_DIR})
target_link_libraries(xpulib ${XPU_API_LIB} ${XPU_RT_LIB} ${XPU_BKCL_LIB})
else(WITH_XPU_BKCL)
else()
target_link_libraries(xpulib ${XPU_API_LIB} ${XPU_RT_LIB})
endif(WITH_XPU_BKCL)
endif()
add_dependencies(xpulib ${XPU_PROJECT})
......
......@@ -113,10 +113,10 @@ check_type_size(pthread_spinlock_t SPINLOCK_FOUND)
check_type_size(pthread_barrier_t BARRIER_FOUND)
if(SPINLOCK_FOUND)
add_definitions(-DPADDLE_USE_PTHREAD_SPINLOCK)
endif(SPINLOCK_FOUND)
endif()
if(BARRIER_FOUND)
add_definitions(-DPADDLE_USE_PTHREAD_BARRIER)
endif(BARRIER_FOUND)
endif()
set(CMAKE_EXTRA_INCLUDE_FILES "")
# Only one sanitizer is allowed in compile time
......@@ -180,7 +180,7 @@ if(NOT WIN32)
-Wno-parentheses # Warning in Eigen gcc 8.3
)
endif()
endif(NOT APPLE)
endif()
set(GPU_COMMON_FLAGS
-fPIC
......@@ -200,21 +200,21 @@ if(NOT WIN32)
AND NOT WITH_MIPS)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -m64")
endif()
endif(NOT WIN32)
endif()
if(APPLE)
if(WITH_ARM)
set(CMAKE_OSX_ARCHITECTURES
"arm64"
CACHE STRING "Build architectures for OSX" FORCE)
else(WITH_ARM)
else()
set(CMAKE_OSX_ARCHITECTURES
"x86_64"
CACHE STRING "Build architectures for OSX" FORCE)
endif(WITH_ARM)
endif()
# On Mac OS X register class specifier is deprecated and will cause warning error on latest clang 10.0
set(COMMON_FLAGS -Wno-deprecated-register)
endif(APPLE)
endif()
if(WITH_HETERPS AND WITH_PSLIB)
set(COMMON_FLAGS -D_GLIBCXX_USE_CXX11_ABI=0 ${COMMON_FLAGS})
......@@ -224,7 +224,7 @@ endif()
if(LINUX)
set(GPU_COMMON_FLAGS -Wall -Wextra -Werror ${GPU_COMMON_FLAGS})
endif(LINUX)
endif()
foreach(flag ${COMMON_FLAGS})
safe_set_cflag(CMAKE_C_FLAGS ${flag})
......
......@@ -112,7 +112,7 @@ if(CMAKE_BUILD_TYPE MATCHES Debug)
list(APPEND HIP_CXX_FLAGS -g2)
list(APPEND HIP_CXX_FLAGS -O0)
list(APPEND HIP_HIPCC_FLAGS -fdebug-info-for-profiling)
endif(CMAKE_BUILD_TYPE MATCHES Debug)
endif()
set(HIP_HCC_FLAGS ${HIP_CXX_FLAGS})
set(HIP_CLANG_FLAGS ${HIP_CXX_FLAGS})
......
......@@ -59,14 +59,14 @@ function(copy TARGET)
POST_BUILD
COMMAND ${PYTHON_EXECUTABLE} ${COPY_SCRIPT_DIR}/copyfile.py
${native_src} ${native_dst})
else(WIN32) #not windows
else() #not windows
add_custom_command(
TARGET ${TARGET}
POST_BUILD
COMMAND mkdir -p "${dst}"
COMMAND cp -r "${src}" "${dst}"
COMMENT "copying ${src} -> ${dst}")
endif(WIN32) # not windows
endif() # not windows
endforeach()
endfunction()
......@@ -265,7 +265,7 @@ if(WIN32)
DSTS ${PADDLE_INFERENCE_INSTALL_DIR}/paddle/include
${PADDLE_INFERENCE_INSTALL_DIR}/paddle/lib
${PADDLE_INFERENCE_INSTALL_DIR}/paddle/lib)
else(WIN32)
else()
set(paddle_inference_lib
${PADDLE_BINARY_DIR}/paddle/fluid/inference/libpaddle_inference.*)
copy(
......@@ -273,7 +273,7 @@ else(WIN32)
SRCS ${src_dir}/inference/api/paddle_*.h ${paddle_inference_lib}
DSTS ${PADDLE_INFERENCE_INSTALL_DIR}/paddle/include
${PADDLE_INFERENCE_INSTALL_DIR}/paddle/lib)
endif(WIN32)
endif()
copy(
inference_lib_dist
......@@ -350,11 +350,11 @@ set(src_dir "${PADDLE_SOURCE_DIR}/paddle/fluid")
if(WIN32)
set(paddle_inference_c_lib
$<TARGET_FILE_DIR:paddle_inference_c>/paddle_inference_c.*)
else(WIN32)
else()
set(paddle_inference_c_lib
${PADDLE_BINARY_DIR}/paddle/fluid/inference/capi_exp/libpaddle_inference_c.*
)
endif(WIN32)
endif()
copy(
inference_lib_dist
......@@ -436,7 +436,7 @@ set(module "platform")
set(platform_lib_deps profiler_proto errors)
if(WITH_GPU)
set(platform_lib_deps ${platform_lib_deps} external_error_proto)
endif(WITH_GPU)
endif()
add_dependencies(fluid_lib_dist ${platform_lib_deps})
copy(
......
......@@ -65,10 +65,9 @@ macro(find_miopen_version miopen_header_file)
math(EXPR MIOPEN_VERSION "${MIOPEN_MAJOR_VERSION} * 1000 +
${MIOPEN_MINOR_VERSION} * 10 + ${MIOPEN_PATCH_VERSION}")
message(
STATUS
"Current MIOpen header is ${MIOPEN_INCLUDE_DIR}/miopen/miopen.h "
"Current MIOpen version is v${MIOPEN_MAJOR_VERSION}.${MIOPEN_MINOR_VERSION}.${MIOPEN_PATCH_VERSION}. "
)
STATUS "Current MIOpen header is ${MIOPEN_INCLUDE_DIR}/miopen/miopen.h "
"Current MIOpen version is v${MIOPEN_MAJOR_VERSION}.\
${MIOPEN_MINOR_VERSION}.${MIOPEN_PATCH_VERSION}. ")
endif()
endmacro()
......
......@@ -50,10 +50,8 @@ if(WITH_NCCL)
endif()
add_definitions("-DNCCL_VERSION_CODE=$NCCL_VERSION")
message(
STATUS
"Current NCCL header is ${NCCL_INCLUDE_DIR}/nccl.h. "
"Current NCCL version is v${NCCL_MAJOR_VERSION}.${NCCL_MINOR_VERSION}.${NCCL_PATCH_VERSION} "
)
message(STATUS "Current NCCL header is ${NCCL_INCLUDE_DIR}/nccl.h. "
"Current NCCL version is \
v${NCCL_MAJOR_VERSION}.${NCCL_MINOR_VERSION}.${NCCL_PATCH_VERSION} ")
endif()
endif()
......@@ -217,7 +217,7 @@ function(op_library TARGET)
return()
endif()
endforeach()
endif(WIN32)
endif()
# Unity Build relies on global option `WITH_UNITY_BUILD` and local option `UNITY`.
if(WITH_UNITY_BUILD AND op_library_UNITY)
......
......@@ -22,8 +22,8 @@ function(find_python_module module)
set(PY_${module_upper}
${_${module}_location}
CACHE STRING "Location of Python module ${module}")
endif(NOT _${module}_status)
endif(NOT PY_${module_upper})
endif()
endif()
find_package_handle_standard_args(PY_${module} DEFAULT_MSG PY_${module_upper})
if(NOT PY_${module_upper}_FOUND AND ${module}_FIND_REQUIRED)
message(FATAL_ERROR "python module ${module} is not found")
......@@ -39,7 +39,7 @@ function(find_python_module module)
set(PY_${module_upper}_VERSION
${_${module}_version}
CACHE STRING "Version of Python module ${module}")
endif(NOT _${module}_status)
endif()
set(PY_${module_upper}_FOUND
${PY_${module_upper}_FOUND}
......@@ -47,4 +47,4 @@ function(find_python_module module)
set(PY_${module_upper}_VERSION
${PY_${module_upper}_VERSION}
PARENT_SCOPE)
endfunction(find_python_module)
endfunction()
......@@ -81,10 +81,10 @@ check_cxx_source_runs(
#include <immintrin.h>
int main()
{
__m256 a = _mm256_set_ps (-1.0f, 2.0f, -3.0f, 4.0f, -1.0f, 2.0f, -3.0f, 4.0f);
__m256 b = _mm256_set_ps (1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f);
__m256 result = _mm256_add_ps (a, b);
return 0;
__m256 a = _mm256_set_ps(-1.0f, 2.0f, -3.0f, 4.0f, -1.0f, 2.0f, -3.0f, 4.0f);
__m256 b = _mm256_set_ps(1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f);
__m256 result = _mm256_add_ps(a, b);
return 0;
}"
AVX_FOUND)
......
......@@ -5,7 +5,7 @@ if(WITH_PSLIB)
if(NOT WITH_HETERPS)
set(BRPC_DEPS brpc)
endif()
endif(WITH_PSLIB_BRPC)
endif()
cc_library(
fleet_wrapper
SRCS fleet_wrapper.cc
......@@ -21,7 +21,7 @@ else()
fleet_wrapper
SRCS fleet_wrapper.cc
DEPS framework_proto variable_helper scope)
endif(WITH_PSLIB)
endif()
if(WITH_HETERPS)
if(WITH_NCCL AND WITH_GPU)
......@@ -48,7 +48,7 @@ else()
ps_gpu_wrapper
SRCS ps_gpu_wrapper.cc
DEPS gloo_wrapper)
endif(WITH_HETERPS)
endif()
if(WITH_NCCL OR WITH_RCCL)
cc_library(
......@@ -74,7 +74,7 @@ else()
box_wrapper
SRCS box_wrapper.cc
DEPS framework_proto lod_tensor)
endif(WITH_BOX_PS)
endif()
if(WITH_GLOO)
cc_library(
......@@ -94,7 +94,7 @@ else()
metrics
SRCS metrics.cc
DEPS gloo_wrapper)
endif(WITH_GLOO)
endif()
if(WITH_PSLIB)
set(DISTRIBUTE_COMPILE_FLAGS
......
......@@ -97,7 +97,7 @@ set(SHARED_INFERENCE_DEPS ${fluid_modules} phi analysis_predictor
if(WITH_CRYPTO)
set(SHARED_INFERENCE_DEPS ${SHARED_INFERENCE_DEPS} paddle_crypto)
endif(WITH_CRYPTO)
endif()
if(WITH_PSCORE)
set(SHARED_INFERENCE_DEPS ${SHARED_INFERENCE_DEPS} fleet ps_service
......@@ -108,7 +108,7 @@ if(WITH_ONNXRUNTIME)
set(SHARED_INFERENCE_SRCS
${SHARED_INFERENCE_SRCS}
${CMAKE_CURRENT_SOURCE_DIR}/api/onnxruntime_predictor.cc)
endif(WITH_ONNXRUNTIME)
endif()
# Create shared inference library
cc_library(
......
......@@ -87,7 +87,7 @@ function(inference_analysis_test TARGET)
inference_base_test_run(${TARGET} COMMAND ${TARGET} ARGS
${analysis_test_ARGS})
endif()
endfunction(inference_analysis_test)
endfunction()
if(NOT APPLE AND NOT WIN32)
inference_analysis_test(
......
......@@ -15,7 +15,7 @@
if(APPLE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=pessimizing-move")
endif(APPLE)
endif()
add_subdirectory(details)
......@@ -84,14 +84,14 @@ if(WITH_ONNXRUNTIME)
infer_io_utils
onnxruntime
paddle2onnx)
else(WITH_ONNXRUNTIME)
else()
cc_library(
analysis_predictor
SRCS analysis_predictor.cc resource_manager.cc infer_context.cc
${mkldnn_quantizer_src}
DEPS ${inference_deps} zero_copy_tensor ir_pass_manager op_compatible_info
infer_io_utils)
endif(WITH_ONNXRUNTIME)
endif()
cc_test(
test_paddle_inference_api
......
......@@ -21,8 +21,8 @@ macro(safe_set_static_flag)
CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
if(${flag_var} MATCHES "/MD")
string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
endif(${flag_var} MATCHES "/MD")
endforeach(flag_var)
endif()
endforeach()
endmacro()
if(NOT DEFINED PADDLE_LIB)
......@@ -105,7 +105,7 @@ if(WITH_GPU)
endif()
endif()
message(STATUS "Current CUDA lib path: ${CUDA_LIB}")
endif(NOT WIN32)
endif()
endif()
if(USE_TENSORRT AND WITH_GPU)
......@@ -157,9 +157,9 @@ if(WITH_MKL)
include_directories("${MKLDNN_PATH}/include")
if(WIN32)
set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib)
else(WIN32)
else()
set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0)
endif(WIN32)
endif()
endif()
else()
set(OPENBLAS_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}openblas")
......@@ -232,7 +232,7 @@ else()
utf8proc_static
${EXTERNAL_LIB})
set(DEPS ${DEPS} shlwapi.lib)
endif(NOT WIN32)
endif()
if(WITH_GPU)
if(NOT WIN32)
......
......@@ -26,13 +26,13 @@ if(WITH_ONNXRUNTIME)
zero_copy_tensor_dummy
SRCS zero_copy_tensor_dummy.cc
DEPS onnxruntime)
else(WITH_ONNXRUNTIME)
else()
cc_library(
zero_copy_tensor
SRCS zero_copy_tensor.cc
DEPS scope lod_tensor enforce)
cc_library(zero_copy_tensor_dummy SRCS zero_copy_tensor_dummy.cc)
endif(WITH_ONNXRUNTIME)
endif()
cc_test(
zero_copy_tensor_test
......
......@@ -799,7 +799,7 @@ if(WITH_MKLDNN)
if(NOT LINUX)
download_quant_data_without_verify(${QUANT2_MobileNetV1_MODEL_DIR}
"MobileNet_qat_perf.tar.gz")
endif(NOT LINUX)
endif()
download_quant_data_without_verify(${QUANT2_INT8_MobileNetV1_MODEL_DIR}
"MobileNet_qat_perf_int8.tar.gz")
inference_analysis_api_quant_test_run(
......@@ -829,7 +829,7 @@ if(WITH_MKLDNN)
download_quant_data_without_verify(
${QUANT2_RESNET50_CHANNELWISE_MODEL_DIR}
${QUANT2_RESNET50_CHANNELWISE_MODEL_ARCHIVE})
endif(NOT LINUX)
endif()
set(QUANT2_RESNET50_MODEL
${QUANT2_RESNET50_CHANNELWISE_MODEL_DIR}/ResNet50_qat_channelwise)
inference_analysis_api_quant_test_run(
......
......@@ -22,8 +22,8 @@ macro(safe_set_static_flag)
CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
if(${flag_var} MATCHES "/MD")
string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
endif(${flag_var} MATCHES "/MD")
endforeach(flag_var)
endif()
endforeach()
endmacro()
if(NOT DEFINED PADDLE_LIB)
......@@ -106,7 +106,7 @@ if(WITH_GPU)
endif()
endif()
message(STATUS "Current CUDA lib path: ${CUDA_LIB}")
endif(NOT WIN32)
endif()
endif()
if(USE_TENSORRT AND WITH_GPU)
......@@ -182,9 +182,9 @@ if(WITH_MKL)
include_directories("${MKLDNN_PATH}/include")
if(WIN32)
set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib)
else(WIN32)
else()
set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0)
endif(WIN32)
endif()
endif()
else()
set(OPENBLAS_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}openblas")
......@@ -255,7 +255,7 @@ else()
cryptopp-static
${EXTERNAL_LIB})
set(DEPS ${DEPS} shlwapi.lib)
endif(NOT WIN32)
endif()
if(WITH_GPU)
if(NOT WIN32)
......@@ -302,7 +302,7 @@ if(WITH_GTEST)
${DEMO_NAME}
${GTEST_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/libgtest${CMAKE_STATIC_LIBRARY_SUFFIX}
)
endif(WIN32)
endif()
endif()
if(WIN32)
if("${CMAKE_GENERATOR}" MATCHES "Ninja")
......
......@@ -10,7 +10,8 @@ set(GTEST_REPOSITORY https://github.com/google/googletest.git)
set(GTEST_TAG release-1.8.1)
include_directories(${GTEST_INCLUDE_DIR})
if(WIN32)
# if use CMAKE_INSTALL_LIBDIR, the path of lib actually is install/gtest/lib/gtest.lib but GTEST_LIBRARIES
# if use CMAKE_INSTALL_LIBDIR, the path of lib actually is \
# install/gtest/lib/gtest.lib but GTEST_LIBRARIES
# is install/gtest/gtest.lib
set(GTEST_LIBRARIES
"${GTEST_INSTALL_DIR}/lib/gtest.lib"
......@@ -25,7 +26,7 @@ else()
set(GTEST_MAIN_LIBRARIES
"${GTEST_INSTALL_DIR}/${CMAKE_INSTALL_LIBDIR}/libgtest_main.a"
CACHE FILEPATH "gtest main libraries." FORCE)
endif(WIN32)
endif()
ExternalProject_Add(
extern_gtest
PREFIX gtest
......
......@@ -47,10 +47,8 @@ if(WITH_GPU)
if(WITH_TESTING AND TEST stream_safe_cuda_alloc_test)
set_tests_properties(
stream_safe_cuda_alloc_test
PROPERTIES
ENVIRONMENT
"FLAGS_use_stream_safe_cuda_allocator=true;FLAGS_allocator_strategy=auto_growth"
)
PROPERTIES ENVIRONMENT "FLAGS_use_stream_safe_cuda_allocator=true; \
FLAGS_allocator_strategy=auto_growth")
endif()
endif()
......
......@@ -261,4 +261,4 @@ if(NOT WIN32)
SRCS cuda_ipc_allocator.cc
DEPS allocator)
endif()
endif(NOT WIN32)
endif()
include(operators)
# solve "math constants not defined" problems caused by the order of inclusion
# solve "math constants not defined" problems caused by the order of inclusion
# of <cmath> and the definition of macro _USE_MATH_DEFINES
add_definitions(-D_USE_MATH_DEFINES)
......
......@@ -10,6 +10,6 @@ message(STATUS "external_kernels_lib: ${external_kernels_lib}")
add_test(
NAME run_and_check_external_kernels
COMMAND
sh -c
"${CMAKE_BINARY_DIR}/infrt/host_context/infrtexec -i ${basic_mlir} --shared_libs=${external_kernels_lib} | ${LLVM_PATH}/bin/FileCheck ${basic_mlir}"
)
sh -c "${CMAKE_BINARY_DIR}/infrt/host_context/infrtexec -i ${basic_mlir} \
--shared_libs=${external_kernels_lib} | \
${LLVM_PATH}/bin/FileCheck ${basic_mlir}")
......@@ -102,7 +102,8 @@ function(inference_quant_int8_image_classification_test target quant_model_dir
0.1)
endfunction()
# set batch_size 10 for UT only (avoid OOM). For whole dataset, use batch_size 25
# set batch_size 10 for UT only (avoid OOM).
# For whole dataset, use batch_size 25
function(inference_quant2_int8_image_classification_test target quant_model_dir
fp32_model_dir dataset_path)
py_test(
......@@ -127,7 +128,8 @@ function(inference_quant2_int8_image_classification_test target quant_model_dir
0.1)
endfunction()
# set batch_size 10 for UT only (avoid OOM). For whole dataset, use batch_size 20
# set batch_size 10 for UT only (avoid OOM).
# For whole dataset, use batch_size 20
function(
inference_quant2_int8_nlp_test
target
......@@ -284,7 +286,10 @@ if(LINUX AND WITH_MKLDNN)
download_quant_model(
${QUANT_RESNET101_MODEL_DIR} ${QUANT_RESNET101_MODEL_ARCHIVE}
95c6d01e3aeba31c13efb2ba8057d558)
# inference_quant_int8_image_classification_test(test_quant_int8_resnet101_mkldnn ${QUANT_RESNET101_MODEL_DIR}/model ${IMAGENET_DATA_PATH})
# inference_quant_int8_image_classification_test( \
# test_quant_int8_resnet101_mkldnn \
# ${QUANT_RESNET101_MODEL_DIR}/model \
# ${IMAGENET_DATA_PATH})
# Quant GoogleNet
set(QUANT_GOOGLENET_MODEL_DIR "${QUANT_INSTALL_DIR}/GoogleNet_quant")
......@@ -321,18 +326,24 @@ if(LINUX AND WITH_MKLDNN)
set(QUANT_VGG16_MODEL_ARCHIVE "VGG16_qat_model.tar.gz")
download_quant_model(${QUANT_VGG16_MODEL_DIR} ${QUANT_VGG16_MODEL_ARCHIVE}
c37e63ca82a102f47be266f8068b0b55)
# inference_quant_int8_image_classification_test(test_quant_int8_vgg16_mkldnn ${QUANT_VGG16_MODEL_DIR}/model ${IMAGENET_DATA_PATH})
# inference_quant_int8_image_classification_test( \
# test_quant_int8_vgg16_mkldnn \
# ${QUANT_VGG16_MODEL_DIR}/model \
# ${IMAGENET_DATA_PATH})
# Quant VGG19
set(QUANT_VGG19_MODEL_DIR "${QUANT_INSTALL_DIR}/VGG19_quant")
set(QUANT_VGG19_MODEL_ARCHIVE "VGG19_qat_model.tar.gz")
download_quant_model(${QUANT_VGG19_MODEL_DIR} ${QUANT_VGG19_MODEL_ARCHIVE}
62bcd4b6c3ca2af67e8251d1c96ea18f)
# inference_quant_int8_image_classification_test(test_quant_int8_vgg19_mkldnn ${QUANT_VGG19_MODEL_DIR}/model ${IMAGENET_DATA_PATH})
# inference_quant_int8_image_classification_test( \
# test_quant_int8_vgg19_mkldnn ${QUANT_VGG19_MODEL_DIR}/model \
# ${IMAGENET_DATA_PATH})
### Quant2 for image classification
# Quant2 ResNet50 with input/output scales in `fake_quantize_moving_average_abs_max` operators,
# Quant2 ResNet50 with input/output scales in
# `fake_quantize_moving_average_abs_max` operators,
# with weight scales in `fake_dequantize_max_abs` operators
set(QUANT2_RESNET50_MODEL_DIR "${QUANT_INSTALL_DIR}/ResNet50_quant2")
set(QUANT2_RESNET50_MODEL_ARCHIVE "ResNet50_qat_perf.tar.gz")
......@@ -345,7 +356,8 @@ if(LINUX AND WITH_MKLDNN)
${QUANT2_RESNET50_MODEL_DIR}/ResNet50_qat_perf/float
${FP32_RESNET50_MODEL_DIR}/model ${IMAGENET_DATA_PATH})
# Quant2 ResNet50 with input/output scales in `fake_quantize_range_abs_max` operators and the `out_threshold` attributes,
# Quant2 ResNet50 with input/output scales in `fake_quantize_range_abs_max`
# operators and the `out_threshold` attributes,
# with weight scales in `fake_dequantize_max_abs` operators
set(QUANT2_RESNET50_RANGE_MODEL_DIR
"${QUANT_INSTALL_DIR}/ResNet50_quant2_range")
......@@ -358,7 +370,8 @@ if(LINUX AND WITH_MKLDNN)
${QUANT2_RESNET50_RANGE_MODEL_DIR}/ResNet50_qat_range
${FP32_RESNET50_MODEL_DIR}/model ${IMAGENET_DATA_PATH})
# Quant2 ResNet50 with input/output scales in `fake_quantize_range_abs_max` operators and the `out_threshold` attributes,
# Quant2 ResNet50 with input/output scales in `fake_quantize_range_abs_max`
# operators and the `out_threshold` attributes,
# with weight scales in `fake_channel_wise_dequantize_max_abs` operators
set(QUANT2_RESNET50_CHANNELWISE_MODEL_DIR
"${QUANT_INSTALL_DIR}/ResNet50_quant2_channelwise")
......
......@@ -367,7 +367,7 @@ if(APPLE)
if(NOT WITH_DISTRIBUTE)
list(REMOVE_ITEM TEST_OPS test_desc_clone)
list(REMOVE_ITEM TEST_OPS test_program_code)
endif(NOT WITH_DISTRIBUTE)
endif()
message(
WARNING
"These tests has been disabled in OSX before being fixed:\n test_fuse_elewise_add_act_pass \n test_detection_map_op \n test_dist_se_resnext_*"
......@@ -683,7 +683,7 @@ endif()
foreach(TEST_OP ${TEST_OPS})
py_test_modules(${TEST_OP} MODULES ${TEST_OP})
endforeach(TEST_OP)
endforeach()
set_tests_properties(test_logcumsumexp_op PROPERTIES TIMEOUT 30)
py_test_modules(test_adam_op_multi_thread MODULES test_adam_op ENVS
FLAGS_inner_op_parallelism=4)
......@@ -873,8 +873,8 @@ if(WITH_DISTRIBUTE)
test_fleet_localsgd_meta_optimizer ENVS ${dist_ENVS})
endif()
endif(NOT WIN32)
endif(NOT APPLE)
endif()
endif()
if(WITH_DGC)
# if with dgc, test all dgc tests.
# NOTE. dist dgc tests is already in DIST_TEST_OPS
......@@ -938,7 +938,7 @@ if(WITH_DISTRIBUTE)
message(
FATAL_ERROR "available ports have been exhausted:${dist_ut_port}")
endif()
endforeach(TEST_OP)
endforeach()
# solve it later.
bash_test_modules(
test_fleet_launch_ps
......@@ -974,7 +974,7 @@ if(WITH_DISTRIBUTE)
"PADDLE_DIST_UT_PORT=${dist_ut_port}+20"
PADDLE_BINARY_DIR=${PADDLE_BINARY_DIR})
endif()
endif(NOT APPLE)
endif()
endif()
py_test_modules(test_parallel_executor_crf MODULES test_parallel_executor_crf)
......
......@@ -10,7 +10,7 @@ list(REMOVE_ITEM TEST_OPS "test_fleet_with_asp_sharding")
foreach(TEST_OP ${TEST_OPS})
py_test_modules(${TEST_OP} MODULES ${TEST_OP})
endforeach(TEST_OP)
endforeach()
if(WITH_DISTRIBUTE)
if(WITH_GPU
......
......@@ -7,7 +7,7 @@ set(GC_ENVS FLAGS_eager_delete_tensor_gb=0.0)
foreach(TEST_OP ${TEST_OPS})
py_test_modules(${TEST_OP} MODULES ${TEST_OP} ENVS ${GC_ENVS})
endforeach(TEST_OP)
endforeach()
set_tests_properties(test_autograd_functional_dynamic PROPERTIES TIMEOUT 160)
set_tests_properties(test_autograd_functional_static PROPERTIES TIMEOUT 160)
......
......@@ -27,4 +27,4 @@ foreach(TEST_OP ${TEST_OPS})
list(APPEND DIST_TEST_OPS ${TEST_OP})
set_tests_properties(${TEST_OP} PROPERTIES TIMEOUT 120)
set_tests_properties(${TEST_OP} PROPERTIES LABELS "RUN_TYPE=DIST")
endforeach(TEST_OP)
endforeach()
......@@ -6,4 +6,4 @@ string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
foreach(TEST_OP ${TEST_OPS})
py_test_modules(${TEST_OP} MODULES ${TEST_OP})
endforeach(TEST_OP)
endforeach()
......@@ -32,8 +32,8 @@ set(TEST_EAGER_OPS
test_simnet
test_transformer)
list(REMOVE_ITEM TEST_OPS test_lac)
# NOTE(Aurelius84): In case of Windows CI, if open ON_INFER, RWLOCK of Scope will
# be removed and will cause some random failed in multi-thread.
# NOTE(Aurelius84): In case of Windows CI, if open ON_INFER, RWLOCK of Scope
# will be removed and will cause some random failed in multi-thread.
if(NOT ON_INFER)
py_test_modules(test_lac MODULES test_lac ENVS FLAGS_enable_eager_mode=1)
set_tests_properties(test_lac PROPERTIES TIMEOUT 120)
......@@ -51,7 +51,7 @@ foreach(TEST_OP ${TEST_OPS})
else()
py_test_modules(${TEST_OP} MODULES ${TEST_OP} ENVS ${GC_ENVS})
endif()
endforeach(TEST_OP)
endforeach()
set_tests_properties(test_se_resnet PROPERTIES TIMEOUT 900)
set_tests_properties(test_yolov3 PROPERTIES TIMEOUT 900 LABELS
......
......@@ -6,4 +6,4 @@ string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
foreach(TEST_OP ${TEST_OPS})
py_test_modules(${TEST_OP} MODULES ${TEST_OP})
endforeach(TEST_OP)
endforeach()
......@@ -9,7 +9,7 @@ if(WITH_IPU)
py_test_modules(${TEST_OP} MODULES ${TEST_OP})
# set all UTs timeout to 200s
set_tests_properties(${TEST_OP} PROPERTIES TIMEOUT 200)
endforeach(TEST_OP)
endforeach()
set_tests_properties(test_conv_op_ipu PROPERTIES TIMEOUT 300)
set_tests_properties(test_elemetwise_x_op_ipu PROPERTIES TIMEOUT 300)
......
......@@ -6,7 +6,7 @@ string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
foreach(TEST_OP ${TEST_OPS})
py_test_modules(${TEST_OP} MODULES ${TEST_OP})
endforeach(TEST_OP)
endforeach()
set_tests_properties(test_concat_mkldnn_op PROPERTIES TIMEOUT 120)
set_tests_properties(test_conv3d_mkldnn_op PROPERTIES TIMEOUT 120)
set_tests_properties(test_flags_mkldnn_ops_on_off PROPERTIES TIMEOUT 120)
......@@ -7,12 +7,13 @@ string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
if(WITH_ASCEND_CL)
foreach(TEST_OP ${TEST_OPS})
py_test_modules(${TEST_OP} MODULES ${TEST_OP})
endforeach(TEST_OP)
endforeach()
# NOTE: NPU `get_float_status` read the value from register, During the test,
# it is found that this register will be overwritten by any program on the card.
# In order to prevent the interference of nan/inf in the other unittests, we
# need to set the unittests related to `float_status` to exclusive.
# it is found that this register will be overwritten by any program on the
# card. In order to prevent the interference of nan/inf in the other
# unittests, we need to set the unittests related to `float_status` to
# exclusive.
set_tests_properties(test_amp_check_finite_and_scale_op_npu
PROPERTIES LABELS "RUN_TYPE=EXCLUSIVE")
set_tests_properties(test_flags_check_nan_inf_npu
......
......@@ -8,4 +8,4 @@ foreach(TEST_OP ${TEST_OPS})
py_test_modules(${TEST_OP} MODULES ${TEST_OP})
list(APPEND TEST_OPS ${TEST_OP})
set_tests_properties(${TEST_OP} PROPERTIES TIMEOUT 50)
endforeach(TEST_OP)
endforeach()
......@@ -6,7 +6,7 @@ string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
foreach(TEST_OP ${TEST_OPS})
py_test_modules(${TEST_OP} MODULES ${TEST_OP})
endforeach(TEST_OP)
endforeach()
if(NOT WIN32)
set_tests_properties(test_rnn_nets_static PROPERTIES TIMEOUT 120)
set_tests_properties(test_rnn_nets PROPERTIES TIMEOUT 120)
......
......@@ -6,7 +6,7 @@ string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
foreach(TEST_OP ${TEST_OPS})
py_test_modules(${TEST_OP} MODULES ${TEST_OP})
endforeach(TEST_OP)
endforeach()
set_tests_properties(test_sequence_conv PROPERTIES TIMEOUT 120)
set_tests_properties(test_sequence_concat PROPERTIES TIMEOUT 120)
set_tests_properties(test_sequence_pool PROPERTIES TIMEOUT 120)
......@@ -21,11 +21,11 @@ list(REMOVE_ITEM TEST_OPS test_mean_op_xpu)
foreach(TEST_OP ${TEST_OPS})
py_test_modules(${TEST_OP} MODULES ${TEST_OP})
endforeach(TEST_OP)
endforeach()
foreach(TEST_OP ${DIST_TEST_OPS})
py_test_modules(${TEST_OP} MODULES ${TEST_OP})
endforeach(TEST_OP)
endforeach()
set_tests_properties(test_mul_op_xpu PROPERTIES TIMEOUT 120)
set_tests_properties(test_conv2d_op_xpu PROPERTIES TIMEOUT 120)
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册