diff --git a/CMakeLists.txt b/CMakeLists.txt
index 50070c7fc05133da758650eb5ac50e32effe63c9..a0a0d57f1984b62af3202015fe34d292c3cd4261 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -119,8 +119,6 @@ if(WIN32)
         endforeach(flag_var)
     endif()
 
-    math(EXPR PROCESS_MAX "${CPU_CORES} * 2 / 3")
-
     # windows build turn off warnings, use parallel compiling.
     foreach(flag_var
         CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
@@ -128,8 +126,12 @@ if(WIN32)
         CMAKE_C_FLAGS CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE
         CMAKE_C_FLAGS_MINSIZEREL CMAKE_C_FLAGS_RELWITHDEBINFO)
         string(REGEX REPLACE "/W[1-4]" " /W0 " ${flag_var} "${${flag_var}}")
-        # NOTE(zhouwei25): GPU compile have too high memory utilization when parallel compiling
-        if(NOT WITH_GPU)
+
+        # NOTE(zhouwei25): GPU compile has too high memory utilization when parallel compiling,
+        # For Visual Studio generators, /MP should be added.
+        # For other generators like Ninja, there is no need to add /MP.
+        # NOTE(review): CMAKE_GENERATOR is e.g. "Visual Studio 16 2019", so an exact
+        # STREQUAL "Visual Studio" comparison never matches — MATCHES is required here.
+        if("${CMAKE_GENERATOR}" MATCHES "Visual Studio" AND NOT WITH_GPU)
+            math(EXPR PROCESS_MAX "${CPU_CORES} * 2 / 3")
             set(${flag_var} "${${flag_var}} /MP${PROCESS_MAX}")
         endif()
     endforeach(flag_var)
diff --git a/cmake/ccache.cmake b/cmake/ccache.cmake
index 25798758473af52dc66230ac70a7d750e78176de..5520720f7a6c719ffb438ff1a6c95f345f0ae55d 100644
--- a/cmake/ccache.cmake
+++ b/cmake/ccache.cmake
@@ -18,7 +18,7 @@
 elseif("${CMAKE_GENERATOR}" STREQUAL "Ninja")
     if(SCCACHE_PATH)
         execute_process(COMMAND sccache -V OUTPUT_VARIABLE sccache_version)
-        message(STATUS "${sccache_version} is founded, use [${SCCACHE_PATH}] to speed up compile on Windows.")
+        message(STATUS "sccache is found, use [${SCCACHE_PATH}] to speed up compile on Windows.")
 
         set(CMAKE_C_COMPILER_LAUNCHER ${SCCACHE_PATH})
         set(CMAKE_CXX_COMPILER_LAUNCHER ${SCCACHE_PATH})
diff --git a/paddle/scripts/paddle_build.bat b/paddle/scripts/paddle_build.bat
index bebcfe64406d9ed43ae665e50fa280dc0595a057..62d30a50d6be4d9ff368c87b86cc15f2c838a8ad 100644
--- a/paddle/scripts/paddle_build.bat
+++ b/paddle/scripts/paddle_build.bat
@@ -324,14 +324,17 @@ if %day_now% NEQ %day_before% (
     echo %day_now% > %cache_dir%\day.txt
     type %cache_dir%\day.txt
     if %day_now% EQU 21 (
+        del D:\sccache\sccache_log.txt
         rmdir %cache_dir%\third_party_GPU /s/q
         rmdir %cache_dir%\third_party /s/q
     )
     if %day_now% EQU 11 (
+        del D:\sccache\sccache_log.txt
         rmdir %cache_dir%\third_party_GPU /s/q
         rmdir %cache_dir%\third_party /s/q
     )
     if %day_now% EQU 01 (
+        del D:\sccache\sccache_log.txt
         rmdir %cache_dir%\third_party_GPU /s/q
         rmdir %cache_dir%\third_party /s/q
     )
diff --git a/tools/parallel_UT_rule.py b/tools/parallel_UT_rule.py
index 5108d34f7bf779413c630b2b1fa31f5b8095e68d..fe0be21bfdf44eb3cf843b689c6ac7c57ec5dfed 100644
--- a/tools/parallel_UT_rule.py
+++ b/tools/parallel_UT_rule.py
@@ -685,7 +685,6 @@ TWO_PARALLEL_JOB = [
     'test_nn_functional_hot_op',
     'test_op_name_conflict',
     'test_imperative_gan',
-    'test_simnet',
     'test_amp_check_finite_and_scale_op',
     'test_random_seed',
     'test_histogram_op',
@@ -819,7 +818,6 @@ TWO_PARALLEL_JOB = [
     'test_prelu_op',
     'test_fill_zeros_like_op',
     'test_pool2d_op',
-    'test_for_enumerate',
     'test_gather_op',
     'test_partial_concat_op',
     'test_gaussian_random_op',
@@ -883,7 +881,6 @@ TWO_PARALLEL_JOB = [
     'test_empty_like_op',
     'test_rank_loss_op',
     'test_elementwise_mod_op',
-    'test_reinforcement_learning',
     'test_elementwise_max_op',
     'test_retain_graph',
     'test_edit_distance_op',
@@ -1001,7 +998,6 @@ TWO_PARALLEL_JOB = [
     'test_static_save_load',
     'test_coalesce_tensor_op',
     'test_fuse_bn_act_pass',
-    'test_simnet_v2',
     'test_shard_index_op',
     'test_cuda_random_seed',
     'test_dequantize_log_op',
@@ -1023,7 +1019,6 @@ TWO_PARALLEL_JOB = [
     'test_py_reader_pin_memory',
     'test_train_recognize_digits',
     'test_parallel_executor_feed_persistable_var',
-    'test_mnist',
     'test_update_loss_scaling_op',
     'test_rnn_cell_api',
     'test_imperative_load_static_param',