未验证 提交 0f3bbe10 编写于 作者: 张春乔 提交者: GitHub

Remove WITH_ASCEND (#52669)

* mv WITH_ASCEND_CL

* mv WITH_ASCEND

* rollback

* remove WITH_ASCEND

* remove WITH_ASCEND
上级 f2d1f284
......@@ -54,7 +54,6 @@ option(WITH_XPU "Compile PaddlePaddle with BAIDU KUNLUN XPU" OFF)
option(WITH_XPU_KP "Compile PaddlePaddle with BAIDU XPU compiler " OFF)
option(WITH_XPU_XFT "Compile PaddlePaddle with BAIDU XPU-XFT" OFF)
option(WITH_WIN_DUMP_DBG "Compile with windows core dump debug mode" OFF)
option(WITH_ASCEND "Compile PaddlePaddle with ASCEND" OFF)
option(WITH_ROCM "Compile PaddlePaddle with ROCM platform" OFF)
option(WITH_IPU "Compile PaddlePaddle with Graphcore IPU" OFF)
option(WITH_ONNXRUNTIME "Compile PaddlePaddle with ONNXRUNTIME" OFF)
......@@ -74,9 +73,6 @@ endif()
# CUDA GPU and BAIDU XPU-XFT are mutually exclusive build targets; fail
# configuration immediately rather than producing a broken build.
if(WITH_GPU AND WITH_XPU_XFT)
message(FATAL_ERROR "Error when compile GPU and XPU-XFT at the same time")
endif()
if(WITH_GPU AND WITH_ASCEND)
message(FATAL_ERROR "Error when compile GPU and ASCEND at the same time")
endif()
# CUDA (WITH_GPU) and ROCm builds are mutually exclusive; abort configuration
# early with a clear error instead of failing later in compilation.
if(WITH_GPU AND WITH_ROCM)
message(FATAL_ERROR "Error when compile CUDA and ROCM at the same time")
endif()
......
......@@ -93,10 +93,6 @@ if(WITH_BOX_PS)
add_definitions(-DPADDLE_WITH_BOX_PS)
endif()
if(WITH_ASCEND)
add_definitions(-DPADDLE_WITH_ASCEND)
endif()
# Expose the ASCEND_INT64 build option to the C++ sources as a preprocessor
# macro.
# NOTE(review): add_definitions() is directory-scoped and leaks to every
# target below this point; target_compile_definitions() would be preferred —
# TODO confirm which targets actually need the macro.
if(WITH_ASCEND_INT64)
add_definitions(-DPADDLE_WITH_ASCEND_INT64)
endif()
......
......@@ -234,10 +234,7 @@ function(build_protobuf TARGET_NAME BUILD_FOR_HOST)
"-Dprotobuf_MSVC_STATIC_RUNTIME=${MSVC_STATIC_CRT}")
endif()
if(WITH_ASCEND AND NOT WITH_ASCEND_CXX11)
set(PROTOBUF_REPOSITORY https://gitee.com/tianjianhe/protobuf.git)
set(PROTOBUF_TAG v21.12)
elseif(WITH_IPU)
if(WITH_IPU)
set(PROTOBUF_REPOSITORY ${GIT_URL}/protocolbuffers/protobuf.git)
set(PROTOBUF_TAG v21.12)
elseif(WIN32)
......
......@@ -51,9 +51,7 @@ cc_test_old(
scope
device_context)
if(WITH_DISTRIBUTE
AND NOT WITH_PSLIB
AND NOT (WITH_ASCEND))
if(WITH_DISTRIBUTE AND NOT WITH_PSLIB)
set_source_files_properties(
interceptor_ping_pong_with_brpc_test.cc
PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS})
......
......@@ -1210,9 +1210,7 @@ inference_analysis_test(
ARGS
--infer_model=${OCR_INSTALL_DIR}/model)
if(WITH_DISTRIBUTE
AND WITH_PSCORE
AND NOT (WITH_ASCEND))
if(WITH_DISTRIBUTE AND WITH_PSCORE)
inference_analysis_test(
test_analyzer_dist_model
SRCS
......
......@@ -153,9 +153,7 @@ endif()
set(COMMON_OP_DEPS ${COMMON_OP_DEPS} layer)
set(COMMON_OP_DEPS ${COMMON_OP_DEPS} tensor_formatter)
set(COMMON_OP_DEPS ${COMMON_OP_DEPS} op_version_registry)
if (WITH_ASCEND)
set(COMMON_OP_DEPS ${COMMON_OP_DEPS} ascend_wrapper)
endif()
# FIXME(typhoonzero): operator deps may not needed.
# op_library(unsqueeze_op DEPS reshape_op)
......
......@@ -172,11 +172,6 @@ if(WITH_PYTHON)
endif()
set(PYBIND_DEPS ${PYBIND_DEPS} processgroup_comm_utils)
if(WITH_ASCEND)
set(PYBIND_DEPS ${PYBIND_DEPS} ascend_wrapper)
set(PYBIND_SRCS ${PYBIND_SRCS} ascend_wrapper_py.cc)
endif()
if(WITH_GLOO)
set(PYBIND_DEPS ${PYBIND_DEPS} gloo_context)
set(PYBIND_SRCS ${PYBIND_SRCS} gloo_context_py.cc)
......
......@@ -15,9 +15,7 @@ list(REMOVE_ITEM DIST_TEST_OPS "test_dist_op")
string(REPLACE ".py" "" DIST_TEST_OPS "${DIST_TEST_OPS}")
# Skip the multi-device batch-merge test when neither CUDA GPU nor KUNLUN XPU
# support is compiled in (the removed WITH_ASCEND clause is gone per #52669).
# The stale pre-change if() header left by the diff rendering is dropped here:
# it produced two if( commands for a single endif(), which is invalid CMake.
if((NOT WITH_GPU) AND (NOT WITH_XPU))
  list(REMOVE_ITEM DIST_TEST_OPS "test_dist_mnist_batch_merge")
endif()
list(APPEND DIST_TEST_OPS test_parallel_dygraph_dataparallel)
......@@ -493,9 +491,7 @@ foreach(TEST_OP ${TEST_EAGER_OPS})
py_test_modules(${TEST_OP} MODULES ${TEST_OP})
endforeach()
# Skip the batch-merge eager test when neither CUDA GPU nor KUNLUN XPU support
# is compiled in (WITH_ASCEND clause removed per #52669).
# Drops the stale pre-change if() header that the diff rendering left next to
# its replacement — two if( commands for one endif() is unbalanced CMake.
if((NOT WITH_GPU) AND (NOT WITH_XPU))
  list(REMOVE_ITEM TEST_OPS "test_dist_mnist_batch_merge")
endif()
......@@ -507,7 +503,6 @@ py_test_modules(test_adam_op_multi_thread MODULES test_adam_op ENVS
FLAGS_inner_op_parallelism=4)
if(WITH_GPU
OR WITH_XPU
OR WITH_ASCEND
OR APPLE)
py_test_modules(test_warpctc_op MODULES test_warpctc_op)
set_tests_properties(test_warpctc_op PROPERTIES TIMEOUT 120)
......@@ -643,9 +638,7 @@ if(WITH_DISTRIBUTE)
bash_test_modules(test_fleetrun START_BASH test_fleetrun.sh ENVS
PADDLE_BINARY_DIR=${PADDLE_BINARY_DIR})
if(WITH_GPU
OR WITH_XPU
OR WITH_ASCEND)
if(WITH_GPU OR WITH_XPU)
bash_test_modules(
test_fleet_launch_nproc START_BASH test_fleet_launch_nproc.sh ENVS
PADDLE_BINARY_DIR=${PADDLE_BINARY_DIR})
......@@ -659,13 +652,6 @@ if(WITH_DISTRIBUTE)
test_fleet_launch_cloud START_BASH test_fleet_launch_cloud.sh ENVS
PADDLE_BINARY_DIR=${PADDLE_BINARY_DIR})
endif()
if(WITH_ASCEND)
bash_test_modules(
test_fleet_launch_ascend START_BASH test_fleet_launch_ascend.sh ENVS
PADDLE_BINARY_DIR=${PADDLE_BINARY_DIR})
bash_test_modules(test_ascend_group START_BASH test_ascend_group.sh ENVS
PADDLE_BINARY_DIR=${PADDLE_BINARY_DIR})
endif()
# port range (20000, 21200) is reserved for dist-ops
set(dist_ut_port 20001)
......
......@@ -368,10 +368,7 @@ if((WITH_GPU OR WITH_ROCM) AND (LINUX))
set_tests_properties(test_eager_dist_api PROPERTIES TIMEOUT "120" LABELS
"RUN_TYPE=DIST")
endif()
if((WITH_GPU
OR WITH_ROCM
OR WITH_ASCEND)
AND LOCAL_ALL_PLAT)
if((WITH_GPU OR WITH_ROCM) AND LOCAL_ALL_PLAT)
bash_test_modules(
test_gen_nccl_id_op
START_BASH
......
......@@ -4,10 +4,7 @@
# and then run the command `python3 ${PADDLE_ROOT}/tools/gen_ut_cmakelists.py -f ${CURRENT_DIRECTORY}/testslist.csv`
set(LOCAL_ALL_ARCH ON)
set(LOCAL_ALL_PLAT ON)
if((WITH_GPU
OR WITH_XPU
OR WITH_ASCEND)
AND LOCAL_ALL_PLAT)
if((WITH_GPU OR WITH_XPU) AND LOCAL_ALL_PLAT)
py_test_modules(
test_fleet_sharding_meta_optimizer MODULES
test_fleet_sharding_meta_optimizer ENVS
......@@ -81,10 +78,7 @@ if(LOCAL_ALL_ARCH AND (LINUX OR WIN32))
test_fleet_fp16_allreduce_meta_optimizer ENVS
"http_proxy=;https_proxy=;PYTHONPATH=../..:${PADDLE_BINARY_DIR}/python")
endif()
if((WITH_GPU
OR WITH_XPU
OR WITH_ASCEND)
AND LOCAL_ALL_PLAT)
if((WITH_GPU OR WITH_XPU) AND LOCAL_ALL_PLAT)
bash_test_modules(
test_rnn_dp
START_BASH
......@@ -174,10 +168,7 @@ if((WITH_GPU) AND LOCAL_ALL_PLAT)
test_parallel_dygraph_pipeline_parallel_with_virtual_stage
PROPERTIES TIMEOUT "500")
endif()
if((WITH_GPU
OR WITH_XPU
OR WITH_ASCEND)
AND (LINUX))
if((WITH_GPU OR WITH_XPU) AND (LINUX))
py_test_modules(
test_fleet_localsgd_meta_optimizer MODULES
test_fleet_localsgd_meta_optimizer ENVS
......@@ -268,10 +259,7 @@ if(LOCAL_ALL_ARCH AND LOCAL_ALL_PLAT)
set_tests_properties(test_parallel_dygraph_control_flow PROPERTIES TIMEOUT
"350")
endif()
if((WITH_GPU
OR WITH_XPU
OR WITH_ASCEND)
AND LOCAL_ALL_PLAT)
if((WITH_GPU OR WITH_XPU) AND LOCAL_ALL_PLAT)
bash_test_modules(
test_fleet_lars_meta_optimizer
START_BASH
......@@ -321,10 +309,7 @@ if(LOCAL_ALL_ARCH AND LOCAL_ALL_PLAT)
"FLAGS_communicator_send_queue_size=1;FLAGS_communicator_max_merge_var_num=1;http_proxy=;https_proxy=;PYTHONPATH=../..:${PADDLE_BINARY_DIR}/python"
)
endif()
if((WITH_GPU
OR WITH_XPU
OR WITH_ASCEND)
AND LOCAL_ALL_PLAT)
if((WITH_GPU OR WITH_XPU) AND LOCAL_ALL_PLAT)
bash_test_modules(
test_fleet_pipeline_meta_optimizer
START_BASH
......@@ -335,10 +320,7 @@ if((WITH_GPU
"PADDLE_DIST_UT_PORT=21236;http_proxy=;https_proxy=;PYTHONPATH=../..:${PADDLE_BINARY_DIR}/python"
)
endif()
if((WITH_GPU
OR WITH_XPU
OR WITH_ASCEND)
AND LOCAL_ALL_PLAT)
if((WITH_GPU OR WITH_XPU) AND LOCAL_ALL_PLAT)
py_test_modules(
test_fleet_gradient_merge_meta_optimizer MODULES
test_fleet_gradient_merge_meta_optimizer ENVS
......@@ -354,10 +336,7 @@ if(LOCAL_ALL_ARCH AND (LINUX OR WIN32))
test_fleet_meta_optimizer_base MODULES test_fleet_meta_optimizer_base ENVS
"http_proxy=;https_proxy=;PYTHONPATH=../..:${PADDLE_BINARY_DIR}/python")
endif()
if((WITH_GPU
OR WITH_XPU
OR WITH_ASCEND)
AND LOCAL_ALL_PLAT)
if((WITH_GPU OR WITH_XPU) AND LOCAL_ALL_PLAT)
bash_test_modules(
test_fleet_raw_program_meta_optimizer
START_BASH
......@@ -433,10 +412,7 @@ if(LOCAL_ALL_ARCH AND LOCAL_ALL_PLAT)
set_tests_properties(test_parallel_dygraph_unused_variables PROPERTIES TIMEOUT
"350")
endif()
if((WITH_GPU
OR WITH_XPU
OR WITH_ASCEND)
AND (LINUX))
if((WITH_GPU OR WITH_XPU) AND (LINUX))
py_test_modules(
test_fleet_lamb_meta_optimizer MODULES test_fleet_lamb_meta_optimizer ENVS
"http_proxy=;https_proxy=;PYTHONPATH=../..:${PADDLE_BINARY_DIR}/python")
......@@ -461,10 +437,7 @@ if(LOCAL_ALL_ARCH AND LOCAL_ALL_PLAT)
set_tests_properties(test_parallel_dygraph_no_sync_gradient_check
PROPERTIES TIMEOUT "60")
endif()
if((WITH_GPU
OR WITH_XPU
OR WITH_ASCEND)
AND LOCAL_ALL_PLAT)
if((WITH_GPU OR WITH_XPU) AND LOCAL_ALL_PLAT)
bash_test_modules(
test_fleet_pipeline_meta_optimizer_with_recompute
START_BASH
......@@ -475,10 +448,7 @@ if((WITH_GPU
"PADDLE_DIST_UT_PORT=21252;http_proxy=;https_proxy=;PYTHONPATH=../..:${PADDLE_BINARY_DIR}/python"
)
endif()
if((WITH_GPU
OR WITH_XPU
OR WITH_ASCEND)
AND (LINUX OR WIN32))
if((WITH_GPU OR WITH_XPU) AND (LINUX OR WIN32))
py_test_modules(
test_fleet_hybrid_meta_optimizer MODULES test_fleet_hybrid_meta_optimizer
ENVS
......@@ -526,10 +496,7 @@ if((WITH_ROCM) AND LOCAL_ALL_PLAT)
set_tests_properties(test_parallel_dygraph_sparse_embedding PROPERTIES TIMEOUT
"200")
endif()
if((WITH_GPU
OR WITH_XPU
OR WITH_ASCEND)
AND LOCAL_ALL_PLAT)
if((WITH_GPU OR WITH_XPU) AND LOCAL_ALL_PLAT)
py_test_modules(
test_fleet_amp_meta_optimizer MODULES test_fleet_amp_meta_optimizer ENVS
"http_proxy=;https_proxy=;PYTHONPATH=../..:${PADDLE_BINARY_DIR}/python")
......@@ -581,10 +548,7 @@ if(LOCAL_ALL_ARCH AND LOCAL_ALL_PLAT)
)
set_tests_properties(test_auto_parallel_parallelizer PROPERTIES TIMEOUT "120")
endif()
if((WITH_GPU
OR WITH_XPU
OR WITH_ASCEND)
AND (LINUX OR WIN32))
if((WITH_GPU OR WITH_XPU) AND (LINUX OR WIN32))
py_test_modules(
test_fleet_recompute_meta_optimizer MODULES
test_fleet_recompute_meta_optimizer ENVS
......@@ -595,10 +559,7 @@ if(LOCAL_ALL_ARCH AND (LINUX OR WIN32))
test_fleet_private_function MODULES test_fleet_private_function ENVS
"http_proxy=;https_proxy=;PYTHONPATH=../..:${PADDLE_BINARY_DIR}/python")
endif()
if((WITH_GPU
OR WITH_XPU
OR WITH_ASCEND)
AND LOCAL_ALL_PLAT)
if((WITH_GPU OR WITH_XPU) AND LOCAL_ALL_PLAT)
bash_test_modules(
test_new_group
START_BASH
......@@ -608,10 +569,7 @@ if((WITH_GPU
ENVS
"PADDLE_DIST_UT_PORT=21268;http_proxy=;https_proxy=")
endif()
if((WITH_GPU
OR WITH_XPU
OR WITH_ASCEND)
AND (LINUX))
if((WITH_GPU OR WITH_XPU) AND (LINUX))
bash_test_modules(
test_c_comm_init_op
START_BASH
......
......@@ -4,9 +4,7 @@ file(
"test_*.py")
string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
if((NOT WITH_GPU)
AND (NOT WITH_XPU)
AND NOT (WITH_ASCEND))
if((NOT WITH_GPU) AND (NOT WITH_XPU))
list(REMOVE_ITEM TEST_OPS "test_dist_fuse_adam_pass")
list(REMOVE_ITEM TEST_OPS "test_dist_fuse_all_reduce_pass")
list(REMOVE_ITEM TEST_OPS "test_dist_fuse_bn_act_pass")
......
......@@ -24,9 +24,7 @@ endif()
if((WITH_DISTRIBUTE)
AND (NOT WIN32)
AND (NOT APPLE))
# Register the ASP sharding test only for device builds that can run it
# (CUDA GPU or KUNLUN XPU); the WITH_ASCEND clause was removed in #52669.
# The duplicated pre-change if() header from the diff rendering is dropped:
# it left two if( commands balanced against a single endif().
if(WITH_GPU OR WITH_XPU)
  py_test_modules(test_fleet_with_asp_sharding MODULES
                  test_fleet_with_asp_sharding ENVS ${dist_ENVS})
endif()
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册