Unverified commit 2b40434e, authored by 张春乔 and committed by GitHub

Retire Ascend and Cambricon related code: WITH_ASCEND_CL (#52612)

* mv WITH_ASCEND_CL

* mv WITH_ASCEND

* rollback
Parent ed9bac2f
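Every change below follows the same mechanical pattern: build targets and tests that were guarded by the retired switch are deleted outright, and WITH_ASCEND_CL is dropped from compound conditions. A minimal sketch of that pattern (example_lib is a hypothetical target used only for illustration, not one from this diff):

    # Before: an NPU-only rule, compiled only when WITH_ASCEND_CL was ON.
    if(WITH_ASCEND_CL)
      cc_library(
        example_lib              # hypothetical illustration target
        SRCS example_lib.cc
        DEPS npu_op_runner)
    endif()

    # After: the guarded block is deleted, and any compound condition
    # simply loses the retired option, e.g.
    #   if(WITH_NCCL OR WITH_XPU_BKCL OR WITH_ASCEND_CL)
    # becomes
    #   if(WITH_NCCL OR WITH_XPU_BKCL)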
@@ -53,7 +53,7 @@ cc_test_old(
 if(WITH_DISTRIBUTE
    AND NOT WITH_PSLIB
-   AND NOT (WITH_ASCEND OR WITH_ASCEND_CL))
+   AND NOT (WITH_ASCEND))
   set_source_files_properties(
     interceptor_ping_pong_with_brpc_test.cc
     PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS})
......
@@ -123,16 +123,6 @@ if(NOT WIN32)
       SRCS reducer.cc
       DEPS layer)
   endif()
-  if(WITH_ASCEND_CL)
-    cc_library(
-      hccl_context
-      SRCS hccl_context.cc
-      DEPS collective_helper device_context tensor var_type_traits)
-    cc_library(
-      reducer
-      SRCS reducer.cc
-      DEPS layer)
-  endif()
   if(WITH_CNCL)
     cc_library(
       cncl_context
@@ -145,8 +135,7 @@ if(NOT WIN32)
   endif()
   if(WITH_NCCL
      OR WITH_RCCL
-     OR WITH_XPU_BKCL
-     OR WITH_ASCEND_CL)
+     OR WITH_XPU_BKCL)
     cc_library(
       heter_ccl_context
       SRCS heter_ccl_context.cc
@@ -167,7 +156,6 @@ if(WITH_GLOO)
      (WITH_NCCL
       OR WITH_RCCL
       OR WITH_XPU_BKCL
-      OR WITH_ASCEND_CL
       OR WITH_CNCL)
   ))
   cc_library(
@@ -177,32 +165,16 @@ if(WITH_GLOO)
   endif()
 endif()
-if(NOT WITH_ASCEND_CL)
-  cc_library(
-    gradient_accumulator
-    SRCS gradient_accumulator.cc
-    DEPS blas
-         operator
-         lod_tensor
-         selected_rows_utils
-         selected_rows_functor
-         var_type_traits
-         layer
-         math_function
-         phi_tensor
-         ${MLU_DEPS})
-else()
-  cc_library(
-    gradient_accumulator
-    SRCS gradient_accumulator.cc
-    DEPS blas
-         operator
-         lod_tensor
-         selected_rows_utils
-         selected_rows_functor
-         var_type_traits
-         layer
-         math_function
-         npu_op_runner
-         phi_tensor)
-endif()
+cc_library(
+  gradient_accumulator
+  SRCS gradient_accumulator.cc
+  DEPS blas
+       operator
+       lod_tensor
+       selected_rows_utils
+       selected_rows_functor
+       var_type_traits
+       layer
+       math_function
+       phi_tensor
+       ${MLU_DEPS})
@@ -1212,7 +1212,7 @@ inference_analysis_test(
 if(WITH_DISTRIBUTE
    AND WITH_PSCORE
-   AND NOT (WITH_ASCEND OR WITH_ASCEND_CL))
+   AND NOT (WITH_ASCEND))
   inference_analysis_test(
     test_analyzer_dist_model
     SRCS
......
@@ -117,10 +117,6 @@ if (WITH_GPU OR WITH_ROCM)
   endif()
 endif()
-if (WITH_ASCEND_CL)
-  op_library(sync_batch_norm_op)
-endif()
 op_library(lstm_op DEPS ${OP_HEADER_DEPS} lstm_compute)
 op_library(recurrent_op DEPS ${OP_HEADER_DEPS})
@@ -161,11 +157,6 @@ if (WITH_ASCEND)
   set(COMMON_OP_DEPS ${COMMON_OP_DEPS} ascend_wrapper)
 endif()
-if (WITH_ASCEND_CL)
-  cc_test(assign_op_npu_test SRCS assign_op_npu_test.cc DEPS generated_static_op)
-  set(COMMON_OP_DEPS ${COMMON_OP_DEPS} npu_op_runner)
-endif()
 # FIXME(typhoonzero): operator deps may not needed.
 # op_library(unsqueeze_op DEPS reshape_op)
 # op_library(squeeze_op DEPS reshape_op)
@@ -200,18 +191,10 @@ if (WITH_PYTHON)
   cc_library(py_func_op SRCS py_func_op.cc DEPS op_registry python pybind)
 endif()
-if (WITH_ASCEND_CL)
-  cc_test(range_op_npu_test SRCS range_op_npu_test.cc DEPS op_registry range_op scope device_context enforce executor)
-  cc_test(expand_op_npu_test SRCS expand_op_npu_test.cc DEPS op_registry expand_op eigen_function scope device_context enforce executor compare_op)
-endif()
 set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library")
 add_subdirectory(benchmark)
 cc_test_old(op_debug_string_test SRCS op_debug_string_test.cc DEPS elementwise_add_op ${COMMON_OP_DEPS})
-if (WITH_ASCEND_CL)
-  cc_test(transpose_op_npu_test SRCS transpose_op_npu_test.cc DEPS op_registry transpose_op scope device_context enforce executor)
-endif()
 if(WITH_MKLDNN)
@@ -227,7 +210,7 @@ if(WITH_UNITY_BUILD)
   target_link_libraries(paddle_operators_unity ${OP_HEADER_DEPS} ${COMMON_OP_DEPS})
 endif()
-if (WITH_GPU OR WITH_ASCEND_CL)
+if (WITH_GPU)
   cc_test(copy_cross_scope_test SRCS copy_cross_scope_test.cc DEPS op_registry copy_cross_scope_op scope device_context enforce executor)
 endif()
......
@@ -4,11 +4,3 @@ if(WITH_UNITY_BUILD)
   include(unity_build_rule.cmake)
 endif()
 register_operators()
-if(WITH_ASCEND_CL)
-  cc_test(
-    check_finite_and_unscale_op_npu_test
-    SRCS check_finite_and_unscale_op_npu_test.cc
-    DEPS op_registry check_finite_and_unscale_op scope device_context enforce
-         executor)
-endif()
@@ -51,106 +51,9 @@ if(WITH_CNCL)
   op_library(c_gen_cncl_id_op DEPS ${COLLECTIVE_DEPS})
 endif()
-if(WITH_ASCEND_CL)
-  cc_library(
-    gen_hccl_id_op_helper
-    SRCS gen_hccl_id_op_helper.cc
-    DEPS dynload_warpctc dynamic_loader scope)
-  set(COLLECTIVE_DEPS ${COLLECTIVE_DEPS} collective_helper
-                      gen_hccl_id_op_helper)
-  op_library(c_gen_hccl_id_op DEPS ${COLLECTIVE_DEPS})
-  op_library(gen_hccl_id_op DEPS ${COLLECTIVE_DEPS})
-endif()
 set(OPERATOR_DEPS
     ${OPERATOR_DEPS} ${COLLECTIVE_DEPS}
     PARENT_SCOPE)
 set(GLOB_COLLECTIVE_DEPS
     ${COLLECTIVE_DEPS}
     CACHE INTERNAL "collective dependency")
-if(WITH_ASCEND_CL)
-  set(COMMON_TEST_DEPS_FOR_HCOM
-      c_comm_init_hccl_op
-      c_gen_hccl_id_op
-      gen_hccl_id_op_helper
-      gen_hccl_id_op
-      op_registry
-      ascend_hccl
-      flags
-      dynamic_loader
-      dynload_warpctc
-      scope
-      device_context
-      enforce
-      executor)
-  cc_test(
-    c_broadcast_op_npu_test
-    SRCS c_broadcast_op_npu_test.cc
-    DEPS c_broadcast_op ${COLLECTIVE_DEPS} ${COMMON_TEST_DEPS_FOR_HCOM})
-  cc_test(
-    c_allreduce_sum_op_npu_test
-    SRCS c_allreduce_sum_op_npu_test.cc
-    DEPS c_allreduce_sum_op ${COLLECTIVE_DEPS} ${COMMON_TEST_DEPS_FOR_HCOM})
-  cc_test(
-    c_reducescatter_op_npu_test
-    SRCS c_reducescatter_op_npu_test.cc
-    DEPS c_reducescatter_op ${COLLECTIVE_DEPS} ${COMMON_TEST_DEPS_FOR_HCOM})
-  cc_test(
-    c_allgather_op_npu_test
-    SRCS c_allgather_op_npu_test.cc
-    DEPS c_allgather_op ${COLLECTIVE_DEPS} ${COMMON_TEST_DEPS_FOR_HCOM})
-  cc_test(
-    c_reduce_sum_op_npu_test
-    SRCS c_reduce_sum_op_npu_test.cc
-    DEPS c_reduce_sum_op ${COLLECTIVE_DEPS} ${COMMON_TEST_DEPS_FOR_HCOM})
-  cc_test(
-    c_allreduce_max_op_npu_test
-    SRCS c_allreduce_max_op_npu_test.cc
-    DEPS c_allreduce_max_op ${COLLECTIVE_DEPS} ${COMMON_TEST_DEPS_FOR_HCOM})
-  cc_test(
-    send_v2_op_npu_test
-    SRCS send_v2_op_npu_test.cc
-    DEPS send_v2_op ${COLLECTIVE_DEPS} ${COMMON_TEST_DEPS_FOR_HCOM})
-  cc_test(
-    recv_v2_op_npu_test
-    SRCS recv_v2_op_npu_test.cc
-    DEPS recv_v2_op ${COLLECTIVE_DEPS} ${COMMON_TEST_DEPS_FOR_HCOM})
-  cc_test(
-    checknumeric
-    SRCS checknumeric_npu_test.cc
-    DEPS c_allreduce_sum_op ${COLLECTIVE_DEPS} ${COMMON_TEST_DEPS_FOR_HCOM})
-  cc_test(
-    c_sync_comm_stream_op_npu_test
-    SRCS c_sync_comm_stream_op_npu_test.cc
-    DEPS op_registry
-         c_broadcast_op
-         c_comm_init_hccl_op
-         c_sync_comm_stream_op
-         c_gen_hccl_id_op
-         gen_hccl_id_op_helper
-         ${COLLECTIVE_DEPS}
-         ascend_hccl
-         dynamic_loader
-         dynload_warpctc
-         scope
-         device_context
-         enforce
-         executor)
-  cc_test(
-    c_sync_calc_stream_op_npu_test
-    SRCS c_sync_calc_stream_op_npu_test.cc
-    DEPS op_registry
-         elementwise_add_op
-         c_sync_calc_stream_op
-         c_gen_hccl_id_op
-         gen_hccl_id_op_helper
-         ${COLLECTIVE_DEPS}
-         ascend_hccl
-         dynamic_loader
-         dynload_warpctc
-         scope
-         device_context
-         enforce
-         executor)
-endif()
@@ -17,16 +17,3 @@ cc_test(
   test_elementwise_add_grad_grad
   SRCS test_elementwise_add_grad_grad.cc
   DEPS op_registry elementwise_add_op scope device_context enforce executor)
-if(WITH_ASCEND_CL)
-  cc_test(
-    elementwise_op_npu_test
-    SRCS elementwise_op_npu_test.cc
-    DEPS op_registry
-         elementwise_add_op
-         elementwise_sub_op
-         scope
-         device_context
-         enforce
-         executor)
-endif()
@@ -16,9 +16,7 @@ math_library(sampler DEPS generator)
 # math_library(math_function DEPS blas dense_tensor tensor)
 math_library(sequence_pooling DEPS math_function jit_kernel_helper)
-if(WITH_ASCEND_CL)
-  math_library(beam_search DEPS math_function beam_search_npu)
-elseif(WITH_XPU)
+if(WITH_XPU)
   math_library(beam_search DEPS math_function beam_search_xpu)
 else()
   math_library(beam_search DEPS math_function)
......
@@ -33,10 +33,3 @@ if(WITH_ROCM)
     SRCS check_reduce_rank_test.cu
     DEPS tensor)
 endif()
-if(WITH_ASCEND_CL)
-  cc_test(
-    reduce_any_op_npu_test
-    SRCS reduce_any_op_npu_test.cc
-    DEPS op_registry reduce_any_op scope device_context enforce executor)
-endif()
@@ -74,17 +74,6 @@ else()
   set(IPU_CTX_DEPS)
 endif()
-if(WITH_ASCEND_CL)
-  set(NPU_CTX_DEPS npu_stream npu_info)
-endif()
-if(WITH_ASCEND_CL)
-  cc_library(
-    stream_callback_manager
-    SRCS stream_callback_manager.cc
-    DEPS simple_threadpool enforce)
-endif()
 if(WITH_GPU)
   nv_library(
     stream_callback_manager
@@ -100,8 +89,6 @@ endif()
 if(WITH_GPU OR WITH_ROCM)
   set(STREAM_CALLBACK_DEPS stream_callback_manager)
-elseif(WITH_ASCEND_CL)
-  set(STREAM_CALLBACK_DEPS stream_callback_manager)
 else()
   set(STREAM_CALLBACK_DEPS)
 endif()
@@ -151,9 +138,6 @@ cc_library(
   collective_helper
   SRCS collective_helper.cc gen_comm_id_helper.cc
   DEPS framework_proto device_context enforce)
-if(WITH_ASCEND_CL)
-  target_link_libraries(collective_helper npu_collective_helper)
-endif()
 if(WITH_CNCL)
   target_link_libraries(collective_helper mlu_collective_helper)
@@ -163,10 +147,6 @@ if(WITH_GPU OR WITH_ROCM)
   target_link_libraries(device_context gpu_resource_pool)
 endif()
-if(WITH_ASCEND_CL)
-  target_link_libraries(device_context npu_resource_pool)
-endif()
 if(WITH_XPU)
   target_link_libraries(device_context xpu_resource_pool)
 endif()
@@ -190,16 +170,6 @@ set(DEVICE_EVENT_LIBS
     device_event_base
     CACHE INTERNAL "device event libs")
-if(WITH_ASCEND_CL)
-  cc_library(
-    device_event_npu
-    SRCS device_event_npu.cc
-    DEPS device_event_base npu_resource_pool)
-  set(DEVICE_EVENT_LIBS
-      device_event_npu
-      CACHE INTERNAL "device event libs")
-endif()
 if(WITH_GPU)
   nv_library(
     device_event_gpu
......
@@ -62,11 +62,6 @@ if(WITH_ROCM)
     dynload_warpctc
     SRCS warpctc.cc
     DEPS dynamic_loader warpctc phi_dynload_warpctc)
-elseif(WITH_ASCEND_CL)
-  cc_library(
-    dynload_warpctc
-    SRCS warpctc.cc
-    DEPS dynamic_loader warpctc npu_hccl phi_dynload_warpctc)
 else()
   nv_library(
     dynload_cuda
......
@@ -91,12 +91,6 @@ if(WITH_XPU_BKCL)
   set(PYBIND_DEPS ${PYBIND_DEPS} heter_ccl_context)
 endif()
-if(WITH_ASCEND_CL)
-  set(PYBIND_DEPS ${PYBIND_DEPS} reducer)
-  set(PYBIND_DEPS ${PYBIND_DEPS} hccl_context)
-  set(PYBIND_DEPS ${PYBIND_DEPS} heter_ccl_context)
-endif()
 if(WITH_CNCL)
   set(PYBIND_DEPS ${PYBIND_DEPS} reducer)
   set(PYBIND_DEPS ${PYBIND_DEPS} cncl_context)
@@ -249,28 +243,16 @@ endif()
 if(WITH_PYTHON)
   # generate op pybind functions automatically for dygraph.
-  if(WITH_ASCEND_CL)
-    set(OP_FUNCTION_GENERETOR_DEPS
-        pybind
-        proto_desc
-        executor
-        layer
-        tracer
-        engine
-        imperative_profiler
-        imperative_flag
-        ascend_wrapper)
-  else()
-    set(OP_FUNCTION_GENERETOR_DEPS
-        pybind
-        proto_desc
-        executor
-        layer
-        tracer
-        engine
-        imperative_profiler
-        imperative_flag)
-  endif()
+  set(OP_FUNCTION_GENERETOR_DEPS
+      pybind
+      proto_desc
+      executor
+      layer
+      tracer
+      engine
+      imperative_profiler
+      imperative_flag)
   list(APPEND OP_FUNCTION_GENERETOR_DEPS ${GLOB_OP_LIB})
   list(APPEND OP_FUNCTION_GENERETOR_DEPS ${GLOB_OPERATOR_DEPS})
@@ -282,10 +264,6 @@ if(WITH_PYTHON)
     list(APPEND OP_FUNCTION_GENERETOR_DEPS bkcl_context)
   endif()
-  if(WITH_ASCEND_CL)
-    list(APPEND OP_FUNCTION_GENERETOR_DEPS hccl_context)
-  endif()
   if(WITH_CNCL)
     list(APPEND OP_FUNCTION_GENERETOR_DEPS cncl_context)
   endif()
......
@@ -6,8 +6,6 @@ if(WITH_GPU)
   set(PACKAGE_NAME "paddlepaddle-gpu")
 elseif(WITH_ROCM)
   set(PACKAGE_NAME "paddlepaddle-rocm")
-elseif(WITH_ASCEND_CL)
-  set(PACKAGE_NAME "paddlepaddle-npu")
 elseif(WITH_XPU)
   set(PACKAGE_NAME "paddlepaddle-xpu")
 elseif(WITH_IPU)
......
@@ -17,7 +17,7 @@ string(REPLACE ".py" "" DIST_TEST_OPS "${DIST_TEST_OPS}")
 if((NOT WITH_GPU)
    AND (NOT WITH_XPU)
-   AND NOT (WITH_ASCEND OR WITH_ASCEND_CL))
+   AND NOT (WITH_ASCEND))
   list(REMOVE_ITEM DIST_TEST_OPS "test_dist_mnist_batch_merge")
 endif()
 list(APPEND DIST_TEST_OPS test_parallel_dygraph_dataparallel)
@@ -272,47 +272,25 @@ function(py_test_modules TARGET_NAME)
     if(WITH_COVERAGE AND NOT (WITH_INCREMENTAL_COVERAGE
                               AND "$ENV{PADDLE_GIT_DIFF_PY_FILE}" STREQUAL ""))
-      if(WITH_ASCEND_CL)
-        add_test(
-          NAME ${TARGET_NAME}
-          COMMAND
-            ${CMAKE_COMMAND} -E env
-            PYTHONPATH=${PADDLE_BINARY_DIR}/python:$ENV{PYTHONPATH}
-            ${py_test_modules_ENVS}
-            COVERAGE_FILE=${PADDLE_BINARY_DIR}/python-coverage.data
-            ${PYTHON_EXECUTABLE} -m coverage run --branch -p
-            ${PADDLE_SOURCE_DIR}/tools/test_runner.py ${py_test_modules_MODULES}
-          WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
-      else()
-        add_test(
-          NAME ${TARGET_NAME}
-          COMMAND
-            ${CMAKE_COMMAND} -E env PYTHONPATH=${PADDLE_BINARY_DIR}/python
-            ${py_test_modules_ENVS}
-            COVERAGE_FILE=${PADDLE_BINARY_DIR}/python-coverage.data
-            ${PYTHON_EXECUTABLE} -m coverage run --branch -p
-            ${PADDLE_SOURCE_DIR}/tools/test_runner.py ${py_test_modules_MODULES}
-          WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
-      endif()
+      add_test(
+        NAME ${TARGET_NAME}
+        COMMAND
+          ${CMAKE_COMMAND} -E env PYTHONPATH=${PADDLE_BINARY_DIR}/python
+          ${py_test_modules_ENVS}
+          COVERAGE_FILE=${PADDLE_BINARY_DIR}/python-coverage.data
+          ${PYTHON_EXECUTABLE} -m coverage run --branch -p
+          ${PADDLE_SOURCE_DIR}/tools/test_runner.py ${py_test_modules_MODULES}
+        WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
     else()
-      if(WITH_ASCEND_CL)
-        add_test(
-          NAME ${TARGET_NAME}
-          COMMAND
-            ${CMAKE_COMMAND} -E env
-            PYTHONPATH=${PADDLE_BINARY_DIR}/python:$ENV{PYTHONPATH}
-            ${py_test_modules_ENVS} ${PYTHON_EXECUTABLE}
-            ${PADDLE_SOURCE_DIR}/tools/test_runner.py ${py_test_modules_MODULES}
-          WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
-      else()
-        add_test(
-          NAME ${TARGET_NAME}
-          COMMAND
-            ${CMAKE_COMMAND} -E env PYTHONPATH=${PADDLE_BINARY_DIR}/python
-            ${py_test_modules_ENVS} ${PYTHON_EXECUTABLE}
-            ${PADDLE_SOURCE_DIR}/tools/test_runner.py ${py_test_modules_MODULES}
-          WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
-      endif()
+      add_test(
+        NAME ${TARGET_NAME}
+        COMMAND
+          ${CMAKE_COMMAND} -E env PYTHONPATH=${PADDLE_BINARY_DIR}/python
+          ${py_test_modules_ENVS} ${PYTHON_EXECUTABLE}
+          ${PADDLE_SOURCE_DIR}/tools/test_runner.py ${py_test_modules_MODULES}
+        WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
     endif()
     if(py_test_modules_SERIAL)
@@ -517,7 +495,7 @@ endforeach()
 if((NOT WITH_GPU)
    AND (NOT WITH_XPU)
-   AND NOT (WITH_ASCEND OR WITH_ASCEND_CL))
+   AND NOT (WITH_ASCEND))
   list(REMOVE_ITEM TEST_OPS "test_dist_mnist_batch_merge")
 endif()
@@ -530,7 +508,6 @@ py_test_modules(test_adam_op_multi_thread MODULES test_adam_op ENVS
 if(WITH_GPU
    OR WITH_XPU
    OR WITH_ASCEND
-   OR WITH_ASCEND_CL
    OR APPLE)
   py_test_modules(test_warpctc_op MODULES test_warpctc_op)
   set_tests_properties(test_warpctc_op PROPERTIES TIMEOUT 120)
@@ -668,8 +645,7 @@ if(WITH_DISTRIBUTE)
     PADDLE_BINARY_DIR=${PADDLE_BINARY_DIR})
   if(WITH_GPU
      OR WITH_XPU
-     OR WITH_ASCEND
-     OR WITH_ASCEND_CL)
+     OR WITH_ASCEND)
     bash_test_modules(
       test_fleet_launch_nproc START_BASH test_fleet_launch_nproc.sh ENVS
       PADDLE_BINARY_DIR=${PADDLE_BINARY_DIR})
@@ -683,7 +659,7 @@ if(WITH_DISTRIBUTE)
     test_fleet_launch_cloud START_BASH test_fleet_launch_cloud.sh ENVS
     PADDLE_BINARY_DIR=${PADDLE_BINARY_DIR})
   endif()
-  if(WITH_ASCEND OR WITH_ASCEND_CL)
+  if(WITH_ASCEND)
     bash_test_modules(
       test_fleet_launch_ascend START_BASH test_fleet_launch_ascend.sh ENVS
       PADDLE_BINARY_DIR=${PADDLE_BINARY_DIR})
......
@@ -370,9 +370,7 @@ if((WITH_GPU OR WITH_ROCM) AND (LINUX))
 endif()
 if((WITH_GPU
     OR WITH_ROCM
-    OR WITH_ASCEND
-    OR WITH_ASCEND_CL
-    )
+    OR WITH_ASCEND)
    AND LOCAL_ALL_PLAT)
   bash_test_modules(
     test_gen_nccl_id_op
......
@@ -6,9 +6,7 @@ set(LOCAL_ALL_ARCH ON)
 set(LOCAL_ALL_PLAT ON)
 if((WITH_GPU
     OR WITH_XPU
-    OR WITH_ASCEND
-    OR WITH_ASCEND_CL
-    )
+    OR WITH_ASCEND)
    AND LOCAL_ALL_PLAT)
   py_test_modules(
     test_fleet_sharding_meta_optimizer MODULES
@@ -85,9 +83,7 @@ if(LOCAL_ALL_ARCH AND (LINUX OR WIN32))
 endif()
 if((WITH_GPU
     OR WITH_XPU
-    OR WITH_ASCEND
-    OR WITH_ASCEND_CL
-    )
+    OR WITH_ASCEND)
    AND LOCAL_ALL_PLAT)
   bash_test_modules(
     test_rnn_dp
@@ -180,9 +176,7 @@ if((WITH_GPU) AND LOCAL_ALL_PLAT)
 endif()
 if((WITH_GPU
     OR WITH_XPU
-    OR WITH_ASCEND
-    OR WITH_ASCEND_CL
-    )
+    OR WITH_ASCEND)
    AND (LINUX))
   py_test_modules(
     test_fleet_localsgd_meta_optimizer MODULES
@@ -276,9 +270,7 @@ if(LOCAL_ALL_ARCH AND LOCAL_ALL_PLAT)
 endif()
 if((WITH_GPU
     OR WITH_XPU
-    OR WITH_ASCEND
-    OR WITH_ASCEND_CL
-    )
+    OR WITH_ASCEND)
    AND LOCAL_ALL_PLAT)
   bash_test_modules(
     test_fleet_lars_meta_optimizer
@@ -331,9 +323,7 @@ if(LOCAL_ALL_ARCH AND LOCAL_ALL_PLAT)
 endif()
 if((WITH_GPU
     OR WITH_XPU
-    OR WITH_ASCEND
-    OR WITH_ASCEND_CL
-    )
+    OR WITH_ASCEND)
    AND LOCAL_ALL_PLAT)
   bash_test_modules(
     test_fleet_pipeline_meta_optimizer
@@ -347,9 +337,7 @@ if((WITH_GPU
 endif()
 if((WITH_GPU
     OR WITH_XPU
-    OR WITH_ASCEND
-    OR WITH_ASCEND_CL
-    )
+    OR WITH_ASCEND)
    AND LOCAL_ALL_PLAT)
   py_test_modules(
     test_fleet_gradient_merge_meta_optimizer MODULES
@@ -368,9 +356,7 @@ if(LOCAL_ALL_ARCH AND (LINUX OR WIN32))
 endif()
 if((WITH_GPU
     OR WITH_XPU
-    OR WITH_ASCEND
-    OR WITH_ASCEND_CL
-    )
+    OR WITH_ASCEND)
    AND LOCAL_ALL_PLAT)
   bash_test_modules(
     test_fleet_raw_program_meta_optimizer
@@ -449,9 +435,7 @@ if(LOCAL_ALL_ARCH AND LOCAL_ALL_PLAT)
 endif()
 if((WITH_GPU
     OR WITH_XPU
-    OR WITH_ASCEND
-    OR WITH_ASCEND_CL
-    )
+    OR WITH_ASCEND)
    AND (LINUX))
   py_test_modules(
     test_fleet_lamb_meta_optimizer MODULES test_fleet_lamb_meta_optimizer ENVS
@@ -479,9 +463,7 @@ if(LOCAL_ALL_ARCH AND LOCAL_ALL_PLAT)
 endif()
 if((WITH_GPU
     OR WITH_XPU
-    OR WITH_ASCEND
-    OR WITH_ASCEND_CL
-    )
+    OR WITH_ASCEND)
    AND LOCAL_ALL_PLAT)
   bash_test_modules(
     test_fleet_pipeline_meta_optimizer_with_recompute
@@ -495,9 +477,7 @@ if((WITH_GPU
 endif()
 if((WITH_GPU
     OR WITH_XPU
-    OR WITH_ASCEND
-    OR WITH_ASCEND_CL
-    )
+    OR WITH_ASCEND)
    AND (LINUX OR WIN32))
   py_test_modules(
     test_fleet_hybrid_meta_optimizer MODULES test_fleet_hybrid_meta_optimizer
@@ -548,9 +528,7 @@ if((WITH_ROCM) AND LOCAL_ALL_PLAT)
 endif()
 if((WITH_GPU
     OR WITH_XPU
-    OR WITH_ASCEND
-    OR WITH_ASCEND_CL
-    )
+    OR WITH_ASCEND)
    AND LOCAL_ALL_PLAT)
   py_test_modules(
     test_fleet_amp_meta_optimizer MODULES test_fleet_amp_meta_optimizer ENVS
@@ -605,9 +583,7 @@ if(LOCAL_ALL_ARCH AND LOCAL_ALL_PLAT)
 endif()
 if((WITH_GPU
     OR WITH_XPU
-    OR WITH_ASCEND
-    OR WITH_ASCEND_CL
-    )
+    OR WITH_ASCEND)
    AND (LINUX OR WIN32))
   py_test_modules(
     test_fleet_recompute_meta_optimizer MODULES
@@ -621,9 +597,7 @@ if(LOCAL_ALL_ARCH AND (LINUX OR WIN32))
 endif()
 if((WITH_GPU
     OR WITH_XPU
-    OR WITH_ASCEND
-    OR WITH_ASCEND_CL
-    )
+    OR WITH_ASCEND)
    AND LOCAL_ALL_PLAT)
   bash_test_modules(
     test_new_group
@@ -636,9 +610,7 @@ if((WITH_GPU
 endif()
 if((WITH_GPU
     OR WITH_XPU
-    OR WITH_ASCEND
-    OR WITH_ASCEND_CL
-    )
+    OR WITH_ASCEND)
    AND (LINUX))
   bash_test_modules(
     test_c_comm_init_op
......
@@ -6,7 +6,7 @@ string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
 if((NOT WITH_GPU)
    AND (NOT WITH_XPU)
-   AND NOT (WITH_ASCEND OR WITH_ASCEND_CL))
+   AND NOT (WITH_ASCEND))
   list(REMOVE_ITEM TEST_OPS "test_dist_fuse_adam_pass")
   list(REMOVE_ITEM TEST_OPS "test_dist_fuse_all_reduce_pass")
   list(REMOVE_ITEM TEST_OPS "test_dist_fuse_bn_act_pass")
......
@@ -26,8 +26,7 @@ if((WITH_DISTRIBUTE)
    AND (NOT APPLE))
   if(WITH_GPU
      OR WITH_XPU
-     OR WITH_ASCEND
-     OR WITH_ASCEND_CL)
+     OR WITH_ASCEND)
     py_test_modules(test_fleet_with_asp_sharding MODULES
                     test_fleet_with_asp_sharding ENVS ${dist_ENVS})
   endif()
......
 #!/usr/bin/env bash
 # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
+#
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
-#
+#
 #     http://www.apache.org/licenses/LICENSE-2.0
-#
+#
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -20,7 +20,7 @@ PADDLE_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}")/../../" && pwd )"
 # install lcov
 if [ ! -f "/root/.cache/lcov-1.14.tar.gz" ];then
-  wget -P /home https://paddle-ci.gz.bcebos.com/coverage/lcov-1.14.tar.gz --no-proxy --no-check-certificate || exit 101
+  wget -P /home https://paddle-ci.gz.bcebos.com/coverage/lcov-1.14.tar.gz --no-proxy --no-check-certificate || exit 101
   cp /home/lcov-1.14.tar.gz /root/.cache/lcov-1.14.tar.gz
 else
   cp /root/.cache/lcov-1.14.tar.gz /home/lcov-1.14.tar.gz
@@ -113,8 +113,6 @@ function gen_full_html_report_npu() {
 if [ ${WITH_XPU:-OFF} == "ON" ]; then
   gen_full_html_report_xpu || true
-elif [ ${WITH_ASCEND_CL:-OFF} == "ON" ]; then
-  gen_full_html_report_npu || true
 else
   gen_full_html_report || true
 fi
@@ -213,8 +211,6 @@ echo "Assert Python Diff Coverage"
 if [ ${WITH_XPU:-OFF} == "ON" ]; then
   echo "XPU has no python coverage!"
-elif [ ${WITH_ASCEND_CL:-OFF} == "ON" ]; then
-  echo "NPU has no python coverage!"
 else
   if [[ "${NO_PYTHON_COVERAGE_DATA}" != "1" ]];then
     python3.7 ${PADDLE_ROOT}/tools/coverage/coverage_lines.py python-coverage-diff.info 0.9 || PYTHON_COVERAGE_LINES_ASSERT=1
......
 #!/usr/bin/env bash
 # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
+#
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
-#
+#
 #     http://www.apache.org/licenses/LICENSE-2.0
-#
+#
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -21,7 +21,7 @@ PADDLE_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}")/../../" && pwd )"
 function lcov_init(){
     # install lcov
     if [ ! -f "/root/.cache/lcov-1.14.tar.gz" ];then
-      wget -P /home https://paddle-ci.gz.bcebos.com/coverage/lcov-1.14.tar.gz --no-proxy --no-check-certificate || exit 101
+      wget -P /home https://paddle-ci.gz.bcebos.com/coverage/lcov-1.14.tar.gz --no-proxy --no-check-certificate || exit 101
       cp /home/lcov-1.14.tar.gz /root/.cache/lcov-1.14.tar.gz
     else
       cp /root/.cache/lcov-1.14.tar.gz /home/lcov-1.14.tar.gz
@@ -230,7 +230,7 @@ function covinfo_combine_full(){
     mv infer-python-coverage.info python-coverage.info
   else
     echo "Cannot found python coverage.info"
-  fi
+  fi
   gen_python_full_html_report || true
   gen_full_html_report || true
 }
@@ -243,8 +243,6 @@ function cov_rate_judge(){
 if [ ${WITH_XPU:-OFF} == "ON" ]; then
   echo "XPU has no python coverage!"
-elif [ ${WITH_ASCEND_CL:-OFF} == "ON" ]; then
-  echo "NPU has no python coverage!"
 else
   if [[ python-coverage-diff.info ]];then
     python ${PADDLE_ROOT}/tools/coverage/coverage_lines.py python-coverage-diff.info 0.9 || PYTHON_COVERAGE_LINES_ASSERT=1
@@ -269,7 +267,7 @@ function print_usage() {
 function main () {
     local CMD=$1
     lcov_init
-    case $CMD in
+    case $CMD in
       gen_cov_info)
         gen_cpp_covinfo
         gen_py_covinfo
......