From d4f43ad4ced51f9eb4979172d6a4be80090ac530 Mon Sep 17 00:00:00 2001
From: Wen Sun <35923278+HermitSun@users.noreply.github.com>
Date: Sat, 17 Dec 2022 20:41:38 +0800
Subject: [PATCH] refactor: rename xccl files (#49127)

---
 .../distributed/collective/CMakeLists.txt      |  28 +--
 .../fluid/distributed/collective/HCCLTools.cc  |  48 -----
 .../fluid/distributed/collective/HCCLTools.h   | 184 ------------------
 .../collective/ProcessGroupBKCL.cc             |   2 +-
 .../collective/ProcessGroupCustom.cc           |   2 +-
 .../collective/{Common.cc => common.cc}        |   2 +-
 .../collective/{Common.h => common.h}          |   0
 .../collective/{MPITools.cc => mpi_tools.cc}   |   4 +-
 .../collective/{MPITools.h => mpi_tools.h}     |   0
 ...cessGroupGloo.cc => process_group_gloo.cc}  |  13 +-
 ...rocessGroupGloo.h => process_group_gloo.h}  |   5 +
 ...rocessGroupMPI.cc => process_group_mpi.cc}  |   4 +-
 .../{ProcessGroupMPI.h => process_group_mpi.h} |   2 +-
 .../collective/process_group_nccl.cc           |   2 +-
 paddle/fluid/pybind/CMakeLists.txt             |  10 +-
 paddle/fluid/pybind/distributed_py.cc          |   4 +-
 paddle/phi/backends/CMakeLists.txt             |   4 +-
 paddle/phi/kernels/CMakeLists.txt              |   2 +-
 18 files changed, 49 insertions(+), 267 deletions(-)
 delete mode 100644 paddle/fluid/distributed/collective/HCCLTools.cc
 delete mode 100644 paddle/fluid/distributed/collective/HCCLTools.h
 rename paddle/fluid/distributed/collective/{Common.cc => common.cc} (97%)
 rename paddle/fluid/distributed/collective/{Common.h => common.h} (100%)
 rename paddle/fluid/distributed/collective/{MPITools.cc => mpi_tools.cc} (94%)
 rename paddle/fluid/distributed/collective/{MPITools.h => mpi_tools.h} (100%)
 rename paddle/fluid/distributed/collective/{ProcessGroupGloo.cc => process_group_gloo.cc} (97%)
 rename paddle/fluid/distributed/collective/{ProcessGroupGloo.h => process_group_gloo.h} (97%)
 rename paddle/fluid/distributed/collective/{ProcessGroupMPI.cc => process_group_mpi.cc} (99%)
 rename paddle/fluid/distributed/collective/{ProcessGroupMPI.h => process_group_mpi.h} (99%)

diff --git a/paddle/fluid/distributed/collective/CMakeLists.txt b/paddle/fluid/distributed/collective/CMakeLists.txt
index de9059228a6..7bc0d1310bf 100644
--- a/paddle/fluid/distributed/collective/CMakeLists.txt
+++ b/paddle/fluid/distributed/collective/CMakeLists.txt
@@ -1,5 +1,5 @@
 cc_library(
-  processgroup
+  process_group
   SRCS ProcessGroup.cc
   DEPS dense_tensor)
 cc_library(
@@ -9,20 +9,20 @@ cc_library(
 cc_library(
   eager_reducer
   SRCS reducer.cc
-  DEPS eager_api processgroup process_group_stream phi_api string_helper)
+  DEPS eager_api process_group process_group_stream phi_api string_helper)
 
 if(WITH_DISTRIBUTE)
   cc_library(
-    processgroup_gloo
-    SRCS ProcessGroupGloo.cc
+    process_group_gloo
+    SRCS process_group_gloo.cc
     DEPS phi_api eager_api gloo_wrapper)
 endif()
 
 if(WITH_NCCL OR WITH_RCCL)
   cc_library(
     process_group_nccl
-    SRCS process_group_nccl.cc nccl_tools.cc Common.cc check.cc
-    DEPS processgroup
+    SRCS process_group_nccl.cc nccl_tools.cc common.cc check.cc
+    DEPS process_group
       process_group_stream
       place
       enforce
@@ -34,23 +34,23 @@ endif()
 
 if(WITH_XPU_BKCL)
   cc_library(
-    processgroup_bkcl
-    SRCS ProcessGroupBKCL.cc BKCLTools.cc Common.cc
-    DEPS processgroup place enforce collective_helper device_context
+    process_group_bkcl
+    SRCS ProcessGroupBKCL.cc BKCLTools.cc common.cc
+    DEPS process_group place enforce collective_helper device_context
       dense_tensor)
 endif()
 
 if(WITH_MPI)
   cc_library(
-    processgroup_mpi
-    SRCS ProcessGroupMPI.cc MPITools.cc Common.cc
+    process_group_mpi
+    SRCS process_group_mpi.cc mpi_tools.cc common.cc
     DEPS collective_helper device_context)
 endif()
 
 if(WITH_CUSTOM_DEVICE)
   cc_library(
-    processgroup_custom
-    SRCS ProcessGroupCustom.cc CustomCCLTools.cc Common.cc
-    DEPS processgroup phi_backends place enforce collective_helper
+    process_group_custom
+    SRCS ProcessGroupCustom.cc CustomCCLTools.cc common.cc
+    DEPS process_group phi_backends place enforce collective_helper
       device_context)
 endif()
diff --git a/paddle/fluid/distributed/collective/HCCLTools.cc b/paddle/fluid/distributed/collective/HCCLTools.cc
deleted file mode 100644
index 7ab3aa87b08..00000000000
--- a/paddle/fluid/distributed/collective/HCCLTools.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "paddle/fluid/distributed/collective/HCCLTools.h"
-
-#include "paddle/fluid/distributed/collective/Types.h"
-
-namespace paddle {
-namespace distributed {
-
-HcclReduceOp ToHCCLRedType(ReduceOp reduction) {
-  static const std::map<ReduceOp, HcclReduceOp> red_type = {
-      {ReduceOp::MIN, HCCL_REDUCE_MIN},
-      {ReduceOp::MAX, HCCL_REDUCE_MAX},
-      {ReduceOp::SUM, HCCL_REDUCE_SUM},
-      {ReduceOp::PRODUCT, HCCL_REDUCE_PROD},
-  };
-  auto it = red_type.find(reduction);
-  PADDLE_ENFORCE_EQ(
-      it != red_type.end(),
-      true,
-      platform::errors::InvalidArgument("Invalid hccl reduction. "
-                                        "Must be Min | Max | Prod | Sum"));
-  return it->second;
-}
-
-std::string SerializeHCCLUniqueId(const HcclRootInfo& hcclID) {
-  const uint8_t* bytes = reinterpret_cast<const uint8_t*>(&hcclID);
-  std::ostringstream oss;
-  for (size_t i = 0; i < sizeof(hcclID); ++i) {
-    oss << std::hex << static_cast<int>(bytes[i]);
-  }
-  return oss.str();
-}
-
-}  // namespace distributed
-}  // namespace paddle
diff --git a/paddle/fluid/distributed/collective/HCCLTools.h b/paddle/fluid/distributed/collective/HCCLTools.h
deleted file mode 100644
index 89ce00fe874..00000000000
--- a/paddle/fluid/distributed/collective/HCCLTools.h
+++ /dev/null
@@ -1,184 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#pragma once
-
-#include
-
-#include
-
-#include "paddle/fluid/distributed/collective/Types.h"
-#include "paddle/fluid/framework/data_type.h"
-#include "paddle/fluid/framework/variable.h"
-#include "paddle/fluid/platform/collective_helper.h"
-#include "paddle/fluid/platform/device/npu/enforce_npu.h"
-#include "paddle/fluid/platform/device/npu/npu_info.h"
-#include "paddle/fluid/platform/device_context.h"
-#include "paddle/fluid/platform/enforce.h"
-#include "paddle/utils/variant.h"
-
-namespace paddle {
-namespace distributed {
-
-class NPUEventManager {
- public:
-  NPUEventManager() = default;
-
-  ~NPUEventManager() {
-    if (is_created_) {
-      platform::NPUDeviceGuard guard(device_index_);
-      platform::NPUEventDestroy(event_);
-    }
-  }
-
-  NPUEventManager(const NPUEventManager&) = delete;
-  NPUEventManager& operator=(const NPUEventManager&) = delete;
-
-  NPUEventManager(NPUEventManager&& other) {
-    std::swap(is_created_, other.is_created_);
-    std::swap(device_index_, other.device_index_);
-    std::swap(event_, other.event_);
-  }
-
-  NPUEventManager& operator=(NPUEventManager&& other) {
-    std::swap(is_created_, other.is_created_);
-    std::swap(device_index_, other.device_index_);
-    std::swap(event_, other.event_);
-    return *this;
-  }
-
-  bool IsCreated() const { return is_created_; }
-  bool DeviceId() const { return device_index_; }
-  aclrtEvent GetRawNPUEvent() const { return event_; }
-
-  void Record(const paddle::platform::NPUDeviceContext& ctx) {
-    auto device_index = ctx.GetPlace().device;
-    if (!is_created_) {
-      CreateEvent(device_index);
-    }
-    PADDLE_ENFORCE_EQ(device_index,
-                      device_index_,
-                      platform::errors::PreconditionNotMet(
-                          "NPUDeviceContext's device %d does not match"
-                          "Event's device %d",
-                          device_index,
-                          device_index_));
-
-    platform::NPUDeviceGuard guard(device_index_);
-    platform::NPUEventRecord(event_, ctx.stream());
-  }
-
-  bool Query() const {
-    aclrtEventStatus status = ACL_EVENT_STATUS_COMPLETE;
-    platform::NPUEventQuery(event_, &status);
-    if (status == ACL_EVENT_STATUS_COMPLETE) {
-      return true;
-    }
-    return false;
-  }
-
-  void Block(const paddle::platform::NPUDeviceContext& ctx) const {
-    if (is_created_) {
-      auto device_index = ctx.GetPlace().device;
-      PADDLE_ENFORCE_EQ(device_index,
-                        device_index_,
-                        platform::errors::PreconditionNotMet(
-                            "phi::GPUContext's device %d does not match"
-                            "Event's device %d",
-                            device_index,
-                            device_index_));
-      platform::NPUDeviceGuard guard(device_index_);
-      platform::NPUStreamWaitEvent(ctx.stream(), event_);
-    }
-  }
-
- private:
-  bool is_created_{false};
-  aclrtEvent event_{};
-  int8_t device_index_{0};
-
- private:
-  void CreateEvent(int device_index) {
-    device_index_ = device_index;
-    platform::NPUDeviceGuard guard(device_index);
-    platform::NPUEventCreate(&event_);
-    is_created_ = true;
-  }
-};
-
-class HCCLCommManager {
- public:
-  explicit HCCLCommManager(HcclComm hcclComm) : hccl_comm_(hcclComm) {}
-
-  HCCLCommManager() : HCCLCommManager(nullptr) {}
-
-  ~HCCLCommManager() noexcept {
-    std::unique_lock<std::mutex> lock(mutex_);
-    if (hccl_comm_) {
-      platform::dynload::HcclCommDestroy(hccl_comm_);
-    }
-  }
-
-  static std::shared_ptr<HCCLCommManager> Create(int num_ranks,
-                                                 int rank,
-                                                 HcclRootInfo* comm_id,
-                                                 HcclComm hccl_comm) {
-    auto hccl_manager = std::make_shared<HCCLCommManager>();
-    auto ret = platform::dynload::HcclCommInitRootInfo(
-        num_ranks, comm_id, rank, &hccl_comm);
-    using __NPU_STATUS_TYPE__ = decltype(ret);
-    constexpr auto __success_type__ =
-        platform::details::NPUStatusType<__NPU_STATUS_TYPE__>::kSuccess;
-    if (UNLIKELY(ret != __success_type__)) {
-      VLOG(0) << "Error: create hccl_id error.";
-      exit(-1);
-    }
-
-    hccl_manager->hccl_id_ = comm_id;
-    hccl_manager->rank_ = rank;
-    hccl_manager->hccl_comm_ = hccl_comm;
-    return hccl_manager;
-  }
-
-  HcclRootInfo* GetHcclId() const {
-    std::unique_lock<std::mutex> lock(mutex_);
-    return hccl_id_;
-  }
-
-  HcclComm GetHcclComm() const {
-    std::unique_lock<std::mutex> lock(mutex_);
-    return hccl_comm_;
-  }
-
-  HCCLCommManager(const HCCLCommManager&) = delete;
-  HCCLCommManager& operator=(const HCCLCommManager&) = delete;
-  HCCLCommManager& operator=(HCCLCommManager&& other) = delete;
-
-  HCCLCommManager(HCCLCommManager&& other) {
-    std::unique_lock<std::mutex> lock(other.mutex_);
-    std::swap(hccl_comm_, other.hccl_comm_);
-  }
-
- protected:
-  HcclComm hccl_comm_;
-  HcclRootInfo* hccl_id_;
-  int rank_;
-  mutable std::mutex mutex_;
-};
-
-HcclReduceOp ToHCCLRedType(ReduceOp reduction);
-std::string SerializeHCCLUniqueId(const HcclRootInfo& hcclID);
-
-}  // namespace distributed
-}  // namespace paddle
diff --git a/paddle/fluid/distributed/collective/ProcessGroupBKCL.cc b/paddle/fluid/distributed/collective/ProcessGroupBKCL.cc
index 135621ff5f6..25a85f679a3 100644
--- a/paddle/fluid/distributed/collective/ProcessGroupBKCL.cc
+++ b/paddle/fluid/distributed/collective/ProcessGroupBKCL.cc
@@ -15,7 +15,7 @@
 #include "paddle/fluid/distributed/collective/ProcessGroupBKCL.h"
 
 #include "paddle/fluid/distributed/collective/BKCLTools.h"
-#include "paddle/fluid/distributed/collective/Common.h"
+#include "paddle/fluid/distributed/collective/common.h"
 #include "paddle/fluid/platform/device/xpu/bkcl_helper.h"
 #include "paddle/fluid/platform/device/xpu/xpu_info.h"
 #include "paddle/fluid/platform/device_context.h"
diff --git a/paddle/fluid/distributed/collective/ProcessGroupCustom.cc b/paddle/fluid/distributed/collective/ProcessGroupCustom.cc
index 7f75fc17846..e4e79d8be62 100644
--- a/paddle/fluid/distributed/collective/ProcessGroupCustom.cc
+++ b/paddle/fluid/distributed/collective/ProcessGroupCustom.cc
@@ -14,8 +14,8 @@
 
 #include "paddle/fluid/distributed/collective/ProcessGroupCustom.h"
 
-#include "paddle/fluid/distributed/collective/Common.h"
 #include "paddle/fluid/distributed/collective/CustomCCLTools.h"
+#include "paddle/fluid/distributed/collective/common.h"
 #include "paddle/fluid/memory/malloc.h"
 #include "paddle/fluid/platform/device_context.h"
 #include "paddle/fluid/platform/place.h"
diff --git a/paddle/fluid/distributed/collective/Common.cc b/paddle/fluid/distributed/collective/common.cc
similarity index 97%
rename from paddle/fluid/distributed/collective/Common.cc
rename to paddle/fluid/distributed/collective/common.cc
index d5cac8ec687..e60ecf9b8dc 100644
--- a/paddle/fluid/distributed/collective/Common.cc
+++ b/paddle/fluid/distributed/collective/common.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "paddle/fluid/distributed/collective/Common.h" +#include "paddle/fluid/distributed/collective/common.h" namespace paddle { namespace distributed { diff --git a/paddle/fluid/distributed/collective/Common.h b/paddle/fluid/distributed/collective/common.h similarity index 100% rename from paddle/fluid/distributed/collective/Common.h rename to paddle/fluid/distributed/collective/common.h diff --git a/paddle/fluid/distributed/collective/MPITools.cc b/paddle/fluid/distributed/collective/mpi_tools.cc similarity index 94% rename from paddle/fluid/distributed/collective/MPITools.cc rename to paddle/fluid/distributed/collective/mpi_tools.cc index 042169728db..65831cf890c 100644 --- a/paddle/fluid/distributed/collective/MPITools.cc +++ b/paddle/fluid/distributed/collective/mpi_tools.cc @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/distributed/collective/MPITools.h" -#include "paddle/fluid/distributed/collective/Common.h" +#include "paddle/fluid/distributed/collective/mpi_tools.h" #include "paddle/fluid/distributed/collective/Types.h" +#include "paddle/fluid/distributed/collective/common.h" namespace paddle { namespace distributed { diff --git a/paddle/fluid/distributed/collective/MPITools.h b/paddle/fluid/distributed/collective/mpi_tools.h similarity index 100% rename from paddle/fluid/distributed/collective/MPITools.h rename to paddle/fluid/distributed/collective/mpi_tools.h diff --git a/paddle/fluid/distributed/collective/ProcessGroupGloo.cc b/paddle/fluid/distributed/collective/process_group_gloo.cc similarity index 97% rename from paddle/fluid/distributed/collective/ProcessGroupGloo.cc rename to paddle/fluid/distributed/collective/process_group_gloo.cc index 5e194b3ccf6..d5477549385 100644 --- a/paddle/fluid/distributed/collective/ProcessGroupGloo.cc +++ b/paddle/fluid/distributed/collective/process_group_gloo.cc @@ -28,8 +28,8 @@ #include #include -#include "paddle/fluid/distributed/collective/Common.h" -#include "paddle/fluid/distributed/collective/ProcessGroupGloo.h" +#include "paddle/fluid/distributed/collective/common.h" +#include "paddle/fluid/distributed/collective/process_group_gloo.h" #include "paddle/fluid/framework/fleet/gloo_wrapper.h" #include "paddle/fluid/platform/enforce.h" @@ -400,6 +400,15 @@ class AllgatherGlooTask : public ProcessGroupGloo::GlooTask { } }; +std::shared_ptr ProcessGroupGloo::AllGather( + phi::DenseTensor* out_tensor, + const phi::DenseTensor& in_tensor, + bool sync_op) { + std::vector in_wrapper{in_tensor}; + std::vector out_wrapper{*out_tensor}; + return AllGather(in_wrapper, out_wrapper, true); +} + std::shared_ptr ProcessGroupGloo::AllGather( phi::DenseTensor* out_tensor, const phi::DenseTensor& in_tensor, diff --git a/paddle/fluid/distributed/collective/ProcessGroupGloo.h b/paddle/fluid/distributed/collective/process_group_gloo.h similarity index 97% rename from paddle/fluid/distributed/collective/ProcessGroupGloo.h rename to paddle/fluid/distributed/collective/process_group_gloo.h index 1beacfca4ff..cdb5255c8db 100644 --- a/paddle/fluid/distributed/collective/ProcessGroupGloo.h +++ b/paddle/fluid/distributed/collective/process_group_gloo.h @@ -120,6 +120,11 @@ class ProcessGroupGloo : public ProcessGroup { int64_t /*numel*/, // for compatibility, no use now bool sync_op) override; + std::shared_ptr AllGather( + phi::DenseTensor* out_tensor, + const phi::DenseTensor& in_tensor, + bool sync_op) override; + std::shared_ptr AllReduce( phi::DenseTensor* 
       const phi::DenseTensor& in_tensor,
diff --git a/paddle/fluid/distributed/collective/ProcessGroupMPI.cc b/paddle/fluid/distributed/collective/process_group_mpi.cc
similarity index 99%
rename from paddle/fluid/distributed/collective/ProcessGroupMPI.cc
rename to paddle/fluid/distributed/collective/process_group_mpi.cc
index 66edb52b66d..796e0bb692f 100644
--- a/paddle/fluid/distributed/collective/ProcessGroupMPI.cc
+++ b/paddle/fluid/distributed/collective/process_group_mpi.cc
@@ -12,9 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "paddle/fluid/distributed/collective/ProcessGroupMPI.h"
+#include "paddle/fluid/distributed/collective/process_group_mpi.h"
 #include
-#include "paddle/fluid/distributed/collective/Common.h"
+#include "paddle/fluid/distributed/collective/common.h"
 
 constexpr int64_t kWaitBlockTImeout = 10;
 namespace paddle {
diff --git a/paddle/fluid/distributed/collective/ProcessGroupMPI.h b/paddle/fluid/distributed/collective/process_group_mpi.h
similarity index 99%
rename from paddle/fluid/distributed/collective/ProcessGroupMPI.h
rename to paddle/fluid/distributed/collective/process_group_mpi.h
index d877f856dc1..635d81d6103 100644
--- a/paddle/fluid/distributed/collective/ProcessGroupMPI.h
+++ b/paddle/fluid/distributed/collective/process_group_mpi.h
@@ -30,7 +30,7 @@
 #include "paddle/fluid/platform/device_context.h"
 
 #if defined(PADDLE_WITH_MPI)
-#include "paddle/fluid/distributed/collective/MPITools.h"
+#include "paddle/fluid/distributed/collective/mpi_tools.h"
 #endif
 
 namespace paddle {
diff --git a/paddle/fluid/distributed/collective/process_group_nccl.cc b/paddle/fluid/distributed/collective/process_group_nccl.cc
index 0859708f92c..1353ea719a8 100644
--- a/paddle/fluid/distributed/collective/process_group_nccl.cc
+++ b/paddle/fluid/distributed/collective/process_group_nccl.cc
@@ -14,8 +14,8 @@
 
 #include "paddle/fluid/distributed/collective/process_group_nccl.h"
 
-#include "paddle/fluid/distributed/collective/Common.h"
 #include "paddle/fluid/distributed/collective/check.h"
+#include "paddle/fluid/distributed/collective/common.h"
 #include "paddle/fluid/distributed/collective/nccl_tools.h"
 #include "paddle/fluid/distributed/collective/utils.h"
 #include "paddle/fluid/platform/device/gpu/nccl_helper.h"
diff --git a/paddle/fluid/pybind/CMakeLists.txt b/paddle/fluid/pybind/CMakeLists.txt
index 37e085b82bc..55accf7e046 100755
--- a/paddle/fluid/pybind/CMakeLists.txt
+++ b/paddle/fluid/pybind/CMakeLists.txt
@@ -155,21 +155,21 @@ if(WITH_CUSTOM_DEVICE)
 endif()
 
 if(WITH_PYTHON)
-  set(PYBIND_DEPS ${PYBIND_DEPS} processgroup eager_reducer)
+  set(PYBIND_DEPS ${PYBIND_DEPS} process_group eager_reducer)
   if(WITH_NCCL OR WITH_RCCL)
     set(PYBIND_DEPS ${PYBIND_DEPS} process_group_nccl)
   endif()
   if(WITH_XPU_BKCL)
-    set(PYBIND_DEPS ${PYBIND_DEPS} processgroup_bkcl)
+    set(PYBIND_DEPS ${PYBIND_DEPS} process_group_bkcl)
   endif()
   if(WITH_GLOO)
-    set(PYBIND_DEPS ${PYBIND_DEPS} processgroup_gloo)
+    set(PYBIND_DEPS ${PYBIND_DEPS} process_group_gloo)
   endif()
   if(WITH_MPI)
-    set(PYBIND_DEPS ${PYBIND_DEPS} processgroup_mpi)
+    set(PYBIND_DEPS ${PYBIND_DEPS} process_group_mpi)
   endif()
   if(WITH_CUSTOM_DEVICE)
-    set(PYBIND_DEPS ${PYBIND_DEPS} processgroup_custom)
+    set(PYBIND_DEPS ${PYBIND_DEPS} process_group_custom)
   endif()
   if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 7.0)
     set(DISTRIBUTE_COMPILE_FLAGS "${DISTRIBUTE_COMPILE_FLAGS} -faligned-new")
diff --git a/paddle/fluid/pybind/distributed_py.cc b/paddle/fluid/pybind/distributed_py.cc
index e9d59132d0e..913d8c22b9f 100644
--- a/paddle/fluid/pybind/distributed_py.cc
+++ b/paddle/fluid/pybind/distributed_py.cc
@@ -38,7 +38,7 @@ limitations under the License. */
 #endif
 
 #if defined(PADDLE_WITH_MPI)
-#include "paddle/fluid/distributed/collective/ProcessGroupMPI.h"
+#include "paddle/fluid/distributed/collective/process_group_mpi.h"
 #endif
 
 #if defined(PADDLE_WITH_CUSTOM_DEVICE)
@@ -46,7 +46,7 @@ limitations under the License. */
 #endif
 
 #if defined(PADDLE_WITH_GLOO)
-#include "paddle/fluid/distributed/collective/ProcessGroupGloo.h"
+#include "paddle/fluid/distributed/collective/process_group_gloo.h"
 #include "paddle/fluid/distributed/store/tcp_store.h"
 #endif
 
diff --git a/paddle/phi/backends/CMakeLists.txt b/paddle/phi/backends/CMakeLists.txt
index c9e110ae7b8..3ee28c31058 100644
--- a/paddle/phi/backends/CMakeLists.txt
+++ b/paddle/phi/backends/CMakeLists.txt
@@ -65,12 +65,12 @@ if(WITH_CUSTOM_DEVICE)
     DEPS phi_capi)
 endif()
 
-set(COMM_UTILS_DEPS processgroup)
+set(COMM_UTILS_DEPS process_group)
 if(WITH_NCCL OR WITH_RCCL)
   set(COMM_UTILS_DEPS ${PROCESS_GROUP_UTILS_DEPS} process_group_nccl)
 endif()
 if(WITH_CUSTOM_DEVICE)
-  set(COMM_UTILS_DEPS ${PROCESS_GROUP_UTILS_DEPS} processgroup_custom)
+  set(COMM_UTILS_DEPS ${PROCESS_GROUP_UTILS_DEPS} process_group_custom)
 endif()
 cc_library(
   processgroup_comm_utils
diff --git a/paddle/phi/kernels/CMakeLists.txt b/paddle/phi/kernels/CMakeLists.txt
index abe35f284d6..735ba7beaa2 100644
--- a/paddle/phi/kernels/CMakeLists.txt
+++ b/paddle/phi/kernels/CMakeLists.txt
@@ -78,7 +78,7 @@ set(COMMON_KERNEL_DEPS
     gpc
     utf8proc)
 
-set(COMMON_KERNEL_DEPS ${COMMON_KERNEL_DEPS} processgroup)
+set(COMMON_KERNEL_DEPS ${COMMON_KERNEL_DEPS} process_group)
 if(WITH_NCCL OR WITH_RCCL)
   set(COMMON_KERNEL_DEPS ${COMMON_KERNEL_DEPS} process_group_nccl)
 endif()
--
GitLab
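
For code that builds against these libraries, the patch is a pure renaming exercise: CMake dependencies move from the old camel-case target names (processgroup, processgroup_gloo, processgroup_mpi, ...) to the snake_case names introduced here, and C++ includes switch to the lowercase headers, e.g. "paddle/fluid/distributed/collective/process_group_gloo.h" in place of "ProcessGroupGloo.h". A minimal sketch of an affected in-tree target follows; the my_collective_ext target and its source file are hypothetical and not part of this patch, only the DEPS spellings come from it:

    # Hypothetical module that links the renamed targets.
    # Before this patch it would have listed: DEPS processgroup processgroup_gloo
    cc_library(
      my_collective_ext
      SRCS my_collective_ext.cc
      DEPS process_group process_group_gloo)

The same substitution applies to the pybind, phi/backends, and phi/kernels dependency lists updated above.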