From 67c6cfe0c61cd4fcfc7cc53e47d5327727b8a312 Mon Sep 17 00:00:00 2001
From: Kim Yann
Date: Thu, 20 Apr 2023 15:02:42 +0800
Subject: [PATCH] rem hccl keywords (#53058)

---
 .../fluid/distributed/collective/custom_ccl_tools.cc |  2 +-
 .../distributed/collective/process_group_custom.cc   | 11 ++++++-----
 paddle/fluid/imperative/reducer.cc                    |  8 --------
 3 files changed, 7 insertions(+), 14 deletions(-)

diff --git a/paddle/fluid/distributed/collective/custom_ccl_tools.cc b/paddle/fluid/distributed/collective/custom_ccl_tools.cc
index fe1ba628d3e..9d95bcf3588 100644
--- a/paddle/fluid/distributed/collective/custom_ccl_tools.cc
+++ b/paddle/fluid/distributed/collective/custom_ccl_tools.cc
@@ -29,7 +29,7 @@ phi::ccl::CCLReduceOp ToCustomCCLRedType(ReduceOp reduction) {
   PADDLE_ENFORCE_EQ(
       it != red_type.end(),
       true,
-      platform::errors::InvalidArgument("Invalid hccl reduction. "
+      platform::errors::InvalidArgument("Invalid CustomCCL reduction. "
                                         "Must be Min | Max | Prod | Sum"));
   return it->second;
 }
diff --git a/paddle/fluid/distributed/collective/process_group_custom.cc b/paddle/fluid/distributed/collective/process_group_custom.cc
index 9b21c1d0d12..fbb4a38ea52 100644
--- a/paddle/fluid/distributed/collective/process_group_custom.cc
+++ b/paddle/fluid/distributed/collective/process_group_custom.cc
@@ -139,11 +139,12 @@ void ProcessGroupCustom::BroadcastUniqueCustomID(
 // create CustomCCLManager cache for places_key
 void ProcessGroupCustom::CreateCustomManagerCache(
     const std::string& places_key, const std::vector<Place>& places) {
-  PADDLE_ENFORCE_EQ(places_key.empty(),
-                    false,
-                    platform::errors::PreconditionNotMet(
-                        "Not able to create/get the HCCL Communicator since "
-                        "the NPU place are not known"));
+  PADDLE_ENFORCE_EQ(
+      places_key.empty(),
+      false,
+      platform::errors::PreconditionNotMet(
+          "Not able to create/get the CustomCCL Communicator since "
+          "the NPU place are not known"));
   const std::string device_type = places.back().GetDeviceType();
 
   std::vector<std::shared_ptr<CustomCCLCommManager>> ccl_comms;
diff --git a/paddle/fluid/imperative/reducer.cc b/paddle/fluid/imperative/reducer.cc
index f0b102fdd82..8c8ff75b2de 100644
--- a/paddle/fluid/imperative/reducer.cc
+++ b/paddle/fluid/imperative/reducer.cc
@@ -250,10 +250,6 @@ void Group::ConcatTensors(const platform::DeviceContext &context) {
         "Paddle can't concat xpu grads since it's not compiled with BKCL,"
         "Please recompile or reinstall Paddle with BKCL support."));
 #endif
-  } else if (platform::is_npu_place(place)) {
-    PADDLE_THROW(platform::errors::PermissionDenied(
-        "Paddle can't concat npu grads since it's not compiled with HCCL,"
-        "Please recompile or reinstall Paddle with HCCL support."));
   } else if (platform::is_cpu_place(place)) {
     ConcatTensorsWithType(static_cast<const phi::CPUContext &>(context),
                           dense_tensors_,
@@ -290,10 +286,6 @@ void Group::SplitTensors(const platform::DeviceContext &context) {
         "Paddle can't split xpu grad since it's not compiled with BKCL,"
         "Please recompile or reinstall Paddle with BKCL support."));
 #endif
-  } else if (platform::is_npu_place(place)) {
-    PADDLE_THROW(platform::errors::PermissionDenied(
-        "Paddle can't split npu grad since it's not compiled with HCCL,"
-        "Please recompile or reinstall Paddle with HCCL support."));
   } else if (platform::is_cpu_place(place)) {
     SplitTensorsWithType(static_cast<const phi::CPUContext &>(context),
                          &dense_contents_,
--
GitLab
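
Note (not part of the patch): for readers outside the Paddle tree, below is a minimal standalone sketch of the lookup pattern used by `ToCustomCCLRedType` in the first hunk above — a table mapping the framework's reduce op to the backend's reduce op, with a guard that rejects anything outside Min | Max | Prod | Sum. The enums and the exception type here are simplified stand-ins for illustration only; they are not Paddle's real `ReduceOp`, `phi::ccl::CCLReduceOp`, or `platform::errors` API.

```cpp
#include <iostream>
#include <map>
#include <stdexcept>

// Simplified stand-ins for the framework-side and backend-side reduce ops.
enum class ReduceOp { SUM, MAX, MIN, PRODUCT, AVG };
enum class CCLReduceOp { SUM, MAX, MIN, PRODUCT };

// Same shape as the patched function: look the op up in a fixed table and
// fail with an "Invalid CustomCCL reduction" style error when it is missing.
CCLReduceOp ToCustomCCLRedType(ReduceOp reduction) {
  static const std::map<ReduceOp, CCLReduceOp> red_type = {
      {ReduceOp::SUM, CCLReduceOp::SUM},
      {ReduceOp::MAX, CCLReduceOp::MAX},
      {ReduceOp::MIN, CCLReduceOp::MIN},
      {ReduceOp::PRODUCT, CCLReduceOp::PRODUCT},
  };
  auto it = red_type.find(reduction);
  if (it == red_type.end()) {
    throw std::invalid_argument(
        "Invalid CustomCCL reduction. Must be Min | Max | Prod | Sum");
  }
  return it->second;
}

int main() {
  ToCustomCCLRedType(ReduceOp::SUM);  // supported, maps cleanly
  try {
    // AVG has no entry in this table, so it triggers the error path.
    ToCustomCCLRedType(ReduceOp::AVG);
  } catch (const std::invalid_argument &e) {
    std::cout << e.what() << "\n";
  }
  return 0;
}
```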