diff --git a/paddle/fluid/distributed/collective/custom_ccl_tools.cc b/paddle/fluid/distributed/collective/custom_ccl_tools.cc
index fe1ba628d3e45cb17ea26d81df50aba52a476cb5..9d95bcf3588d5d59bba059f8cbff014c67ab2407 100644
--- a/paddle/fluid/distributed/collective/custom_ccl_tools.cc
+++ b/paddle/fluid/distributed/collective/custom_ccl_tools.cc
@@ -29,7 +29,7 @@ phi::ccl::CCLReduceOp ToCustomCCLRedType(ReduceOp reduction) {
   PADDLE_ENFORCE_EQ(
       it != red_type.end(),
       true,
-      platform::errors::InvalidArgument("Invalid hccl reduction. "
+      platform::errors::InvalidArgument("Invalid CustomCCL reduction. "
                                         "Must be Min | Max | Prod | Sum"));
   return it->second;
 }
diff --git a/paddle/fluid/distributed/collective/process_group_custom.cc b/paddle/fluid/distributed/collective/process_group_custom.cc
index 9b21c1d0d12e7fdc49f451cd2ce6b629598ce55e..fbb4a38ea520da8275cbbd0b02469b3937fec402 100644
--- a/paddle/fluid/distributed/collective/process_group_custom.cc
+++ b/paddle/fluid/distributed/collective/process_group_custom.cc
@@ -139,11 +139,12 @@ void ProcessGroupCustom::BroadcastUniqueCustomID(
 // create CustomCCLManager cache for places_key
 void ProcessGroupCustom::CreateCustomManagerCache(
     const std::string& places_key, const std::vector<Place>& places) {
-  PADDLE_ENFORCE_EQ(places_key.empty(),
-                    false,
-                    platform::errors::PreconditionNotMet(
-                        "Not able to create/get the HCCL Communicator since "
-                        "the NPU place are not known"));
+  PADDLE_ENFORCE_EQ(
+      places_key.empty(),
+      false,
+      platform::errors::PreconditionNotMet(
+          "Not able to create/get the CustomCCL Communicator since "
+          "the NPU place are not known"));
   const std::string device_type = places.back().GetDeviceType();
 
   std::vector<std::shared_ptr<CustomCCLCommManager>> ccl_comms;
diff --git a/paddle/fluid/imperative/reducer.cc b/paddle/fluid/imperative/reducer.cc
index f0b102fdd82487d5ab875cefe6857bc41b75d9ea..8c8ff75b2de96994f656fa07e79cc458d266a7d0 100644
--- a/paddle/fluid/imperative/reducer.cc
+++ b/paddle/fluid/imperative/reducer.cc
@@ -250,10 +250,6 @@ void Group::ConcatTensors(const platform::DeviceContext &context) {
         "Paddle can't concat xpu grads since it's not compiled with BKCL,"
         "Please recompile or reinstall Paddle with BKCL support."));
 #endif
-  } else if (platform::is_npu_place(place)) {
-    PADDLE_THROW(platform::errors::PermissionDenied(
-        "Paddle can't concat npu grads since it's not compiled with HCCL,"
-        "Please recompile or reinstall Paddle with HCCL support."));
   } else if (platform::is_cpu_place(place)) {
     ConcatTensorsWithType(static_cast<const phi::CPUContext &>(context),
                           dense_tensors_,
@@ -290,10 +286,6 @@ void Group::SplitTensors(const platform::DeviceContext &context) {
         "Paddle can't split xpu grad since it's not compiled with BKCL,"
         "Please recompile or reinstall Paddle with BKCL support."));
 #endif
-  } else if (platform::is_npu_place(place)) {
-    PADDLE_THROW(platform::errors::PermissionDenied(
-        "Paddle can't split npu grad since it's not compiled with HCCL,"
-        "Please recompile or reinstall Paddle with HCCL support."));
   } else if (platform::is_cpu_place(place)) {
     SplitTensorsWithType(static_cast<const phi::CPUContext &>(context),
                          &dense_contents_,