From 3fec7a6e8c09595de9f8cb35a1bd46ca3fa35fa5 Mon Sep 17 00:00:00 2001
From: Wen Sun <35923278+HermitSun@users.noreply.github.com>
Date: Thu, 15 Dec 2022 09:40:18 +0800
Subject: [PATCH] fix: gloo compatible (#49084)

---
 .../distributed/collective/ProcessGroupGloo.cc | 14 ++++++++++++--
 .../distributed/collective/ProcessGroupGloo.h  | 10 ++++++++--
 2 files changed, 20 insertions(+), 4 deletions(-)

diff --git a/paddle/fluid/distributed/collective/ProcessGroupGloo.cc b/paddle/fluid/distributed/collective/ProcessGroupGloo.cc
index 03eeea0684c..5e194b3ccf6 100644
--- a/paddle/fluid/distributed/collective/ProcessGroupGloo.cc
+++ b/paddle/fluid/distributed/collective/ProcessGroupGloo.cc
@@ -310,6 +310,16 @@ class AllreduceGlooTask : public ProcessGroupGloo::GlooTask {
   }
 };
 
+std::shared_ptr<ProcessGroup::Task> ProcessGroupGloo::AllReduce(
+    phi::DenseTensor* out_tensor,
+    const phi::DenseTensor& in_tensor,
+    const AllreduceOptions& opts,
+    bool sync_op) {
+  std::vector<phi::DenseTensor> in_wrapper{in_tensor};
+  std::vector<phi::DenseTensor> out_wrapper{*out_tensor};
+  return AllReduce(in_wrapper, out_wrapper, opts, true);
+}
+
 std::shared_ptr<ProcessGroup::Task> ProcessGroupGloo::AllReduce(
     std::vector<phi::DenseTensor>& inputs,
     std::vector<phi::DenseTensor>& outputs,
@@ -393,8 +403,8 @@ class AllgatherGlooTask : public ProcessGroupGloo::GlooTask {
 std::shared_ptr<ProcessGroup::Task> ProcessGroupGloo::AllGather(
     phi::DenseTensor* out_tensor,
     const phi::DenseTensor& in_tensor,
-    int64_t offset,  // for compatibility, no use now
-    int64_t numel,   // for compatibility, no use now
+    int64_t /*offset*/,
+    int64_t /*numel*/,
     bool sync_op) {
   std::vector<phi::DenseTensor> in_wrapper{in_tensor};
   std::vector<phi::DenseTensor> out_wrapper{*out_tensor};
diff --git a/paddle/fluid/distributed/collective/ProcessGroupGloo.h b/paddle/fluid/distributed/collective/ProcessGroupGloo.h
index 67294932926..1beacfca4ff 100644
--- a/paddle/fluid/distributed/collective/ProcessGroupGloo.h
+++ b/paddle/fluid/distributed/collective/ProcessGroupGloo.h
@@ -116,8 +116,14 @@ class ProcessGroupGloo : public ProcessGroup {
   std::shared_ptr<ProcessGroup::Task> AllGather(
       phi::DenseTensor* out_tensor,
       const phi::DenseTensor& in_tensor,
-      int64_t offset,  // for compatibility, no use now
-      int64_t numel,   // for compatibility, no use now
+      int64_t /*offset*/,  // for compatibility, no use now
+      int64_t /*numel*/,   // for compatibility, no use now
       bool sync_op) override;
+
+  std::shared_ptr<ProcessGroup::Task> AllReduce(
+      phi::DenseTensor* out_tensor,
+      const phi::DenseTensor& in_tensor,
+      const AllreduceOptions& opts,
+      bool sync_op) override;
 
   std::shared_ptr<ProcessGroup::Task> Broadcast(
-- 
GitLab
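
For context on how the new single-tensor overload would be called, here is a minimal usage sketch. This is hypothetical driver code, not part of the patch: the names `pg`, `in`, and `out`, the `paddle::distributed` namespace qualifiers, and the `task->Wait()` call are assumptions based on the surrounding ProcessGroup API, not something this diff defines.

    #include "paddle/fluid/distributed/collective/ProcessGroupGloo.h"

    // Hypothetical helper: all-reduce one dense tensor via the new overload.
    // Assumes `pg` is an initialized ProcessGroupGloo and `out` is a
    // pre-allocated tensor with the same shape and dtype as `in`.
    void AllReduceOne(paddle::distributed::ProcessGroupGloo& pg,
                      phi::DenseTensor* out,
                      const phi::DenseTensor& in) {
      paddle::distributed::AllreduceOptions opts;  // default reduce options
      auto task = pg.AllReduce(out, in, opts, /*sync_op=*/true);
      task->Wait();  // block until the collective completes
    }

Design-wise, the overload simply wraps `in_tensor` and `*out_tensor` into single-element `std::vector<phi::DenseTensor>` objects and forwards to the existing vector-based implementation, so the Gloo backend gains the newer single-tensor `ProcessGroup` interface without duplicating the collective logic.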