From 215c7ae7c1685ba1b0e6aaf832f6d095fa26790d Mon Sep 17 00:00:00 2001
From: Wen Sun <35923278+HermitSun@users.noreply.github.com>
Date: Fri, 6 Jan 2023 11:36:16 +0800
Subject: [PATCH] Fix hidden overloaded functions in process group (#49576)

* fix: fix hidden virtual funcs

* fix: add default impl
---
 .../collective/process_group_with_stream.h    | 110 ++++++++++++++++--
 .../collective/process_group_without_stream.h |  36 +++++-
 2 files changed, 130 insertions(+), 16 deletions(-)

diff --git a/paddle/fluid/distributed/collective/process_group_with_stream.h b/paddle/fluid/distributed/collective/process_group_with_stream.h
index 375d230cf6..cfdb7b1655 100644
--- a/paddle/fluid/distributed/collective/process_group_with_stream.h
+++ b/paddle/fluid/distributed/collective/process_group_with_stream.h
@@ -62,17 +62,6 @@ class ProcessGroupWithStream : public ProcessGroup {
 
   virtual ~ProcessGroupWithStream() = default;
 
-  // methods from base class
-  using ProcessGroup::AllGather;
-  using ProcessGroup::AllReduce;
-  using ProcessGroup::AllToAll;
-  using ProcessGroup::Broadcast;
-  using ProcessGroup::Recv;
-  using ProcessGroup::Reduce;
-  using ProcessGroup::ReduceScatter;
-  using ProcessGroup::Scatter;
-  using ProcessGroup::Send;
-
   std::shared_ptr<ProcessGroup::Task> AllGather(
       phi::DenseTensor* out_tensor,
       const phi::DenseTensor& in_tensor,
@@ -111,6 +100,18 @@ class ProcessGroupWithStream : public ProcessGroup {
                      use_calc_stream);
   }
 
+  std::shared_ptr<ProcessGroup::Task> AllGather(
+      phi::DenseTensor* out_tensor,
+      const phi::DenseTensor& in_tensor,
+      int64_t offset,
+      int64_t numel,
+      bool sync_op,
+      bool use_calc_stream) override {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "ProcessGroupWithStream (%s) does not support all_gather.",
+        GetBackendName()));
+  }
+
   std::shared_ptr<ProcessGroup::Task> AllReduce(
       phi::DenseTensor* out_tensor,
       const phi::DenseTensor& in_tensor,
@@ -123,6 +124,17 @@ class ProcessGroupWithStream : public ProcessGroup {
                      /*use_calc_stream*/ false);
   }
 
+  std::shared_ptr<ProcessGroup::Task> AllReduce(
+      phi::DenseTensor* out_tensor,
+      const phi::DenseTensor& in_tensor,
+      const AllreduceOptions& opts,
+      bool sync_op,
+      bool use_calc_stream) override {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "ProcessGroupWithStream (%s) does not support all_reduce.",
+        GetBackendName()));
+  }
+
   std::shared_ptr<ProcessGroup::Task> AllToAll(
       phi::DenseTensor* out_tensor,
       const phi::DenseTensor& in_tensor,
@@ -137,6 +149,18 @@ class ProcessGroupWithStream : public ProcessGroup {
                     /*use_calc_stream*/ false);
   }
 
+  std::shared_ptr<ProcessGroup::Task> AllToAll(
+      phi::DenseTensor* out_tensor,
+      const phi::DenseTensor& in_tensor,
+      const std::vector<int64_t>& out_size_each_rank,
+      const std::vector<int64_t>& in_size_each_rank,
+      bool sync_op,
+      bool use_calc_stream) override {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "ProcessGroupWithStream (%s) does not support all_to_all.",
+        GetBackendName()));
+  }
+
   std::shared_ptr<ProcessGroup::Task> Broadcast(
       phi::DenseTensor* out_tensor,
       const phi::DenseTensor& in_tensor,
@@ -149,6 +173,17 @@ class ProcessGroupWithStream : public ProcessGroup {
                      /*use_calc_stream*/ false);
   }
 
+  std::shared_ptr<ProcessGroup::Task> Broadcast(
+      phi::DenseTensor* out_tensor,
+      const phi::DenseTensor& in_tensor,
+      const BroadcastOptions& opts,
+      bool sync_op,
+      bool use_calc_stream) override {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "ProcessGroupWithStream (%s) does not support broadcast.",
+        GetBackendName()));
+  }
+
   std::shared_ptr<ProcessGroup::Task> Reduce(phi::DenseTensor* out_tensor,
                                              const phi::DenseTensor& in_tensor,
                                              const ReduceOptions& opts,
@@ -160,6 +195,16 @@ class ProcessGroupWithStream : public ProcessGroup {
                   /*use_calc_stream*/ false);
   }
 
+  std::shared_ptr<ProcessGroup::Task> Reduce(phi::DenseTensor* out_tensor,
+                                             const phi::DenseTensor& in_tensor,
+                                             const ReduceOptions& opts,
+                                             bool sync_op,
+                                             bool use_calc_stream) override {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "ProcessGroupWithStream (%s) does not support reduce.",
+        GetBackendName()));
+  }
+
   std::shared_ptr<ProcessGroup::Task> ReduceScatter(
       phi::DenseTensor* out_tensor,
       const phi::DenseTensor& in_tensor,
@@ -172,6 +217,17 @@ class ProcessGroupWithStream : public ProcessGroup {
                          /*use_calc_stream*/ false);
   }
 
+  std::shared_ptr<ProcessGroup::Task> ReduceScatter(
+      phi::DenseTensor* out_tensor,
+      const phi::DenseTensor& in_tensor,
+      const ReduceScatterOptions& opts,
+      bool sync_op,
+      bool use_calc_stream) override {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "ProcessGroupWithStream (%s) does not support reduce_scatter.",
+        GetBackendName()));
+  }
+
   std::shared_ptr<ProcessGroup::Task> Scatter(phi::DenseTensor* out_tensor,
                                               const phi::DenseTensor& in_tensor,
                                               const ScatterOptions& opts,
@@ -183,6 +239,16 @@ class ProcessGroupWithStream : public ProcessGroup {
                    /*use_calc_stream*/ false);
   }
 
+  std::shared_ptr<ProcessGroup::Task> Scatter(phi::DenseTensor* out_tensor,
+                                              const phi::DenseTensor& in_tensor,
+                                              const ScatterOptions& opts,
+                                              bool sync_op,
+                                              bool use_calc_stream) override {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "ProcessGroupWithStream (%s) does not support scatter.",
+        GetBackendName()));
+  }
+
   std::shared_ptr<ProcessGroup::Task> Recv(phi::DenseTensor* tensor,
                                            int src_rank,
                                            bool sync_op) override {
@@ -218,6 +284,17 @@ class ProcessGroupWithStream : public ProcessGroup {
                 use_calc_stream);
   }
 
+  std::shared_ptr<ProcessGroup::Task> Recv(phi::DenseTensor* tensor,
+                                           int src_rank,
+                                           int64_t offset,
+                                           int64_t numel,
+                                           bool sync_op,
+                                           bool use_calc_stream) override {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "ProcessGroupWithStream (%s) does not support recv.",
+        GetBackendName()));
+  }
+
   std::shared_ptr<ProcessGroup::Task> Send(const phi::DenseTensor& tensor,
                                            int dst_rank,
                                            bool sync_op) override {
@@ -252,6 +329,17 @@ class ProcessGroupWithStream : public ProcessGroup {
                 sync_op,
                 use_calc_stream);
   }
+
+  std::shared_ptr<ProcessGroup::Task> Send(const phi::DenseTensor& tensor,
+                                           int dst_rank,
+                                           int64_t offset,
+                                           int64_t numel,
+                                           bool sync_op,
+                                           bool use_calc_stream) override {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "ProcessGroupWithStream (%s) does not support send.",
+        GetBackendName()));
+  }
 };
 
 }  // namespace distributed
diff --git a/paddle/fluid/distributed/collective/process_group_without_stream.h b/paddle/fluid/distributed/collective/process_group_without_stream.h
index ee05906966..dd22c0f1e4 100644
--- a/paddle/fluid/distributed/collective/process_group_without_stream.h
+++ b/paddle/fluid/distributed/collective/process_group_without_stream.h
@@ -28,11 +28,6 @@ class ProcessGroupWithoutStream : public ProcessGroup {
 
   virtual ~ProcessGroupWithoutStream() = default;
 
-  // methods from base class
-  using ProcessGroup::AllGather;
-  using ProcessGroup::Recv;
-  using ProcessGroup::Send;
-
   std::shared_ptr<ProcessGroup::Task> AllGather(
       phi::DenseTensor* out_tensor,
       const phi::DenseTensor& in_tensor,
@@ -44,6 +39,17 @@ class ProcessGroupWithoutStream : public ProcessGroup {
                      sync_op);
   }
 
+  std::shared_ptr<ProcessGroup::Task> AllGather(
+      phi::DenseTensor* out_tensor,
+      const phi::DenseTensor& in_tensor,
+      int64_t offset,
+      int64_t numel,
+      bool sync_op) override {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "ProcessGroupWithoutStream (%s) does not support all_gather.",
+        GetBackendName()));
+  }
+
   std::shared_ptr<ProcessGroup::Task> Recv(phi::DenseTensor* tensor,
                                            int src_rank,
                                            bool sync_op) override {
@@ -54,6 +60,16 @@ class ProcessGroupWithoutStream : public ProcessGroup {
                 sync_op);
   }
 
+  std::shared_ptr<ProcessGroup::Task> Recv(phi::DenseTensor* tensor,
+                                           int src_rank,
+                                           int64_t offset,
+                                           int64_t numel,
+                                           bool sync_op) override {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "ProcessGroupWithoutStream (%s) does not support recv.",
+        GetBackendName()));
+  }
+
   std::shared_ptr<ProcessGroup::Task> Send(const phi::DenseTensor& tensor,
                                            int dst_rank,
                                            bool sync_op) override {
@@ -63,6 +79,16 @@ class ProcessGroupWithoutStream : public ProcessGroup {
                 /*numel*/ -1,  // -1 indicates the whole tensor
                 sync_op);
   }
+
+  std::shared_ptr<ProcessGroup::Task> Send(const phi::DenseTensor& tensor,
+                                           int dst_rank,
+                                           int64_t offset,
+                                           int64_t numel,
+                                           bool sync_op) override {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "ProcessGroupWithoutStream (%s) does not support send.",
+        GetBackendName()));
+  }
 };
 
 }  // namespace distributed
-- 
GitLab
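
For context on the C++ behavior this patch works around: a derived-class declaration of a member function name hides every base-class overload of that name, including virtual ones, during unqualified lookup on the derived type. The sketch below is illustrative only; Base, Derived, and Run are hypothetical names, not identifiers from Paddle. It shows the hidden-overload error and the two standard fixes: a using-declaration (the approach the patch removes) and explicitly redeclaring every overload with a default body (the approach the patch adds, using PADDLE_THROW with phi::errors::Unimplemented as the default).

    #include <iostream>

    struct Base {
      virtual ~Base() = default;
      virtual void Run(int x) { std::cout << "Base::Run(int)\n"; }
      virtual void Run(int x, bool flag) { std::cout << "Base::Run(int, bool)\n"; }
    };

    struct Derived : public Base {
      // Redeclaring one overload hides every Base::Run during lookup on Derived;
      // Run(int, bool) is no longer directly callable through a Derived object.
      void Run(int x) override { std::cout << "Derived::Run(int)\n"; }

      // Fix 1 (the using-declarations this patch removes):
      //   using Base::Run;
      // Fix 2 (what this patch does): also redeclare Run(int, bool) as a virtual
      // override whose default body reports "unimplemented".
    };

    int main() {
      Derived d;
      d.Run(1);                            // OK: calls Derived::Run(int)
      // d.Run(1, true);                   // compile error: overload is hidden
      static_cast<Base&>(d).Run(1, true);  // lookup in Base still finds it
      return 0;
    }

With the approach taken here, every overload stays visible on the intermediate ProcessGroupWithStream/ProcessGroupWithoutStream classes, and a backend that lacks a variant fails loudly at runtime with an Unimplemented error instead of failing overload resolution at compile time.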