diff --git a/paddle/fluid/distributed/collective/process_group_with_stream.h b/paddle/fluid/distributed/collective/process_group_with_stream.h
index 375d230cf68fb21e86cd9876b6a1f50f2aaebe16..cfdb7b16553f30b5e2f46346889e5f2ee20a46ff 100644
--- a/paddle/fluid/distributed/collective/process_group_with_stream.h
+++ b/paddle/fluid/distributed/collective/process_group_with_stream.h
@@ -62,17 +62,6 @@ class ProcessGroupWithStream : public ProcessGroup {
 
   virtual ~ProcessGroupWithStream() = default;
 
-  // methods from base class
-  using ProcessGroup::AllGather;
-  using ProcessGroup::AllReduce;
-  using ProcessGroup::AllToAll;
-  using ProcessGroup::Broadcast;
-  using ProcessGroup::Recv;
-  using ProcessGroup::Reduce;
-  using ProcessGroup::ReduceScatter;
-  using ProcessGroup::Scatter;
-  using ProcessGroup::Send;
-
   std::shared_ptr<ProcessGroup::Task> AllGather(
       phi::DenseTensor* out_tensor,
       const phi::DenseTensor& in_tensor,
@@ -111,6 +100,18 @@ class ProcessGroupWithStream : public ProcessGroup {
                      use_calc_stream);
   }
 
+  std::shared_ptr<ProcessGroup::Task> AllGather(
+      phi::DenseTensor* out_tensor,
+      const phi::DenseTensor& in_tensor,
+      int64_t offset,
+      int64_t numel,
+      bool sync_op,
+      bool use_calc_stream) override {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "ProcessGroupWithStream (%s) does not support all_gather.",
+        GetBackendName()));
+  }
+
   std::shared_ptr<ProcessGroup::Task> AllReduce(
       phi::DenseTensor* out_tensor,
       const phi::DenseTensor& in_tensor,
@@ -123,6 +124,17 @@ class ProcessGroupWithStream : public ProcessGroup {
                      /*use_calc_stream*/ false);
   }
 
+  std::shared_ptr<ProcessGroup::Task> AllReduce(
+      phi::DenseTensor* out_tensor,
+      const phi::DenseTensor& in_tensor,
+      const AllreduceOptions& opts,
+      bool sync_op,
+      bool use_calc_stream) override {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "ProcessGroupWithStream (%s) does not support all_reduce.",
+        GetBackendName()));
+  }
+
   std::shared_ptr<ProcessGroup::Task> AllToAll(
       phi::DenseTensor* out_tensor,
       const phi::DenseTensor& in_tensor,
@@ -137,6 +149,18 @@ class ProcessGroupWithStream : public ProcessGroup {
                     /*use_calc_stream*/ false);
   }
 
+  std::shared_ptr<ProcessGroup::Task> AllToAll(
+      phi::DenseTensor* out_tensor,
+      const phi::DenseTensor& in_tensor,
+      const std::vector<int64_t>& out_size_each_rank,
+      const std::vector<int64_t>& in_size_each_rank,
+      bool sync_op,
+      bool use_calc_stream) override {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "ProcessGroupWithStream (%s) does not support all_to_all.",
+        GetBackendName()));
+  }
+
   std::shared_ptr<ProcessGroup::Task> Broadcast(
       phi::DenseTensor* out_tensor,
       const phi::DenseTensor& in_tensor,
@@ -149,6 +173,17 @@ class ProcessGroupWithStream : public ProcessGroup {
                      /*use_calc_stream*/ false);
   }
 
+  std::shared_ptr<ProcessGroup::Task> Broadcast(
+      phi::DenseTensor* out_tensor,
+      const phi::DenseTensor& in_tensor,
+      const BroadcastOptions& opts,
+      bool sync_op,
+      bool use_calc_stream) override {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "ProcessGroupWithStream (%s) does not support broadcast.",
+        GetBackendName()));
+  }
+
   std::shared_ptr<ProcessGroup::Task> Reduce(phi::DenseTensor* out_tensor,
                                              const phi::DenseTensor& in_tensor,
                                              const ReduceOptions& opts,
@@ -160,6 +195,16 @@ class ProcessGroupWithStream : public ProcessGroup {
                   /*use_calc_stream*/ false);
   }
 
+  std::shared_ptr<ProcessGroup::Task> Reduce(phi::DenseTensor* out_tensor,
+                                             const phi::DenseTensor& in_tensor,
+                                             const ReduceOptions& opts,
+                                             bool sync_op,
+                                             bool use_calc_stream) override {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "ProcessGroupWithStream (%s) does not support reduce.",
+        GetBackendName()));
+  }
+
   std::shared_ptr<ProcessGroup::Task> ReduceScatter(
       phi::DenseTensor* out_tensor,
       const phi::DenseTensor& in_tensor,
@@ -172,6 +217,17 @@ class ProcessGroupWithStream : public ProcessGroup {
                          /*use_calc_stream*/ false);
   }
 
+  std::shared_ptr<ProcessGroup::Task> ReduceScatter(
+      phi::DenseTensor* out_tensor,
+      const phi::DenseTensor& in_tensor,
+      const ReduceScatterOptions& opts,
+      bool sync_op,
+      bool use_calc_stream) override {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "ProcessGroupWithStream (%s) does not support reduce_scatter.",
+        GetBackendName()));
+  }
+
   std::shared_ptr<ProcessGroup::Task> Scatter(phi::DenseTensor* out_tensor,
                                               const phi::DenseTensor& in_tensor,
                                               const ScatterOptions& opts,
@@ -183,6 +239,16 @@ class ProcessGroupWithStream : public ProcessGroup {
                    /*use_calc_stream*/ false);
   }
 
+  std::shared_ptr<ProcessGroup::Task> Scatter(phi::DenseTensor* out_tensor,
+                                              const phi::DenseTensor& in_tensor,
+                                              const ScatterOptions& opts,
+                                              bool sync_op,
+                                              bool use_calc_stream) override {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "ProcessGroupWithStream (%s) does not support scatter.",
+        GetBackendName()));
+  }
+
   std::shared_ptr<ProcessGroup::Task> Recv(phi::DenseTensor* tensor,
                                            int src_rank,
                                            bool sync_op) override {
@@ -218,6 +284,17 @@ class ProcessGroupWithStream : public ProcessGroup {
                 use_calc_stream);
   }
 
+  std::shared_ptr<ProcessGroup::Task> Recv(phi::DenseTensor* tensor,
+                                           int src_rank,
+                                           int64_t offset,
+                                           int64_t numel,
+                                           bool sync_op,
+                                           bool use_calc_stream) override {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "ProcessGroupWithStream (%s) does not support recv.",
+        GetBackendName()));
+  }
+
   std::shared_ptr<ProcessGroup::Task> Send(const phi::DenseTensor& tensor,
                                            int dst_rank,
                                            bool sync_op) override {
@@ -252,6 +329,17 @@ class ProcessGroupWithStream : public ProcessGroup {
                 sync_op,
                 use_calc_stream);
   }
+
+  std::shared_ptr<ProcessGroup::Task> Send(const phi::DenseTensor& tensor,
+                                           int dst_rank,
+                                           int64_t offset,
+                                           int64_t numel,
+                                           bool sync_op,
+                                           bool use_calc_stream) override {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "ProcessGroupWithStream (%s) does not support send.",
+        GetBackendName()));
+  }
 };
 
 }  // namespace distributed
diff --git a/paddle/fluid/distributed/collective/process_group_without_stream.h b/paddle/fluid/distributed/collective/process_group_without_stream.h
index ee05906966ac46aed6d8b84d0f869171a505142d..dd22c0f1e4cbdbae49b25a8aaa207064663e08a2 100644
--- a/paddle/fluid/distributed/collective/process_group_without_stream.h
+++ b/paddle/fluid/distributed/collective/process_group_without_stream.h
@@ -28,11 +28,6 @@ class ProcessGroupWithoutStream : public ProcessGroup {
 
   virtual ~ProcessGroupWithoutStream() = default;
 
-  // methods from base class
-  using ProcessGroup::AllGather;
-  using ProcessGroup::Recv;
-  using ProcessGroup::Send;
-
   std::shared_ptr<ProcessGroup::Task> AllGather(
       phi::DenseTensor* out_tensor,
      const phi::DenseTensor& in_tensor,
@@ -44,6 +39,17 @@ class ProcessGroupWithoutStream : public ProcessGroup {
                      sync_op);
   }
 
+  std::shared_ptr<ProcessGroup::Task> AllGather(
+      phi::DenseTensor* out_tensor,
+      const phi::DenseTensor& in_tensor,
+      int64_t offset,
+      int64_t numel,
+      bool sync_op) override {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "ProcessGroupWithoutStream (%s) does not support all_gather.",
+        GetBackendName()));
+  }
+
   std::shared_ptr<ProcessGroup::Task> Recv(phi::DenseTensor* tensor,
                                            int src_rank,
                                            bool sync_op) override {
@@ -54,6 +60,16 @@ class ProcessGroupWithoutStream : public ProcessGroup {
                 sync_op);
   }
 
+  std::shared_ptr<ProcessGroup::Task> Recv(phi::DenseTensor* tensor,
+                                           int src_rank,
+                                           int64_t offset,
+                                           int64_t numel,
+                                           bool sync_op) override {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "ProcessGroupWithoutStream (%s) does not support recv.",
+        GetBackendName()));
+  }
+
   std::shared_ptr<ProcessGroup::Task> Send(const phi::DenseTensor& tensor,
                                            int dst_rank,
                                            bool sync_op) override {
@@ -63,6 +79,16 @@ class ProcessGroupWithoutStream : public ProcessGroup {
                 /*numel*/ -1,  // -1 indicates the whole tensor
                 sync_op);
   }
+
+  std::shared_ptr<ProcessGroup::Task> Send(const phi::DenseTensor& tensor,
+                                           int dst_rank,
+                                           int64_t offset,
+                                           int64_t numel,
+                                           bool sync_op) override {
+    PADDLE_THROW(phi::errors::Unimplemented(
+        "ProcessGroupWithoutStream (%s) does not support send.",
+        GetBackendName()));
+  }
 };
 
 }  // namespace distributed
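
Reviewer note, not part of the patch: the diff drops the `using ProcessGroup::...;` re-exports and instead declares every overload directly on the intermediate classes, with the deepest (offset/numel/stream) signature defaulting to a PADDLE_THROW of an Unimplemented error; concrete backends then override only the signatures they actually support. The standalone C++ sketch below reproduces that overload-layering pattern with invented names (ProcessGroupBase, ProcessGroupWithStreamToy, FakeNcclGroup); it illustrates the name-hiding behavior involved and is not Paddle code.

#include <iostream>
#include <stdexcept>
#include <string>

class ProcessGroupBase {
 public:
  virtual ~ProcessGroupBase() = default;
  virtual std::string GetBackendName() const = 0;
  // Short form used by most callers.
  virtual void AllGather(int* out, const int* in, bool sync_op) = 0;
  // Full form with stream control.
  virtual void AllGather(int* out, const int* in, bool sync_op,
                         bool use_calc_stream) = 0;
};

class ProcessGroupWithStreamToy : public ProcessGroupBase {
 public:
  // The short form forwards to the fullest signature, as in the patch.
  void AllGather(int* out, const int* in, bool sync_op) override {
    AllGather(out, in, sync_op, /*use_calc_stream*/ false);
  }
  // The fullest signature throws unless a concrete backend overrides it,
  // mirroring the PADDLE_THROW(Unimplemented(...)) defaults added here.
  void AllGather(int* out, const int* in, bool sync_op,
                 bool use_calc_stream) override {
    (void)out; (void)in; (void)sync_op; (void)use_calc_stream;
    throw std::runtime_error(GetBackendName() +
                             " does not support all_gather.");
  }
};

class FakeNcclGroup : public ProcessGroupWithStreamToy {
 public:
  // Overriding one overload hides the rest; re-expose the short form.
  using ProcessGroupWithStreamToy::AllGather;
  std::string GetBackendName() const override { return "FAKE_NCCL"; }
  void AllGather(int* out, const int* in, bool sync_op,
                 bool use_calc_stream) override {
    (void)sync_op; (void)use_calc_stream;
    *out = *in;  // stand-in for the real collective
  }
};

int main() {
  FakeNcclGroup pg;
  int in = 42, out = 0;
  pg.AllGather(&out, &in, /*sync_op=*/true);  // forwards to the full overload
  std::cout << out << "\n";                   // prints 42
}

The `using` declaration in FakeNcclGroup shows why the base-class re-exports existed in the first place: a derived class that overrides only one overload hides the others. Once the intermediate class declares every overload itself, as this patch does, the re-exports become redundant at that level, though backends overriding a subset of overloads still need them.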