Unverified commit 215c7ae7 authored by Wen Sun, committed by GitHub

Fix hidden overloaded functions in process group (#49576)

* fix: fix hidden virtual funcs

* fix: add default impl
Parent 6e80b84d
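
For context, the bug being fixed is C++ member-function name hiding: once a derived class declares any overload of a member function, every base-class overload of that name becomes invisible to name lookup unless it is explicitly re-exposed. A minimal sketch of the effect (Base, Derived, and Run are illustrative names, not from the Paddle sources):

#include <cstdio>

struct Base {
  virtual ~Base() = default;
  virtual void Run(int x) { std::printf("Base::Run(int)\n"); }
  virtual void Run(int x, bool sync) { std::printf("Base::Run(int, bool)\n"); }
};

struct Derived : Base {
  // Overriding one overload hides ALL Base::Run overloads:
  // without `using Base::Run;`, d.Run(1) no longer compiles.
  void Run(int x, bool sync) override { std::printf("Derived::Run(int, bool)\n"); }
};

int main() {
  Derived d;
  // d.Run(1);     // error: no matching function; Base::Run(int) is hidden
  d.Run(1, true);  // OK: calls Derived::Run(int, bool)
  return 0;
}

The intermediate classes previously re-exposed the base overloads with using-declarations (`using ProcessGroup::AllGather;` and so on). This commit removes those declarations and instead overrides each overload explicitly, giving the unsupported ones a default implementation that throws an Unimplemented error, as the diff below shows.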
@@ -62,17 +62,6 @@ class ProcessGroupWithStream : public ProcessGroup {
virtual ~ProcessGroupWithStream() = default;
// methods from base class
using ProcessGroup::AllGather;
using ProcessGroup::AllReduce;
using ProcessGroup::AllToAll;
using ProcessGroup::Broadcast;
using ProcessGroup::Recv;
using ProcessGroup::Reduce;
using ProcessGroup::ReduceScatter;
using ProcessGroup::Scatter;
using ProcessGroup::Send;
std::shared_ptr<ProcessGroup::Task> AllGather(
phi::DenseTensor* out_tensor,
const phi::DenseTensor& in_tensor,
@@ -111,6 +100,18 @@ class ProcessGroupWithStream : public ProcessGroup {
use_calc_stream);
}
std::shared_ptr<ProcessGroup::Task> AllGather(
phi::DenseTensor* out_tensor,
const phi::DenseTensor& in_tensor,
int64_t offset,
int64_t numel,
bool sync_op,
bool use_calc_stream) override {
PADDLE_THROW(phi::errors::Unimplemented(
"ProcessGroupWithStream (%s) does not support all_gather.",
GetBackendName()));
}
std::shared_ptr<ProcessGroup::Task> AllReduce(
phi::DenseTensor* out_tensor,
const phi::DenseTensor& in_tensor,
@@ -123,6 +124,17 @@ class ProcessGroupWithStream : public ProcessGroup {
/*use_calc_stream*/ false);
}
std::shared_ptr<ProcessGroup::Task> AllReduce(
phi::DenseTensor* out_tensor,
const phi::DenseTensor& in_tensor,
const AllreduceOptions& opts,
bool sync_op,
bool use_calc_stream) override {
PADDLE_THROW(phi::errors::Unimplemented(
"ProcessGroupWithStream (%s) does not support all_reduce.",
GetBackendName()));
}
std::shared_ptr<ProcessGroup::Task> AllToAll(
phi::DenseTensor* out_tensor,
const phi::DenseTensor& in_tensor,
@@ -137,6 +149,18 @@ class ProcessGroupWithStream : public ProcessGroup {
/*use_calc_stream*/ false);
}
std::shared_ptr<ProcessGroup::Task> AllToAll(
phi::DenseTensor* out_tensor,
const phi::DenseTensor& in_tensor,
const std::vector<int64_t>& out_size_each_rank,
const std::vector<int64_t>& in_size_each_rank,
bool sync_op,
bool use_calc_stream) override {
PADDLE_THROW(phi::errors::Unimplemented(
"ProcessGroupWithStream (%s) does not support all_to_all.",
GetBackendName()));
}
std::shared_ptr<ProcessGroup::Task> Broadcast(
phi::DenseTensor* out_tensor,
const phi::DenseTensor& in_tensor,
@@ -149,6 +173,17 @@ class ProcessGroupWithStream : public ProcessGroup {
/*use_calc_stream*/ false);
}
std::shared_ptr<ProcessGroup::Task> Broadcast(
phi::DenseTensor* out_tensor,
const phi::DenseTensor& in_tensor,
const BroadcastOptions& opts,
bool sync_op,
bool use_calc_stream) override {
PADDLE_THROW(phi::errors::Unimplemented(
"ProcessGroupWithStream (%s) does not support broadcast.",
GetBackendName()));
}
std::shared_ptr<ProcessGroup::Task> Reduce(phi::DenseTensor* out_tensor,
const phi::DenseTensor& in_tensor,
const ReduceOptions& opts,
@@ -160,6 +195,16 @@ class ProcessGroupWithStream : public ProcessGroup {
/*use_calc_stream*/ false);
}
std::shared_ptr<ProcessGroup::Task> Reduce(phi::DenseTensor* out_tensor,
const phi::DenseTensor& in_tensor,
const ReduceOptions& opts,
bool sync_op,
bool use_calc_stream) override {
PADDLE_THROW(phi::errors::Unimplemented(
"ProcessGroupWithStream (%s) does not support reduce.",
GetBackendName()));
}
std::shared_ptr<ProcessGroup::Task> ReduceScatter(
phi::DenseTensor* out_tensor,
const phi::DenseTensor& in_tensor,
@@ -172,6 +217,17 @@ class ProcessGroupWithStream : public ProcessGroup {
/*use_calc_stream*/ false);
}
std::shared_ptr<ProcessGroup::Task> ReduceScatter(
phi::DenseTensor* out_tensor,
const phi::DenseTensor& in_tensor,
const ReduceScatterOptions& opts,
bool sync_op,
bool use_calc_stream) override {
PADDLE_THROW(phi::errors::Unimplemented(
"ProcessGroupWithStream (%s) does not support reduce_scatter.",
GetBackendName()));
}
std::shared_ptr<ProcessGroup::Task> Scatter(phi::DenseTensor* out_tensor,
const phi::DenseTensor& in_tensor,
const ScatterOptions& opts,
@@ -183,6 +239,16 @@ class ProcessGroupWithStream : public ProcessGroup {
/*use_calc_stream*/ false);
}
std::shared_ptr<ProcessGroup::Task> Scatter(phi::DenseTensor* out_tensor,
const phi::DenseTensor& in_tensor,
const ScatterOptions& opts,
bool sync_op,
bool use_calc_stream) override {
PADDLE_THROW(phi::errors::Unimplemented(
"ProcessGroupWithStream (%s) does not support scatter.",
GetBackendName()));
}
std::shared_ptr<ProcessGroup::Task> Recv(phi::DenseTensor* tensor,
int src_rank,
bool sync_op) override {
@@ -218,6 +284,17 @@ class ProcessGroupWithStream : public ProcessGroup {
use_calc_stream);
}
std::shared_ptr<ProcessGroup::Task> Recv(phi::DenseTensor* tensor,
int src_rank,
int64_t offset,
int64_t numel,
bool sync_op,
bool use_calc_stream) override {
PADDLE_THROW(phi::errors::Unimplemented(
"ProcessGroupWithStream (%s) does not support recv.",
GetBackendName()));
}
std::shared_ptr<ProcessGroup::Task> Send(const phi::DenseTensor& tensor,
int dst_rank,
bool sync_op) override {
@@ -252,6 +329,17 @@ class ProcessGroupWithStream : public ProcessGroup {
sync_op,
use_calc_stream);
}
std::shared_ptr<ProcessGroup::Task> Send(const phi::DenseTensor& tensor,
int dst_rank,
int64_t offset,
int64_t numel,
bool sync_op,
bool use_calc_stream) override {
PADDLE_THROW(phi::errors::Unimplemented(
"ProcessGroupWithStream (%s) does not support send.",
GetBackendName()));
}
};
} // namespace distributed
@@ -28,11 +28,6 @@ class ProcessGroupWithoutStream : public ProcessGroup {
virtual ~ProcessGroupWithoutStream() = default;
// methods from base class
using ProcessGroup::AllGather;
using ProcessGroup::Recv;
using ProcessGroup::Send;
std::shared_ptr<ProcessGroup::Task> AllGather(
phi::DenseTensor* out_tensor,
const phi::DenseTensor& in_tensor,
@@ -44,6 +39,17 @@ class ProcessGroupWithoutStream : public ProcessGroup {
sync_op);
}
std::shared_ptr<ProcessGroup::Task> AllGather(
phi::DenseTensor* out_tensor,
const phi::DenseTensor& in_tensor,
int64_t offset,
int64_t numel,
bool sync_op) override {
PADDLE_THROW(phi::errors::Unimplemented(
"ProcessGroupWithoutStream (%s) does not support all_gather.",
GetBackendName()));
}
std::shared_ptr<ProcessGroup::Task> Recv(phi::DenseTensor* tensor,
int src_rank,
bool sync_op) override {
@@ -54,6 +60,16 @@ class ProcessGroupWithoutStream : public ProcessGroup {
sync_op);
}
std::shared_ptr<ProcessGroup::Task> Recv(phi::DenseTensor* tensor,
int src_rank,
int64_t offset,
int64_t numel,
bool sync_op) override {
PADDLE_THROW(phi::errors::Unimplemented(
"ProcessGroupWithoutStream (%s) does not support recv.",
GetBackendName()));
}
std::shared_ptr<ProcessGroup::Task> Send(const phi::DenseTensor& tensor,
int dst_rank,
bool sync_op) override {
@@ -63,6 +79,16 @@ class ProcessGroupWithoutStream : public ProcessGroup {
/*numel*/ -1, // -1 indicates the whole tensor
sync_op);
}
std::shared_ptr<ProcessGroup::Task> Send(const phi::DenseTensor& tensor,
int dst_rank,
int64_t offset,
int64_t numel,
bool sync_op) override {
PADDLE_THROW(phi::errors::Unimplemented(
"ProcessGroupWithoutStream (%s) does not support send.",
GetBackendName()));
}
};
} // namespace distributed
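
With these throwing defaults in place, a concrete backend deriving from one of these classes only overrides the overloads it actually supports; a call to any other overload resolves to the base-class default and fails loudly at runtime instead of being hidden at compile time. A hypothetical sketch of the intended usage (FakeBackend is illustrative, not part of this commit, and constructor plumbing is omitted, so it is not compilable on its own):

// A backend that only supports broadcast; all other collectives now
// fall through to ProcessGroupWithStream's throwing defaults.
class FakeBackend : public ProcessGroupWithStream {
 public:
  std::string GetBackendName() const override { return "FAKE"; }

  std::shared_ptr<ProcessGroup::Task> Broadcast(
      phi::DenseTensor* out_tensor,
      const phi::DenseTensor& in_tensor,
      const BroadcastOptions& opts,
      bool sync_op,
      bool use_calc_stream) override {
    // ... launch the actual broadcast here ...
    return nullptr;  // placeholder; a real backend returns a Task
  }
};

// Calling, e.g., AllReduce on a FakeBackend instance now raises
// "ProcessGroupWithStream (FAKE) does not support all_reduce."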