// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <chrono>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/distributed/collective/process_group.h"
#include "paddle/fluid/distributed/collective/process_group_with_stream.h"
#include "paddle/fluid/platform/device_event.h"
#include "paddle/phi/backends/gpu/forwards.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/device_context.h"
#include "paddle/phi/core/distributed/store/store.h"

namespace paddle {
namespace distributed {

using Place = phi::Place;

class ProcessGroupNCCL final : public ProcessGroupWithStream {
 public:
  class NCCLTask final : public ProcessGroupWithStream::TaskStream,
                         public std::enable_shared_from_this<NCCLTask> {
   public:
    NCCLTask(const Place& place,
             int rank,
             CommType comm_type,
             bool sync_op,
             bool use_calc_stream);
    virtual ~NCCLTask();

    bool IsCompleted() override;
    bool Wait(std::chrono::milliseconds timeout = kWaitTimeout) override;
    void Synchronize() override;
    void UpdateWaitChain(const phi::DeviceContext& ctx) override;

    bool IsBlockCPUInWait() const { return block_cpu_in_wait_; }
    void SetBlockCPUInWait() { block_cpu_in_wait_ = true; }

    // TODO(sunyilun): methods below will be removed later
    NCCLTask(const std::vector<Place>& places,
             int rank,
             CommType CommType,
             const std::vector<phi::DenseTensor>& inputs);

   private:
    bool block_cpu_in_wait_{false};
    platform::DeviceEvent comm_event_;  // event on comm stream
    Place task_place_;
  };

 public:
  static std::shared_ptr<ProcessGroupNCCL> CreateProcessGroupNCCL(
      const std::shared_ptr<phi::distributed::Store>& store,
      int rank,
      int size,
      int gid);

  ProcessGroupNCCL(const std::shared_ptr<phi::distributed::Store>& store,
                   int rank,
                   int size,
                   int gid);

  std::string GetBackendName() const override { return "NCCL"; }

  phi::DeviceContext* GetDeviceContext(const Place& place) const override;

  phi::DeviceContext* GetDeviceContext(const Place& place,
                                       bool use_calc_stream) const override;

  std::shared_ptr<ProcessGroup::Task> AllGather(
      phi::DenseTensor* out_tensor,
      const phi::DenseTensor& in_tensor,
      int64_t offset,
      int64_t numel,
      bool sync_op,
      bool use_calc_stream) override;

  std::shared_ptr<ProcessGroup::Task> AllReduce(
      phi::DenseTensor* out_tensor,
      const phi::DenseTensor& in_tensor,
      const AllreduceOptions& opts,
      bool sync_op,
      bool use_calc_stream) override;

  std::shared_ptr<ProcessGroup::Task> AllToAll(
      phi::DenseTensor* out_tensor,
      const phi::DenseTensor& in_tensor,
      const std::vector<int64_t>& out_size_each_rank,
      const std::vector<int64_t>& in_size_each_rank,
      bool sync_op,
      bool use_calc_stream) override;

  std::shared_ptr<ProcessGroup::Task> Barrier(
      const BarrierOptions& = BarrierOptions()) override;

  std::shared_ptr<ProcessGroup::Task> Broadcast(
      phi::DenseTensor* out_tensor,
      const phi::DenseTensor& in_tensor,
      const BroadcastOptions& opts,
      bool sync_op,
      bool use_calc_stream) override;

  std::shared_ptr<ProcessGroup::Task> Reduce(phi::DenseTensor* out_tensor,
                                             const phi::DenseTensor& in_tensor,
                                             const ReduceOptions& opts,
                                             bool sync_op,
                                             bool use_calc_stream) override;

  std::shared_ptr<ProcessGroup::Task> ReduceScatter(
      phi::DenseTensor* out_tensor,
      const phi::DenseTensor& in_tensor,
      const ReduceScatterOptions& opts,
      bool sync_op,
      bool use_calc_stream) override;
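
  // NOTE: in the collective and point-to-point methods of this class,
  // `sync_op` selects the synchronous variant of the operation, while
  // `use_calc_stream` launches the communication on the calculation stream
  // instead of a dedicated communication stream (and only takes effect when
  // `sync_op` is true). This reading follows the matching flags in the
  // public paddle.distributed Python API; see the framework docs for the
  // exact semantics.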

  std::shared_ptr<ProcessGroup::Task> Scatter(phi::DenseTensor* out_tensor,
                                              const phi::DenseTensor& in_tensor,
                                              const ScatterOptions& opts,
                                              bool sync_op,
                                              bool use_calc_stream) override;

  std::shared_ptr<ProcessGroup::Task> Gather(phi::DenseTensor* out_tensor,
                                             const phi::DenseTensor& in_tensor,
                                             const GatherOptions& opts,
                                             bool sync_op,
                                             bool use_calc_stream) override;

  std::shared_ptr<ProcessGroup::Task> Gather(
      std::vector<phi::DenseTensor>* gather_tensors_ptr,
      const phi::DenseTensor& in_tensor,
      const GatherOptions& opts,
      bool sync_op,
      bool use_calc_stream) override;

  std::shared_ptr<ProcessGroup::Task> Recv(phi::DenseTensor* tensor,
                                           int src_rank,
                                           int64_t offset,
                                           int64_t numel,
                                           bool sync_op,
                                           bool use_calc_stream) override;

  std::shared_ptr<ProcessGroup::Task> Send(const phi::DenseTensor& tensor,
                                           int dst_rank,
                                           int64_t offset,
                                           int64_t numel,
                                           bool sync_op,
                                           bool use_calc_stream) override;

  static void GroupStart();

  static void GroupEnd();

  ncclComm_t NCCLComm(const Place& place) const;

  // TODO(liyurui): This API will be moved later
  std::shared_ptr<ProcessGroup::Task> AllReduce(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      const AllreduceOptions& = AllreduceOptions()) override;

  // TODO(sunyilun): methods below will be removed later
  std::shared_ptr<ProcessGroup::Task> Broadcast(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      const BroadcastOptions& = BroadcastOptions()) override;

  std::shared_ptr<ProcessGroup::Task> Send(
      std::vector<phi::DenseTensor>& tensors, int dst_rank) override;

  std::shared_ptr<ProcessGroup::Task> Recv(
      std::vector<phi::DenseTensor>& tensors, int src_rank) override;

  std::shared_ptr<ProcessGroup::Task> AllGather(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors) override;

  std::shared_ptr<ProcessGroup::Task> AllToAll(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors) override;

  std::shared_ptr<ProcessGroup::Task> Reduce(
      std::vector<phi::DenseTensor>& tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      const ReduceOptions& opts) override;

  std::shared_ptr<ProcessGroup::Task> Scatter(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      const ScatterOptions& opts) override;

 private:
  std::shared_ptr<ProcessGroupNCCL::NCCLTask> CreateTask(const Place& place,
                                                         int rank,
                                                         CommType op_type,
                                                         bool sync_op,
                                                         bool use_calc_stream);

  void BroadcastUniqueNCCLID(ncclUniqueId* nccl_id);

  void CreateNCCLEnvCache(const Place& place, const std::string& place_key);

  void SyncCalcStream(const Place& place);

  std::shared_ptr<ProcessGroup::Task> RunFnInNCCLEnv(
      std::function<void(ncclComm_t, gpuStream_t)> fn,
      const phi::DenseTensor& tensor,
      CommType comm_type,
      bool sync_op,
      bool use_calc_stream);

  // TODO(sunyilun): methods below will be removed later
  std::shared_ptr<ProcessGroupNCCL::NCCLTask> CreateTask(
      std::vector<Place> places,
      int rank,
      CommType op_type,
      const std::vector<phi::DenseTensor>& inputs);

  template <typename Fn>
  std::shared_ptr<ProcessGroup::Task> Collective(
      std::vector<phi::DenseTensor>& inputs,   // NOLINT
      std::vector<phi::DenseTensor>& outputs,  // NOLINT
      Fn fn,
      CommType op_type);

  template <typename Fn>
  std::shared_ptr<ProcessGroup::Task> PointToPoint(
      std::vector<phi::DenseTensor>& tensors,  // NOLINT
      Fn fn,
      int dst_rank,
      CommType op_type);

  void CreateNCCLManagerCache(const std::string& places_key,
                              const std::vector<Place>& places);

 private:
  std::shared_ptr<phi::distributed::Store> store_;

  std::unordered_map<std::string, platform::DeviceEvent>
      place_to_calc_event_;  // event on calc stream
  std::unordered_map<std::string, phi::GPUContext*> place_to_calc_ctx_;
  std::unordered_map<std::string, std::unique_ptr<phi::GPUContext>>
      place_to_comm_ctx_;

  // TODO(sunyilun): attrs below will be removed later
  std::mutex mutex_;
  std::unordered_map<std::string, std::vector<phi::GPUContext*>>
      places_to_ctx_;
};

}  // namespace distributed
}  // namespace paddle
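
// Usage sketch (illustrative only): how a caller might create the group and
// run an all-reduce through this interface. The TCPStore construction and
// the tensor setup below are assumptions for the example, not part of this
// header.
//
//   auto store = std::make_shared<phi::distributed::TCPStore>(/*...*/);
//   auto pg = paddle::distributed::ProcessGroupNCCL::CreateProcessGroupNCCL(
//       store, /*rank=*/0, /*size=*/2, /*gid=*/0);
//
//   phi::DenseTensor in /* = ... filled on the GPU ... */, out;
//   paddle::distributed::AllreduceOptions opts;
//   opts.reduce_op = paddle::distributed::ReduceOp::SUM;
//   auto task = pg->AllReduce(&out, in, opts,
//                             /*sync_op=*/true,
//                             /*use_calc_stream=*/false);
//   task->Wait();  // block until the result is visible to the caller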