// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <chrono>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/distributed/collective/ProcessGroupStream.h"
#include "paddle/fluid/distributed/store/store.h"
#include "paddle/fluid/platform/cuda_device_guard.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/gen_comm_id_helper.h"
#include "paddle/fluid/platform/place.h"

#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/distributed/collective/NCCLTools.h"
#endif

#ifdef PADDLE_WITH_RCCL
#include "paddle/fluid/platform/dynload/rccl.h"
#else
#include "paddle/fluid/platform/dynload/nccl.h"
#endif

constexpr const char* NCCL_BACKEND_NAME = "NCCL";

namespace paddle {
namespace distributed {

using Place = paddle::platform::Place;

class ProcessGroupNCCL : public ProcessGroupStream {
 public:
  // Tracks the NCCL kernels launched for one collective, together with the
  // CUDA events used to synchronize other streams with their completion.
  class NCCLTask : public ProcessGroupStream::TaskStream,
                   public std::enable_shared_from_this<NCCLTask> {
   public:
    NCCLTask(const std::vector<Place>& places,
             int rank,
             CommType comm_type,
             const std::vector<phi::DenseTensor>& inputs);

    NCCLTask(const std::vector<Place>& places,
             int rank,
             CommType comm_type,
             const std::vector<phi::DenseTensor>& inputs,
             bool sync_op,
             bool use_calc_stream);

    bool IsCompleted();

    void SynchronizeStreams();

    bool Wait(std::chrono::milliseconds timeout = kWaitTimeout);

    void Synchronize();

    void SetOutputs(std::vector<phi::DenseTensor>& outputs);  // NOLINT

    virtual ~NCCLTask();

    void UpdateWaitChain(const phi::DeviceContext& ctx) override;

    std::vector<EventManager> control_events_;
    std::vector<phi::DenseTensor> barrierTensors_;

   protected:
    std::vector<Place> places_;
    std::vector<std::shared_ptr<NCCLCommManager>> ncclComms_;
    std::shared_ptr<std::vector<phi::DenseTensor>> outputs_;

   private:
  };

  ProcessGroupNCCL(const std::shared_ptr<Store>& store,
                   int rank,
                   int size,
                   const platform::Place& place,
                   int gid);

  const std::string GetBackendName() const override {
    return std::string(NCCL_BACKEND_NAME);
  }

  const phi::DeviceContext& GetDeviceContext(const Place& place) const override;

  const phi::DeviceContext& GetDeviceContext(
      const Place& place, bool use_calc_stream) const override;

  std::shared_ptr<ProcessGroup::Task> AllReduce(
      std::vector<phi::DenseTensor>& in_tensors,   // NOLINT
      std::vector<phi::DenseTensor>& out_tensors,  // NOLINT
      const AllreduceOptions& options,
      bool sync_op,
      bool use_calc_stream) override;

  // TODO(liyurui): This API will be moved later
  std::shared_ptr<ProcessGroup::Task> AllReduce(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      const AllreduceOptions& = AllreduceOptions()) override;

  std::shared_ptr<ProcessGroup::Task> Broadcast(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      const BroadcastOptions& = BroadcastOptions()) override;

  std::shared_ptr<ProcessGroup::Task> Broadcast(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      const BroadcastOptions& opts,
      bool sync_op,
      bool use_calc_stream) override;

  std::shared_ptr<ProcessGroup::Task> Barrier(
      const BarrierOptions& = BarrierOptions()) override;

  std::shared_ptr<ProcessGroup::Task> Send(
      std::vector<phi::DenseTensor>& tensors, int dst_rank) override;

  std::shared_ptr<ProcessGroup::Task> Send(
      std::vector<phi::DenseTensor>& tensors,
      int dst_rank,
      bool sync_op,
      bool use_calc_stream) override;

  std::shared_ptr<ProcessGroup::Task> Recv(
      std::vector<phi::DenseTensor>& tensors,
      int src_rank) override;

  std::shared_ptr<ProcessGroup::Task> Recv(
      std::vector<phi::DenseTensor>& tensors,
      int src_rank,
      bool sync_op,
      bool use_calc_stream) override;

  std::shared_ptr<ProcessGroup::Task> Send_Partial(phi::DenseTensor& tensors,
                                                   int dst_rank,
                                                   int64_t offset,
                                                   int64_t length) override;

  std::shared_ptr<ProcessGroup::Task> Send_Partial(
      phi::DenseTensor& tensors,
      int dst_rank,
      int64_t offset,
      int64_t length,
      bool sync_op,
      bool use_calc_stream) override;

  std::shared_ptr<ProcessGroup::Task> Recv_Partial(phi::DenseTensor& tensors,
                                                   int src_rank,
                                                   int64_t offset,
                                                   int64_t length) override;

  std::shared_ptr<ProcessGroup::Task> Recv_Partial(
      phi::DenseTensor& tensors,
      int src_rank,
      int64_t offset,
      int64_t length,
      bool sync_op,
      bool use_calc_stream) override;

  std::shared_ptr<ProcessGroup::Task> AllGather(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors) override;

  std::shared_ptr<ProcessGroup::Task> AllGather(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      bool sync_op,
      bool use_calc_stream) override;

  std::shared_ptr<ProcessGroup::Task> AllGather_Partial(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      int64_t offset,
      int64_t length) override;

  std::shared_ptr<ProcessGroup::Task> AllGather_Partial(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      int64_t offset,
      int64_t length,
      bool sync_op,
      bool use_calc_stream) override;

  std::shared_ptr<ProcessGroup::Task> AllToAll(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors) override;

  std::shared_ptr<ProcessGroup::Task> AllToAll(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      bool sync_op,
      bool use_calc_stream) override;

  std::shared_ptr<ProcessGroup::Task> AllToAll_Single(
      std::vector<phi::DenseTensor>& in,
      std::vector<phi::DenseTensor>& out,
      std::vector<int64_t>& in_sizes,
      std::vector<int64_t>& out_sizes) override;

  std::shared_ptr<ProcessGroup::Task> AllToAllSingle(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      std::vector<int64_t>& in_sizes,
      std::vector<int64_t>& out_sizes,
      bool sync_op,
      bool use_calc_stream) override;

  std::shared_ptr<ProcessGroup::Task> Reduce(
      std::vector<phi::DenseTensor>& tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      const ReduceOptions& opts) override;

  std::shared_ptr<ProcessGroup::Task> Reduce(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      const ReduceOptions& opts,
      bool sync_op,
      bool use_calc_stream) override;

  std::shared_ptr<ProcessGroup::Task> ReduceScatter(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      const ReduceScatterOptions& opts,
      bool sync_op,
      bool use_calc_stream) override;

  std::shared_ptr<ProcessGroup::Task> Scatter(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      const ScatterOptions& opts) override;

  std::shared_ptr<ProcessGroup::Task> Scatter(
      std::vector<phi::DenseTensor>& in_tensors,
      std::vector<phi::DenseTensor>& out_tensors,
      const ScatterOptions& opts,
      bool sync_op,
      bool use_calc_stream) override;

  std::shared_ptr<ProcessGroup::Task> _ReduceScatterBase(
      phi::DenseTensor&,  // NOLINT
      phi::DenseTensor&,  // NOLINT
      const ReduceScatterOptions&) override;

  static void GroupStart();

  static void GroupEnd();

  ncclComm_t NCCLComm(const Place& place) const;

 protected:
  virtual std::shared_ptr<ProcessGroupNCCL::NCCLTask> CreateTask(
      std::vector<Place> places,
      int rank,
      CommType op_type,
      const std::vector<phi::DenseTensor>& inputs);

  virtual std::shared_ptr<ProcessGroupNCCL::NCCLTask> CreateTask(
      const std::vector<Place>& places,
      int rank,
      CommType op_type,
      const std::vector<phi::DenseTensor>& inputs,
      bool sync_op,
      bool use_calc_stream);

 protected:
  std::shared_ptr<Store> store_;
  std::shared_ptr<NCCLCommManager> nccl_comm_;
  std::mutex mutex_;
  std::unordered_map<std::string,
                     std::vector<std::shared_ptr<NCCLCommManager>>>
      places_to_ncclcomm_;

  std::unordered_map<std::string, std::vector<EventManager>> places_to_events_;

  std::unordered_map<std::string,
                     std::vector<std::unique_ptr<phi::GPUContext>>>
      places_to_ctx_;

  std::set<int> used_place_ids_;

 private:
  void BcastNCCLId(std::vector<ncclUniqueId>& nccl_ids,  // NOLINT
                   int root,                             // NOLINT
                   int server_fd);

  void BroadcastUniqueNCCLID(std::vector<ncclUniqueId>& nccl_ids);  // NOLINT

  template <typename Fn>
  std::shared_ptr<ProcessGroup::Task> Collective(
      std::vector<phi::DenseTensor>& inputs,   // NOLINT
      std::vector<phi::DenseTensor>& outputs,  // NOLINT
      Fn fn,
      CommType op_type);

  template <typename Fn>
  std::shared_ptr<ProcessGroup::Task> Collective(
      std::vector<phi::DenseTensor>& inputs,   // NOLINT
      std::vector<phi::DenseTensor>& outputs,  // NOLINT
      Fn fn,
      CommType comm_type,
      bool sync_op,
      bool use_calc_stream);

  template <typename Fn>
  void Collective(const phi::DenseTensor*,
                  phi::DenseTensor*,
                  Fn fn,
                  CommType op_type);

  template <typename Fn>
  std::shared_ptr<ProcessGroup::Task> PointToPoint(
      std::vector<phi::DenseTensor>& tensors,  // NOLINT
      Fn fn,
      int dst_rank,
      CommType op_type);

  template <typename Fn>
  std::shared_ptr<ProcessGroup::Task> PointToPoint(
      std::vector<phi::DenseTensor>& tensors,  // NOLINT
      Fn fn,
      int dst_rank,
      CommType op_type,
      bool sync_op,
      bool use_calc_stream);

  void CreateNCCLManagerCache(const std::string& places_key,
                              const std::vector<Place>& places);

  void CheckSplitSizes(std::vector<int64_t>* split_sizes,
                       std::vector<int64_t> tensor_shape);
};

}  //  namespace distributed
}  //  namespace paddle
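
// Illustrative usage sketch (a hedged example, not part of the header's
// contract). It assumes a TCPStore created elsewhere, a single GPU per rank,
// and that `rank`, `world_size`, `dev_id`, and `tensor` are provided by the
// caller; the two-argument AllReduce overload above relies on the defaults in
// AllreduceOptions (sum reduction).
//
//   auto store = std::make_shared<paddle::distributed::TCPStore>(/*...*/);
//   paddle::platform::CUDAPlace place(dev_id);
//   auto pg = std::make_shared<paddle::distributed::ProcessGroupNCCL>(
//       store, rank, world_size, place, /*gid=*/0);
//
//   std::vector<phi::DenseTensor> in{tensor}, out{tensor};
//   auto task = pg->AllReduce(in, out);  // default AllreduceOptions()
//   task->Wait();  // blocks until the NCCL kernel and its stream
//                  // dependencies have completed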