// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)

#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include "boost/variant.hpp"
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace platform {

// In order to apply hierarchical communication with NCCL, we need
// a communication ring containing NCCL communicators associated to a global
// ncclUniqueId. E.g. for a hierarchical case,
//
//    11 - 12   21 - 22
//     |    |    |    |
//    13 - 14 - 23 - 24
//          |    |
//    31 - 32 - 41 - 42
//     |    |    |    |
//    33 - 34   43 - 44
//
// we group (14,23,32,41) as the top ring, and (11,12,13,14), (21,22,23,24),
// (31,32,33,34), (41,42,43,44) as bottom rings respectively.
//
// We could also use a single communication ring for the flattened case.
//
// The NCCLComm instance is created and reserved in the NCCLCommContext
// singleton with a global user-specified group id.
class NCCLComm {
 public:
  virtual int ring_id() const = 0;
  virtual int nranks() const = 0;
  virtual int rank() const = 0;
  virtual int local_rank() const = 0;
  virtual ncclComm_t comm() const = 0;
  virtual cudaStream_t stream() const = 0;
  virtual CUDADeviceContext* DevCtx() const = 0;
  virtual ~NCCLComm() = default;
};

// A singleton NCCL communicator context that reserves communication ring ids.
// Multiprocessing mode is assumed.
class NCCLCommContext {
 public:
  static NCCLCommContext& Instance() {
    static NCCLCommContext comm_ctx;
    return comm_ctx;
  }

  ~NCCLCommContext();

  NCCLComm* CreateNCCLComm(ncclUniqueId* nccl_id, int nranks, int rank,
                           int dev_id, int ring_id = 0);

  CUDADeviceContext* DevCtx(int dev_id) const {
    PADDLE_ENFORCE(dev_ctx_map_.count(dev_id),
                   "CUDADeviceContext at device %d has not been initialized",
                   dev_id);
    return dev_ctx_map_.at(dev_id).get();
  }

  CUDADeviceContext* DevCtx(platform::Place p) const {
    return DevCtx(boost::get<CUDAPlace>(p).device);
  }

  // retrieve a communicator by the ring id
  NCCLComm* Get(int ring_id) const {
    PADDLE_ENFORCE(comm_map_.count(ring_id),
                   "communicator in ring id %d has not been initialized",
                   ring_id);
    return comm_map_.at(ring_id).get();
  }

 private:
  // ring id to NCCLComm
  std::unordered_map<int, std::unique_ptr<NCCLComm>> comm_map_;

  // device id to CUDADeviceContext
  std::unordered_map<int, std::unique_ptr<CUDADeviceContext>> dev_ctx_map_;

  NCCLCommContext() = default;
  NCCLCommContext(const NCCLCommContext& other) = delete;
  NCCLCommContext& operator=(const NCCLCommContext& other) = delete;
};

}  // namespace platform
}  // namespace paddle

#endif
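
// Illustrative usage sketch (not part of this header's API): assuming the
// ncclUniqueId has been generated on one rank via ncclGetUniqueId and
// broadcast to all participating processes out-of-band, each process could
// register and later look up its communicator roughly as below. The function
// name InitAndUse is hypothetical.
//
//   void InitAndUse(ncclUniqueId* nccl_id, int nranks, int rank, int dev_id) {
//     auto& ctx = paddle::platform::NCCLCommContext::Instance();
//     // Create the communicator and register it under the default ring id 0.
//     ctx.CreateNCCLComm(nccl_id, nranks, rank, dev_id, /*ring_id=*/0);
//     // Retrieve it by ring id; collectives can then be issued on its
//     // dedicated stream, e.g. ncclAllReduce(..., comm->comm(), stream).
//     paddle::platform::NCCLComm* comm = ctx.Get(0);
//     cudaStream_t stream = comm->stream();
//     (void)stream;
//   }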