/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Copyright (c) 2022 NVIDIA Corporation. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <functional>
#include <future>  // NOLINT
#include <memory>
#include <mutex>  // NOLINT
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/platform/device/gpu/gpu_types.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/backends/custom/custom_context.h"
#include "paddle/phi/backends/gpu/gpu_decls.h"
#include "paddle/phi/core/device_context.h"

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/device/gpu/gpu_helper.h"
#include "paddle/fluid/platform/dynload/cublas.h"
#include "paddle/fluid/platform/dynload/cublasLt.h"
#include "paddle/fluid/platform/dynload/cudnn.h"
#include "paddle/fluid/platform/dynload/cusolver.h"
#include "paddle/fluid/platform/dynload/cusparse.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#if !defined(__APPLE__) && defined(PADDLE_WITH_NCCL)
#include "paddle/fluid/platform/dynload/nccl.h"
#endif
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#endif

#ifdef PADDLE_WITH_HIP
#include "paddle/fluid/platform/device/gpu/gpu_helper.h"  // NOLINT
#include "paddle/fluid/platform/dynload/miopen.h"
#include "paddle/fluid/platform/dynload/rocblas.h"
#include "paddle/phi/backends/gpu/gpu_context.h"  // NOLINT
#if !defined(__APPLE__) && defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/platform/dynload/rccl.h"
#endif
#include "paddle/fluid/platform/device/gpu/gpu_info.h"  // NOLINT
#endif

#if defined(PADDLE_WITH_XPU_BKCL)
#include "xpu/bkcl.h"
#endif

#ifdef PADDLE_WITH_MKLDNN
#include "dnnl.hpp"  // NOLINT
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/phi/backends/onednn/onednn_context.h"
#endif

#include <map>

#include "glog/logging.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/place.h"

#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/platform/device/npu/enforce_npu.h"
#include "paddle/fluid/platform/device/npu/npu_stream.h"
#endif

#include "paddle/phi/backends/device_ext.h"
#include "paddle/phi/backends/stream.h"

#if !defined(PADDLE_WITH_XPU_KP) || defined(__xpu_on_host__)
#include "unsupported/Eigen/CXX11/Tensor"
#endif

namespace Eigen {
struct DefaultDevice;
struct GpuDevice;
}  // namespace Eigen

#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/device/xpu/xpu_header.h"
#include "paddle/fluid/platform/device/xpu/xpu_info.h"
#include "paddle/phi/backends/xpu/xpu_context.h"
#endif

#ifdef PADDLE_WITH_ASCEND_CL
#include "acl/acl.h"
#include "paddle/fluid/platform/device/npu/npu_info.h"
#endif

namespace paddle {
namespace platform {

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
/* Set the value of the global variable allow_tf32_cublas. */
void SetAllowTF32Cublas(bool active);
/* Get the value of the global variable allow_tf32_cublas. */
bool AllowTF32Cublas();

extern bool allow_tf32_cudnn;
/* Set the value of the global variable allow_tf32_cudnn. */
void SetAllowTF32Cudnn(bool active);
/* Get the value of the global variable allow_tf32_cudnn. */
bool AllowTF32Cudnn();
#endif  // PADDLE_WITH_CUDA || PADDLE_WITH_HIP
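// Illustrative sketch (an assumption, not part of the original header) of how
// the TF32 switches above are used around cuBLAS/cuDNN calls:
//
//   #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
//   paddle::platform::SetAllowTF32Cublas(true);  // allow TF32 math in cuBLAS
//   if (paddle::platform::AllowTF32Cudnn()) {
//     // select a TF32-enabled math type when configuring cuDNN descriptors
//   }
//   #endif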
enum DeviceType {
  CPU = 0,
  CUDA = 1,
  XPU = 2,
  NPU = 3,
  IPU = 4,
  MLU = 5,

  MAX_DEVICE_TYPES = 6,
};

DeviceType Place2DeviceType(const platform::Place& place);

constexpr DeviceType kCPU = DeviceType::CPU;
constexpr DeviceType kCUDA = DeviceType::CUDA;
constexpr DeviceType kXPU = DeviceType::XPU;
constexpr DeviceType kNPU = DeviceType::NPU;
constexpr DeviceType kIPU = DeviceType::IPU;
constexpr DeviceType kMLU = DeviceType::MLU;

using DeviceContext = phi::DeviceContext;

template <typename Place>
struct DefaultDeviceContextType;

template <>
struct DefaultDeviceContextType<platform::CPUPlace> {
  using TYPE = phi::CPUContext;
};

// Graphcore IPU
#ifdef PADDLE_WITH_IPU
class IPUDeviceContext : public DeviceContext {
 public:
  IPUDeviceContext() = delete;
  explicit IPUDeviceContext(IPUPlace place);
  virtual ~IPUDeviceContext();
  Eigen::DefaultDevice* eigen_device() const { return nullptr; }
  const Place& GetPlace() const override;
  /*! \brief  Wait for all operations in the stream to complete. */
  void Wait() const override;

 private:
  IPUPlace place_;
};

template <>
struct DefaultDeviceContextType<platform::IPUPlace> {
  using TYPE = IPUDeviceContext;
};
#endif

#ifdef PADDLE_WITH_MLU
class MLUDeviceContext;

template <>
struct DefaultDeviceContextType<platform::MLUPlace>;
#endif

#ifdef PADDLE_WITH_XPU
namespace xpu = baidu::xpu::api;
class XPUDeviceContext : public phi::XPUContext {
 public:
  XPUDeviceContext();
  explicit XPUDeviceContext(XPUPlace place);
  virtual ~XPUDeviceContext();
  Eigen::DefaultDevice* eigen_device() const { return nullptr; }
  xpuStream stream() const { return XPUContext::x_context()->xpu_stream; }
};

template <>
struct DefaultDeviceContextType<platform::XPUPlace> {
  using TYPE = XPUDeviceContext;
};
#endif

#ifdef PADDLE_WITH_ASCEND_CL
class NPUDeviceContext : public DeviceContext {
 public:
  explicit NPUDeviceContext(NPUPlace place);
  virtual ~NPUDeviceContext();
  Eigen::DefaultDevice* eigen_device() const { return nullptr; }
  const Place& GetPlace() const override;
  aclrtContext context() const;

  /*! \brief  Wait for all operations in the stream to complete. */
  void Wait() const override;

  /*! \brief  Return the npu stream held by the device context. */
  aclrtStream stream() const;

  template <typename Callback>
  void AddStreamCallback(Callback&& callback) const {
    return stream_->AddCallback(callback);
  }

  void WaitStreamCallback() const { return stream_->WaitCallback(); }

#if defined(PADDLE_WITH_ASCEND_CL)
  /*! \brief  Return hccl communicators. */
  HcclComm hccl_comm() const { return hccl_comm_; }

  /*! \brief  Set hccl communicators. */
  void set_hccl_comm(HcclComm comm) { hccl_comm_ = comm; }
#endif

 private:
  NPUPlace place_;
  aclrtContext context_;

#ifdef PADDLE_WITH_ASCEND_CL
  // HCCLContext_t hccl_context_;
  HcclComm hccl_comm_{nullptr};
#endif

  // Needs to be the same as the other DeviceContexts,
  // even though eigen_device_ is not used on NPU.
  // NOTE(zhiqiu): why is this needed?
  std::unique_ptr<Eigen::DefaultDevice> eigen_device_;
  std::shared_ptr<stream::NPUStream> stream_;

  DISABLE_COPY_AND_ASSIGN(NPUDeviceContext);
};

template <>
struct DefaultDeviceContextType<platform::NPUPlace> {
  using TYPE = NPUDeviceContext;
};
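// Illustrative sketch (an assumption, not part of the original header) of the
// stream-callback API declared on NPUDeviceContext above; `npu_ctx` stands for
// an NPUDeviceContext* obtained elsewhere:
//
//   npu_ctx->AddStreamCallback([] { VLOG(3) << "npu stream work finished"; });
//   npu_ctx->WaitStreamCallback();  // block until queued callbacks have run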
// Currently, NPUPinnedDeviceContext is only used for data copying.
class NPUPinnedDeviceContext : public DeviceContext {
 public:
  NPUPinnedDeviceContext();
  explicit NPUPinnedDeviceContext(NPUPinnedPlace place);

  const Place& GetPlace() const override;

  Eigen::DefaultDevice* eigen_device() const;

 private:
  NPUPinnedPlace place_;
  std::unique_ptr<Eigen::DefaultDevice> eigen_device_;
};

template <>
struct DefaultDeviceContextType<platform::NPUPinnedPlace> {
  using TYPE = NPUPinnedDeviceContext;
};
#endif

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
class CudnnWorkspaceHandle;
class EigenCudaStreamDevice;

class CudnnWorkspaceHandle {
 public:
  inline CudnnWorkspaceHandle(const phi::GPUContext& dev_ctx, std::mutex* mtx)
      : device_context_(dev_ctx), mtx_(mtx) {}

  template <typename Callback>
  inline void RunFunc(Callback&& cudnn_func, size_t required_workspace_bytes) {
    if (required_workspace_bytes > WorkspaceSize()) {
      ReallocWorkspace(required_workspace_bytes);
    }
    VLOG(2) << "Cudnn workspace size at RunFunc: "
            << static_cast<double>(WorkspaceSize()) / (1 << 20) << " MB";
    {
      std::lock_guard<std::mutex> guard(*mtx_);
      cudnn_func(allocation_ ? allocation_->ptr() : nullptr);
    }
  }

  /*! \brief A thread that calls RunFuncSync() releases the GPU workspace
   *  memory after running the function. Currently this is only used for cuDNN
   *  exhaustive search, and callers must guarantee that the input function is
   *  host-blocking. */
  template <typename Callback>
  inline void RunFuncSync(Callback&& cudnn_func,
                          size_t required_workspace_bytes) {
    RunFunc(cudnn_func, required_workspace_bytes);
    ResetWorkspace();
  }

  void ReallocWorkspace(size_t required_workspace_bytes);

  inline void ResetWorkspace() { allocation_ = nullptr; }

  inline size_t WorkspaceSize() {
    if (allocation_ == nullptr) {
      return 0;
    }
    return allocation_->size();
  }

  CudnnWorkspaceHandle(CudnnWorkspaceHandle&&) = default;
  CudnnWorkspaceHandle& operator=(CudnnWorkspaceHandle&&) = delete;

 private:
  memory::allocation::AllocationPtr allocation_;
  const phi::GPUContext& device_context_;
  std::mutex* mtx_;
};

template <>
struct DefaultDeviceContextType<platform::CUDAPlace> {
  using TYPE = phi::GPUContext;
};

// Currently, CUDAPinnedDeviceContext is only used for data copying.
class CUDAPinnedDeviceContext : public DeviceContext {
 public:
  CUDAPinnedDeviceContext();
  explicit CUDAPinnedDeviceContext(CUDAPinnedPlace place);

  const Place& GetPlace() const override;

  Eigen::DefaultDevice* eigen_device() const;

 private:
  CUDAPinnedPlace place_;
  std::unique_ptr<Eigen::DefaultDevice> eigen_device_;
};

template <>
struct DefaultDeviceContextType<platform::CUDAPinnedPlace> {
  using TYPE = CUDAPinnedDeviceContext;
};
#endif

#ifdef PADDLE_WITH_MKLDNN
using MKLDNNDeviceContextThreadLocals = phi::OneDNNContextThreadLocals;
using MKLDNNDeviceContext = phi::OneDNNContext;
#endif

#ifdef PADDLE_WITH_CUSTOM_DEVICE
class CustomDeviceContext : public phi::CustomContext {
 public:
  explicit CustomDeviceContext(CustomPlace place);
  virtual ~CustomDeviceContext();

  Eigen::DefaultDevice* eigen_device() const { return nullptr; }

  template <typename Callback>
  void AddStreamCallback(Callback&& callback) const {
    return stream_->AddCallback(callback);
  }

  void WaitStreamCallback() const { return stream_->WaitCallback(); }

 private:
  std::shared_ptr<phi::stream::Stream> stream_;
};

template <>
struct DefaultDeviceContextType<platform::CustomPlace> {
  using TYPE = CustomDeviceContext;
};
#else
template <>
struct DefaultDeviceContextType<platform::CustomPlace> {
  using TYPE = DeviceContext;
};
#endif

void EmplaceDeviceContexts(
    std::map<Place, std::shared_future<std::unique_ptr<DeviceContext>>>*
        place_to_device_context,
    const std::vector<platform::Place>& places,
    bool disable_setting_default_stream_for_allocator);
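// Illustrative sketch (an assumption, not part of the original header) of how
// CudnnWorkspaceHandle::RunFunc declared above is typically driven;
// `workspace_handle` and `workspace_bytes` are assumed to come from the
// surrounding conv kernel:
//
//   workspace_handle.RunFunc(
//       [&](void* workspace_ptr) {
//         // invoke the cuDNN routine with the (possibly null) workspace
//       },
//       workspace_bytes);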
/*! \brief Device context pool singleton. */
class DeviceContextPool {
 public:
  static DeviceContextPool& Instance() {
    PADDLE_ENFORCE_NOT_NULL(
        pool,
        platform::errors::PreconditionNotMet(
            "Need to create DeviceContextPool first!"));
    return *pool;
  }

  /*! \brief  Creation should only be done by the Init function. */
  static DeviceContextPool& Init(const std::vector<platform::Place>& places) {
    if (pool == nullptr) {
      pool = new DeviceContextPool(places);
    }
    return *pool;
  }

  static bool IsInitialized() { return pool != nullptr; }

  static void SetPool(DeviceContextPool* dev_pool) { pool = dev_pool; }

  /*! \brief  Return the handle of a single device context. */
  platform::DeviceContext* Get(const platform::Place& place);

  template <typename Place>
  const typename DefaultDeviceContextType<Place>::TYPE* GetByPlace(
      const Place& place) {
    return reinterpret_cast<
        const typename DefaultDeviceContextType<Place>::TYPE*>(Get(place));
  }

  size_t size() const;

  const std::map<Place, std::shared_future<std::unique_ptr<DeviceContext>>>&
  device_contexts() const;

  static void SetDeviceContexts(
      const std::map<Place,
                     std::shared_future<std::unique_ptr<DeviceContext>>>*);

 private:
  explicit DeviceContextPool(const std::vector<platform::Place>& places);

  static DeviceContextPool* pool;
  std::map<Place, std::shared_future<std::unique_ptr<DeviceContext>>>
      device_contexts_;
  static thread_local const std::map<
      Place, std::shared_future<std::unique_ptr<DeviceContext>>>*
      external_device_contexts_;  // not owned

  DISABLE_COPY_AND_ASSIGN(DeviceContextPool);
};

}  // namespace platform
}  // namespace paddle
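// Illustrative sketch (an assumption, not part of the original header) of
// DeviceContextPool usage; it presumes the pool was created during framework
// startup via DeviceContextPool::Init(places):
//
//   auto& pool = paddle::platform::DeviceContextPool::Instance();
//   auto* dev_ctx = pool.Get(place);  // generic platform::DeviceContext*
//   const phi::CPUContext* cpu_ctx =
//       pool.GetByPlace(paddle::platform::CPUPlace());  // typed accessor
//   dev_ctx->Wait();  // wait for work queued on this context to finish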