/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/platform/device_context.h"

#include <string>
#include <unordered_set>
#include <vector>

#include "paddle/fluid/memory/memory.h"

namespace paddle {
namespace platform {

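// Backing storage for the process-wide pool singleton; it is expected to be
// set up once during framework initialization, before any context lookup.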
DeviceContextPool* DeviceContextPool::pool = nullptr;

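// Returns the context registered for `place`; throws if none was created for
// it (for example, a CUDAPlace in a CPU-only build). A typical lookup,
// assuming the pool has already been initialized elsewhere:
//   auto* ctx = platform::DeviceContextPool::Instance().Get(place);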
platform::DeviceContext* DeviceContextPool::Get(const platform::Place& place) {
  auto it = device_contexts_.find(place);
  if (it == device_contexts_.end()) {
    PADDLE_THROW(
        "'Place' is not supported. Please re-compile with the WITH_GPU "
        "option");
  }
  return it->second.get();
}

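// Builds one context per unique place; duplicates in `places` are collapsed
// through an unordered_set before any context is constructed.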
DeviceContextPool::DeviceContextPool(
    const std::vector<platform::Place>& places) {
  PADDLE_ENFORCE_GT(places.size(), 0);
  using PtrType = std::unique_ptr<DeviceContext>;
  std::unordered_set<Place, PlaceHash> set;
  for (auto& p : places) {
    set.insert(p);
  }

  for (auto& p : set) {
    if (platform::is_cpu_place(p)) {
#ifdef PADDLE_WITH_MKLDNN
      device_contexts_.emplace(
          p, PtrType(new MKLDNNDeviceContext(boost::get<CPUPlace>(p))));
#else
      device_contexts_.emplace(
          p, PtrType(new CPUDeviceContext(boost::get<CPUPlace>(p))));
#endif
    } else if (platform::is_gpu_place(p)) {
#ifdef PADDLE_WITH_CUDA
      device_contexts_.emplace(
          p, PtrType(new CUDADeviceContext(boost::get<CUDAPlace>(p))));
#else
      PADDLE_THROW(
          "'CUDAPlace' is not supported. Please re-compile with the WITH_GPU "
          "option");
#endif
    } else if (platform::is_cuda_pinned_place(p)) {
#ifdef PADDLE_WITH_CUDA
      device_contexts_.emplace(
          p,
          PtrType(new CUDAPinnedDeviceContext(boost::get<CUDAPinnedPlace>(p))));
#else
      PADDLE_THROW(
          "'CUDAPinnedPlace' is not supported. Please re-compile with the "
          "WITH_GPU option");
#endif
    }
  }
}

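// CPU contexts wrap an Eigen::DefaultDevice so host-side kernels are launched
// through the same eigen_device() interface as GPU contexts.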
CPUDeviceContext::CPUDeviceContext() {
  eigen_device_.reset(new Eigen::DefaultDevice());
}

CPUDeviceContext::CPUDeviceContext(CPUPlace place) : place_(place) {
  eigen_device_.reset(new Eigen::DefaultDevice());
}

Eigen::DefaultDevice* CPUDeviceContext::eigen_device() const {
  return eigen_device_.get();
}

Place CPUDeviceContext::GetPlace() const { return place_; }

#ifdef PADDLE_WITH_CUDA

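// Adapts a raw CUDA stream to Eigen's StreamInterface so that expressions
// evaluated through Eigen::GpuDevice are launched on the context's stream.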
class EigenCudaStreamDevice : public Eigen::StreamInterface {
 public:
  EigenCudaStreamDevice() : scratch_(nullptr), semaphore_(nullptr) {
    Eigen::initializeDeviceProp();
  }
  ~EigenCudaStreamDevice() override {}

  void Reinitialize(const cudaStream_t* cuda_stream, CUDAPlace place) {
    stream_ = cuda_stream;
    place_ = place;
    device_prop_ = &Eigen::m_deviceProperties[place.device];
  }

  const cudaStream_t& stream() const override { return *stream_; }

  const cudaDeviceProp& deviceProperties() const override {
    return *device_prop_;
  }

  void* allocate(size_t num_bytes) const override {
    return paddle::memory::Alloc(place_, num_bytes);
  }

  void deallocate(void* buffer) const override {
    paddle::memory::Free(place_, buffer);
  }

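  // Scratch memory for Eigen kernels, allocated lazily; one extra
  // unsigned int is reserved at the tail for semaphore().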
  void* scratchpad() const override {
    if (scratch_ == NULL) {
      scratch_ = allocate(Eigen::kCudaScratchSize + sizeof(unsigned int));
    }
    return scratch_;
  }

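  // The semaphore lives in the word just past kCudaScratchSize inside the
  // scratch buffer and is zero-initialized asynchronously on first use.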
  unsigned int* semaphore() const override {
    if (semaphore_ == NULL) {
      char* scratch =
          static_cast<char*>(scratchpad()) + Eigen::kCudaScratchSize;
      semaphore_ = reinterpret_cast<unsigned int*>(scratch);
      PADDLE_ENFORCE(
          cudaMemsetAsync(semaphore_, 0, sizeof(unsigned int), *stream_));
    }
    return semaphore_;
  }

 private:
  CUDAPlace place_;
  const cudaStream_t* stream_;         // not owned;
  const cudaDeviceProp* device_prop_;  // not owned;
  mutable void* scratch_;
  mutable unsigned int* semaphore_;
};

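// Binds the context to its device, then creates the CUDA stream, the Eigen
// GPU device wrapping that stream, and the cuBLAS handle. cuDNN is optional:
// the handle stays null when the library cannot be loaded.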
CUDADeviceContext::CUDADeviceContext(CUDAPlace place) : place_(place) {
  SetDeviceId(place_.device);
  compute_capability = GetCUDAComputeCapability(place_.device);
  multi_process = GetCUDAMultiProcessors(place_.device);
  max_threads_per_mp = GetCUDAMaxThreadsPerMultiProcessor(place_.device);
  PADDLE_ENFORCE(cudaStreamCreate(&stream_));
  eigen_stream_.reset(new EigenCudaStreamDevice());
  eigen_stream_->Reinitialize(&stream_, place);
  eigen_device_.reset(new Eigen::GpuDevice(eigen_stream_.get()));
  PADDLE_ENFORCE(dynload::cublasCreate(&cublas_handle_));
  PADDLE_ENFORCE(dynload::cublasSetStream(cublas_handle_, stream_));
  if (dynload::HasCUDNN()) {
    PADDLE_ENFORCE(dynload::cudnnCreate(&cudnn_handle_));
    PADDLE_ENFORCE(dynload::cudnnSetStream(cudnn_handle_, stream_));
  } else {
    cudnn_handle_ = nullptr;
  }
}

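// Drains the stream before releasing the handles so nothing is still running
// when the stream itself is destroyed.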
CUDADeviceContext::~CUDADeviceContext() {
  SetDeviceId(place_.device);
  Wait();
  PADDLE_ENFORCE(dynload::cublasDestroy(cublas_handle_));
  if (cudnn_handle_ != nullptr) {
    PADDLE_ENFORCE(dynload::cudnnDestroy(cudnn_handle_));
  }
  eigen_stream_.reset();
  eigen_device_.reset();
  PADDLE_ENFORCE(cudaStreamDestroy(stream_));
}

Place CUDADeviceContext::GetPlace() const { return place_; }

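// Blocks until all work queued on the stream has finished; the recursive
// mutex allows Wait() to be re-entered from the same thread.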
void CUDADeviceContext::Wait() const {
  std::lock_guard<std::recursive_mutex> guard(mutex_);
  PADDLE_ENFORCE(cudaStreamSynchronize(stream_));
  PADDLE_ENFORCE(cudaGetLastError());
}

int CUDADeviceContext::GetComputeCapability() const {
  return compute_capability;
}

int CUDADeviceContext::GetMaxPhysicalThreadCount() const {
  return multi_process * max_threads_per_mp;
}

Eigen::GpuDevice* CUDADeviceContext::eigen_device() const {
  return eigen_device_.get();
}

cublasHandle_t CUDADeviceContext::cublas_handle() const {
  return cublas_handle_;
}

cudnnHandle_t CUDADeviceContext::cudnn_handle() const { return cudnn_handle_; }

cudaStream_t CUDADeviceContext::stream() const { return stream_; }

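// Pinned-memory contexts run on the host, so they reuse Eigen's default CPU
// device rather than a GPU stream.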
CUDAPinnedDeviceContext::CUDAPinnedDeviceContext() {
  eigen_device_.reset(new Eigen::DefaultDevice());
}

CUDAPinnedDeviceContext::CUDAPinnedDeviceContext(CUDAPinnedPlace place)
    : place_(place) {
  eigen_device_.reset(new Eigen::DefaultDevice());
}

Eigen::DefaultDevice* CUDAPinnedDeviceContext::eigen_device() const {
  return eigen_device_.get();
}

Place CUDAPinnedDeviceContext::GetPlace() const { return place_; }
#endif

#ifdef PADDLE_WITH_MKLDNN
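// Extends the CPU context with an MKL-DNN engine plus a name-to-blob map used
// to cache per-context data such as reusable MKL-DNN primitives.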
MKLDNNDeviceContext::MKLDNNDeviceContext(CPUPlace place)
    : CPUDeviceContext(place), engine_(mkldnn::engine::cpu, 0), p_blobs_() {
  p_blobs_.reset(new std::unordered_map<std::string, std::shared_ptr<void>>());
}

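// Stores `data` under `name`, overwriting any blob previously registered with
// the same key.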
void MKLDNNDeviceContext::SetBlob(const std::string& name,
                                  std::shared_ptr<void> data) const {
  std::unordered_map<std::string, std::shared_ptr<void>>* p = p_blobs_.get();

  auto it = p->find(name);

  if (it == p->end()) {
    (*p)[name] = data;  // create new blob
  } else {
    it->second = data;  // set data to existing blob
  }
}

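// Returns the blob registered under `name`, or nullptr if there is none.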
std::shared_ptr<void> MKLDNNDeviceContext::GetBlob(
    const std::string& name) const {
  std::unordered_map<std::string, std::shared_ptr<void>>* p = p_blobs_.get();

  auto it = p->find(name);

  if (it != p->end()) {
    return it->second;
  }

  return nullptr;
}

#endif

}  // namespace platform
}  // namespace paddle