/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <vector>

#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/platform/dynload/cudnn.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/macros.h"

DECLARE_bool(cudnn_deterministic);

namespace paddle {
namespace platform {

inline const char* cudnnGetErrorString(cudnnStatus_t status) {
  switch (status) {
    case CUDNN_STATUS_SUCCESS:
      return "CUDNN_STATUS_SUCCESS";
    case CUDNN_STATUS_NOT_INITIALIZED:
      return "CUDNN_STATUS_NOT_INITIALIZED";
    case CUDNN_STATUS_ALLOC_FAILED:
      return "CUDNN_STATUS_ALLOC_FAILED";
    case CUDNN_STATUS_BAD_PARAM:
      return "CUDNN_STATUS_BAD_PARAM";
    case CUDNN_STATUS_INTERNAL_ERROR:
      return "CUDNN_STATUS_INTERNAL_ERROR";
    case CUDNN_STATUS_INVALID_VALUE:
      return "CUDNN_STATUS_INVALID_VALUE";
    case CUDNN_STATUS_ARCH_MISMATCH:
      return "CUDNN_STATUS_ARCH_MISMATCH";
    case CUDNN_STATUS_MAPPING_ERROR:
      return "CUDNN_STATUS_MAPPING_ERROR";
    case CUDNN_STATUS_EXECUTION_FAILED:
      return "CUDNN_STATUS_EXECUTION_FAILED";
    case CUDNN_STATUS_NOT_SUPPORTED:
      return "CUDNN_STATUS_NOT_SUPPORTED";
    case CUDNN_STATUS_LICENSE_ERROR:
      return "CUDNN_STATUS_LICENSE_ERROR";
    default:
      return "Unknown cudnn error number";
  }
}

#define CUDNN_VERSION_MIN(major, minor, patch) \
  (CUDNN_VERSION >= ((major)*1000 + (minor)*100 + (patch)))
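// For example, CUDNN_VERSION_MIN(6, 0, 0) expands to (CUDNN_VERSION >= 6000),
// which is the check used below to guard dilation support on cuDNN releases
// older than 6.0.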

#define CUDNN_ENFORCE(condition)                                  \
  do {                                                            \
    cudnnStatus_t status = condition;                             \
    if (status != CUDNN_STATUS_SUCCESS) {                         \
      VLOG(1) << ::paddle::platform::cudnnGetErrorString(status); \
      PADDLE_THROW("cuDNN call failed");                          \
    }                                                             \
  } while (false)
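
// Usage sketch (illustrative): wrap any raw cuDNN call that returns a
// cudnnStatus_t so failures are logged and raised as Paddle exceptions, e.g.
//   cudnnTensorDescriptor_t desc;
//   CUDNN_ENFORCE(dynload::cudnnCreateTensorDescriptor(&desc));
//   CUDNN_ENFORCE(dynload::cudnnDestroyTensorDescriptor(desc));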

#if CUDNN_VERSION < 6000
#pragma message "CUDNN version under 6.0 is supported at best effort."
#pragma message "We strongly encourage you to move to 6.0 and above."
#pragma message "This message is intended to annoy you enough to update."
#pragma message \
    "please see https://docs.nvidia.com/deeplearning/sdk/cudnn-release-notes/"
#endif  // CUDNN_VERSION < 6000

enum class DataLayout {  // Not used yet
  kNHWC,
  kNCHW,
  kNCDHW,
  kNCHW_VECT_C,
};

enum class PoolingMode {
  kMaximum,
  kAverage,
  kMaximumDeterministic,
};

inline cudnnPoolingMode_t GetPoolingMode(const PoolingMode& mode) {
  switch (mode) {
    case PoolingMode::kMaximumDeterministic:
      return CUDNN_POOLING_MAX_DETERMINISTIC;
    case PoolingMode::kAverage:
      return CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
    case PoolingMode::kMaximum:
      return CUDNN_POOLING_MAX;
    default:
      PADDLE_THROW("Unexpected pooling mode.");
  }
}

template <typename T>
class CudnnDataType;

template <>
class CudnnDataType<float16> {
 public:
  static const cudnnDataType_t type = CUDNN_DATA_HALF;
  // The scaling param type is float for HALF and FLOAT tensors
  using ScalingParamType = const float;
  using BatchNormParamType = float;
  static ScalingParamType* kOne() {
    static ScalingParamType v = 1.0;
    return &v;
  }
  static ScalingParamType* kZero() {
    static ScalingParamType v = 0.0;
    return &v;
  }
};

template <>
class CudnnDataType<float> {
 public:
  static const cudnnDataType_t type = CUDNN_DATA_FLOAT;
  using ScalingParamType = const float;
  using BatchNormParamType = float;
  static ScalingParamType* kOne() {
    static ScalingParamType v = 1.0;
    return &v;
  }
  static ScalingParamType* kZero() {
    static ScalingParamType v = 0.0;
    return &v;
  }
};

template <>
class CudnnDataType<double> {
 public:
  static const cudnnDataType_t type = CUDNN_DATA_DOUBLE;
  using ScalingParamType = const double;
  using BatchNormParamType = double;
  static ScalingParamType* kOne() {
    static ScalingParamType v = 1.0;
    return &v;
  }
  static ScalingParamType* kZero() {
    static ScalingParamType v = 0.0;
    return &v;
  }
};
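
// Usage sketch (illustrative): kOne()/kZero() provide the alpha/beta scaling
// factors that cuDNN routines expect as pointers, e.g.
//   auto* alpha = CudnnDataType<T>::kOne();
//   auto* beta = CudnnDataType<T>::kZero();
//   // ... pass alpha/beta to a call such as cudnnConvolutionForward.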

inline cudnnTensorFormat_t GetCudnnTensorFormat(
    const DataLayout& order) {  // Not used yet
  switch (order) {
    case DataLayout::kNHWC:
      return CUDNN_TENSOR_NHWC;
    case DataLayout::kNCHW:
      return CUDNN_TENSOR_NCHW;
    case DataLayout::kNCDHW:
      // NOTE: cuDNN treats Nd tensors the same, so NCHW also covers NCDHW.
      return CUDNN_TENSOR_NCHW;
    default:
      PADDLE_THROW("Unknown cudnn equivalent for order");
  }
  return CUDNN_TENSOR_NCHW;
}

class ScopedTensorDescriptor {
 public:
  ScopedTensorDescriptor() {
    PADDLE_ENFORCE(dynload::cudnnCreateTensorDescriptor(&desc_));
  }
  ~ScopedTensorDescriptor() {
    PADDLE_ENFORCE(dynload::cudnnDestroyTensorDescriptor(desc_));
  }

  inline cudnnTensorDescriptor_t descriptor(const cudnnTensorFormat_t format,
                                            const cudnnDataType_t type,
                                            const std::vector<int>& dims,
                                            const int groups = 1) {
    // The format argument is not used yet; support will be added later.
    std::vector<int> strides(dims.size());
    strides[dims.size() - 1] = 1;
    for (int i = dims.size() - 2; i >= 0; i--) {
      strides[i] = dims[i + 1] * strides[i + 1];
    }
    // Update tensor descriptor dims setting if groups > 1
    // NOTE: Assume using NCHW or NCDHW order
    std::vector<int> dims_with_group(dims.begin(), dims.end());  // copy
    if (groups > 1) {
      dims_with_group[1] = dims_with_group[1] / groups;
    }
    PADDLE_ENFORCE(dynload::cudnnSetTensorNdDescriptor(
        desc_, type, dims_with_group.size(), dims_with_group.data(),
        strides.data()));
    return desc_;
  }

  template <typename T>
  inline cudnnTensorDescriptor_t descriptor(const DataLayout& order,
                                            const std::vector<int>& dims,
                                            const int groups = 1) {
    return descriptor(GetCudnnTensorFormat(order), CudnnDataType<T>::type, dims,
                      groups);
  }

 private:
  cudnnTensorDescriptor_t desc_;
  DISABLE_COPY_AND_ASSIGN(ScopedTensorDescriptor);
};
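
// Usage sketch (illustrative; the dimension values are hypothetical):
//   ScopedTensorDescriptor input_desc;
//   std::vector<int> dims = {batch, channels, height, width};  // NCHW order
//   cudnnTensorDescriptor_t cudnn_input =
//       input_desc.descriptor<float>(DataLayout::kNCHW, dims);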

class ScopedFilterDescriptor {
 public:
  ScopedFilterDescriptor() {
    PADDLE_ENFORCE(dynload::cudnnCreateFilterDescriptor(&desc_));
  }
  ~ScopedFilterDescriptor() {
    PADDLE_ENFORCE(dynload::cudnnDestroyFilterDescriptor(desc_));
  }

  inline cudnnFilterDescriptor_t descriptor(const cudnnTensorFormat_t format,
                                            const cudnnDataType_t type,
                                            const std::vector<int>& kernel,
                                            const int groups = 1) {
    // filter layout: MCHW(MCDHW), where M is the number of
    // output image channels, C is the number of input image channels,
    // D is the depth of the filter, H is the height of the filter, and W is the
    // width of the filter.
    std::vector<int> kernel_with_group(kernel.begin(), kernel.end());
    if (groups > 1) {
      kernel_with_group[0] /= groups;
      // NOTE: The filter's input channel dimension (C) is already asserted
      // to be C/groups.
    }
    PADDLE_ENFORCE(dynload::cudnnSetFilterNdDescriptor(
        desc_, type, format, kernel_with_group.size(),
        kernel_with_group.data()));
    return desc_;
  }

  template <typename T>
  inline cudnnFilterDescriptor_t descriptor(const DataLayout& order,
                                            const std::vector<int>& kernel,
                                            const int groups = 1) {
    return descriptor(GetCudnnTensorFormat(order), CudnnDataType<T>::type,
                      kernel, groups);
  }

 private:
  cudnnFilterDescriptor_t desc_;
  DISABLE_COPY_AND_ASSIGN(ScopedFilterDescriptor);
};

class ScopedConvolutionDescriptor {
 public:
  ScopedConvolutionDescriptor() {
    PADDLE_ENFORCE(dynload::cudnnCreateConvolutionDescriptor(&desc_));
  }
  ~ScopedConvolutionDescriptor() {
    PADDLE_ENFORCE(dynload::cudnnDestroyConvolutionDescriptor(desc_));
  }

  inline cudnnConvolutionDescriptor_t descriptor(
      cudnnDataType_t type, const std::vector<int>& pads,
      const std::vector<int>& strides, const std::vector<int>& dilations) {
    PADDLE_ENFORCE_EQ(pads.size(), strides.size());
    PADDLE_ENFORCE_EQ(pads.size(), dilations.size());

#if !CUDNN_VERSION_MIN(6, 0, 0)
    // cuDNN v5 does not support dilated convolution; the argument is called
    // upscale instead of dilations, and it must be one.
    for (size_t i = 0; i < dilations.size(); ++i) {
      PADDLE_ENFORCE_EQ(
          dilations[i], 1,
          "Dilations conv is not supported in this cuDNN version(%d.%d.%d).",
          CUDNN_VERSION / 1000, CUDNN_VERSION % 1000 / 100,
          CUDNN_VERSION % 100);
    }
#endif

    cudnnDataType_t compute_type =
        (type == CUDNN_DATA_DOUBLE) ? CUDNN_DATA_DOUBLE : CUDNN_DATA_FLOAT;
    PADDLE_ENFORCE(dynload::cudnnSetConvolutionNdDescriptor(
        desc_, pads.size(), pads.data(), strides.data(), dilations.data(),
        CUDNN_CROSS_CORRELATION, compute_type));
    return desc_;
  }

  template <typename T>
  inline cudnnConvolutionDescriptor_t descriptor(
      const std::vector<int>& pads, const std::vector<int>& strides,
      const std::vector<int>& dilations) {
    return descriptor(CudnnDataType<T>::type, pads, strides, dilations);
  }

 private:
  cudnnConvolutionDescriptor_t desc_;
  DISABLE_COPY_AND_ASSIGN(ScopedConvolutionDescriptor);
};
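
// Usage sketch (illustrative; the pads/strides/dilations values are
// hypothetical):
//   ScopedConvolutionDescriptor conv_desc;
//   cudnnConvolutionDescriptor_t cudnn_conv =
//       conv_desc.descriptor<float>({0, 0}, {1, 1}, {1, 1});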

class ScopedPoolingDescriptor {
 public:
  ScopedPoolingDescriptor() {
    PADDLE_ENFORCE(dynload::cudnnCreatePoolingDescriptor(&desc_));
  }
  ~ScopedPoolingDescriptor() {
    PADDLE_ENFORCE(dynload::cudnnDestroyPoolingDescriptor(desc_));
  }

  inline cudnnPoolingDescriptor_t descriptor(const PoolingMode& mode,
                                             const std::vector<int>& kernel,
                                             const std::vector<int>& pads,
                                             const std::vector<int>& strides) {
    PADDLE_ENFORCE_EQ(kernel.size(), pads.size());
    PADDLE_ENFORCE_EQ(kernel.size(), strides.size());
    PADDLE_ENFORCE(dynload::cudnnSetPoolingNdDescriptor(
        desc_, (GetPoolingMode(mode)),
        CUDNN_PROPAGATE_NAN,  // Always propagate nans.
        kernel.size(), kernel.data(), pads.data(), strides.data()));
    return desc_;
  }

 private:
  cudnnPoolingDescriptor_t desc_;
  DISABLE_COPY_AND_ASSIGN(ScopedPoolingDescriptor);
};

inline bool CanCUDNNBeUsed(const framework::ExecutionContext& ctx) {
  bool use_cudnn = ctx.Attr<bool>("use_cudnn");
  use_cudnn &= paddle::platform::is_gpu_place(ctx.GetPlace());
#ifdef PADDLE_WITH_CUDA
  if (use_cudnn) {
    auto& dev_ctx = ctx.device_context<platform::CUDADeviceContext>();
    use_cudnn &= dev_ctx.cudnn_handle() != nullptr;
  }
#endif
  return use_cudnn;
}
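
// Usage sketch (illustrative): operator kernel-selection code can call this
// helper to decide whether to dispatch to a cuDNN kernel; the check covers
// the "use_cudnn" attribute, the place, and a live cuDNN handle, e.g.
//   if (platform::CanCUDNNBeUsed(ctx)) {
//     // dispatch to the cuDNN kernel implementation
//   }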

}  // namespace platform
}  // namespace paddle