/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <algorithm>
#include <cfloat>
#include <string>
#include <vector>
#include "cub/cub.cuh"
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/operators/batch_norm_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/cudnn_helper.h"
#include "paddle/fluid/platform/float16.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using DataLayout = framework::DataLayout;
template <typename T>
using CudnnDataType = platform::CudnnDataType<T>;
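// Even for float16 data, cuDNN expects float32 scale, bias, mean and
// variance; BatchNormParamType<T> resolves to that parameter type.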
template <typename T>
using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType;

template <typename T>
class BatchNormKernel<platform::CUDADeviceContext, T>
    : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
                   "It must use CUDAPlace.");
    double epsilon = static_cast<double>(ctx.Attr<float>("epsilon"));
    const float momentum = ctx.Attr<float>("momentum");
    const bool is_test = ctx.Attr<bool>("is_test");
    const bool use_global_stats = ctx.Attr<bool>("use_global_stats");
    const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
    const DataLayout data_layout =
        framework::StringToDataLayout(data_layout_str);

    // Get the size for each dimension.
    // NCHW [batch_size, in_channels, in_height, in_width]
    const auto *x = ctx.Input<Tensor>("X");
    const auto &x_dims = x->dims();
    PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5,
                   "The Input dim size should be between 2 and 5");
    int N, C, H, W, D;
    ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D);

    auto *y = ctx.Output<Tensor>("Y");
    y->mutable_data<T>(ctx.GetPlace());

    // ------------------- cudnn descriptors ---------------------
    cudnnTensorDescriptor_t data_desc_;
    cudnnTensorDescriptor_t bn_param_desc_;
    cudnnBatchNormMode_t mode_;

    CUDNN_ENFORCE(platform::dynload::cudnnCreateTensorDescriptor(&data_desc_));
    CUDNN_ENFORCE(
        platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_));

    if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) {
      LOG(ERROR) << "Provided epsilon is smaller than "
                 << "CUDNN_BN_MIN_EPSILON. Setting it to "
                 << "CUDNN_BN_MIN_EPSILON instead.";
    }
    epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON);

    // TODO(dengkaipeng): using PERSISTENT mode in training may cause errors
    // during inference; cuDNN fixed PERSISTENT-mode issues in versions
    // 7.0.2, 7.0.4 and 7.3.0, but we keep this mode disabled for now.
    /* #if CUDNN_VERSION_MIN(7, 0, 0) */
    /*   mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; */
    /* #else */
    mode_ = CUDNN_BATCHNORM_SPATIAL;
    /* #endif */

    VLOG(3) << "Setting descriptors.";
    std::vector<int> dims;
    std::vector<int> strides;
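    // cuDNN Nd tensor descriptors carry no layout flag, so NCHW vs. NHWC is
    // expressed purely through the strides below; dims are always padded to
    // 5-D, and the descriptor rank is kept at cuDNN's minimum of 4.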
    if (data_layout == DataLayout::kNCHW) {
      dims = {N, C, H, W, D};
      strides = {C * H * W * D, H * W * D, W * D, D, 1};
    } else {
      dims = {N, C, H, W, D};
      strides = {H * W * D * C, 1, W * D * C, D * C, C};
    }
    CUDNN_ENFORCE(platform::dynload::cudnnSetTensorNdDescriptor(
        data_desc_, CudnnDataType<T>::type,
        x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data()));
    // Note: PERSISTENT not implemented for inference
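    // cudnnDeriveBNTensorDescriptor shapes bn_param_desc_ from data_desc_
    // (1 x C x 1 x 1 for the spatial modes used here).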
    CUDNN_ENFORCE(platform::dynload::cudnnDeriveBNTensorDescriptor(
        bn_param_desc_, data_desc_, is_test ? CUDNN_BATCHNORM_SPATIAL : mode_));

    const auto *scale = ctx.Input<Tensor>("Scale");
    const auto *bias = ctx.Input<Tensor>("Bias");

    auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();

    auto handle = dev_ctx.cudnn_handle();

    // From here there are two paths, depending on whether we are running
    // inference (or using global statistics) or training.
    if (is_test || use_global_stats) {
      // Only in inference mode do we use the estimated statistics passed in
      // as inputs for the computation.
      const auto *est_mean = ctx.Input<Tensor>("Mean");
      const auto *est_var = ctx.Input<Tensor>("Variance");
      // Run inference mode.
      PADDLE_ENFORCE_EQ(est_mean->dims().size(), 1UL);
      PADDLE_ENFORCE_EQ(est_var->dims().size(), 1UL);
      PADDLE_ENFORCE_EQ(est_mean->dims()[0], C);
      PADDLE_ENFORCE_EQ(est_var->dims()[0], C);

      CUDNN_ENFORCE(platform::dynload::cudnnBatchNormalizationForwardInference(
          handle,
          // Note: PERSISTENT not implemented for inference
          CUDNN_BATCHNORM_SPATIAL, CudnnDataType<T>::kOne(),
          CudnnDataType<T>::kZero(), data_desc_, x->template data<T>(),
          data_desc_, y->template mutable_data<T>(ctx.GetPlace()),
          bn_param_desc_, scale->template data<BatchNormParamType<T>>(),
          bias->template data<BatchNormParamType<T>>(),
          est_mean->template data<BatchNormParamType<T>>(),
          est_var->template data<BatchNormParamType<T>>(), epsilon));
    } else {
      // Run training mode.
      // Obtain the running mean and running inverse variance, and
      // check whether they need to be initialized.

      auto *mean_out = ctx.Output<Tensor>("MeanOut");
      auto *variance_out = ctx.Output<Tensor>("VarianceOut");
      mean_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
      variance_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());

      auto *saved_mean = ctx.Output<Tensor>("SavedMean");
      auto *saved_variance = ctx.Output<Tensor>("SavedVariance");
      saved_mean->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
      saved_variance->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
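      // Zero-initialize the saved statistics so they stay well-defined on
      // the (N * H * W * D) == 1 path below, which skips the cuDNN call.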
      math::SetConstant<platform::CUDADeviceContext, BatchNormParamType<T>>
          functor;
      functor(dev_ctx, saved_mean, static_cast<BatchNormParamType<T>>(0));
      functor(dev_ctx, saved_variance, static_cast<BatchNormParamType<T>>(0));

      if ((N * H * W * D) == 1) {
        LOG(WARNING) << "Only 1 element in normalization dimension, "
                     << "we skip the batch norm calculation, let y = x.";
        framework::TensorCopy(*x, ctx.GetPlace(), y);
      } else {
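        // cuDNN's exponentialAverageFactor semantics are
        //   running = (1 - factor) * running + factor * batch_stat,
        // so factor = 1 - momentum yields the conventional update
        //   running = momentum * running + (1 - momentum) * batch_stat.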
        double this_factor = 1. - momentum;

        CUDNN_ENFORCE(platform::dynload::cudnnBatchNormalizationForwardTraining(
            handle, mode_, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(),
            data_desc_, x->template data<T>(), data_desc_,
            y->template mutable_data<T>(ctx.GetPlace()), bn_param_desc_,
            scale->template data<BatchNormParamType<T>>(),
            bias->template data<BatchNormParamType<T>>(), this_factor,
            mean_out->template mutable_data<BatchNormParamType<T>>(
                ctx.GetPlace()),
            variance_out->template mutable_data<BatchNormParamType<T>>(
                ctx.GetPlace()),
            epsilon, saved_mean->template mutable_data<BatchNormParamType<T>>(
                         ctx.GetPlace()),
            saved_variance->template mutable_data<BatchNormParamType<T>>(
                ctx.GetPlace())));
      }
    }

    // Clean up on exit.
    CUDNN_ENFORCE(platform::dynload::cudnnDestroyTensorDescriptor(data_desc_));
    CUDNN_ENFORCE(
        platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_));
  }
};

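// Per-channel reduction for the scale/bias gradients:
//   dbias[c]  = sum over (n, hw) of dy
//   dscale[c] = sum over (n, hw) of dy * (x - mean[c]) / sqrt(var[c] + eps)
// One block handles one channel, reducing its N * HxW elements with cub.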
template <typename T, int BlockDim, framework::DataLayout layout>
static __global__ void KeBNBackwardScaleBias(
    const T *dy, const T *x, const BatchNormParamType<T> *mean,
    const BatchNormParamType<T> *variance, const double epsilon, const int N,
    const int C, const int HxW, BatchNormParamType<T> *dscale,
    BatchNormParamType<T> *dbias) {
  const int outer_size = C;
  const int inner_size = N * HxW;
  typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
  __shared__ typename BlockReduce::TempStorage ds_storage;
  __shared__ typename BlockReduce::TempStorage db_storage;

  for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
    BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0);
    BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0);

    BatchNormParamType<T> inv_var_i = 1.0 / sqrt(variance[i] + epsilon);
    BatchNormParamType<T> mean_i = mean[i];
    for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
      const int index = layout == framework::DataLayout::kNCHW
                            ? (j / HxW * C + i) * HxW + j % HxW
                            : j * outer_size + i;
      ds_sum += static_cast<BatchNormParamType<T>>(dy[index]) *
                (static_cast<BatchNormParamType<T>>(x[index]) - mean_i);
      db_sum += static_cast<BatchNormParamType<T>>(dy[index]);
    }
    ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, cub::Sum());
    db_sum = BlockReduce(db_storage).Reduce(db_sum, cub::Sum());
    if (threadIdx.x == 0) {
      dscale[i] = ds_sum * inv_var_i;
      dbias[i] = db_sum;
    }
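    // Barrier before the next outer iteration reuses the shared TempStorage.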
    __syncthreads();
  }
}

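// Elementwise dx for the use_global_stats path, where mean and variance are
// constants with respect to x:
//   dx = dy * scale[c] / sqrt(var[c] + eps)
// computed with a grid-stride loop over all elements.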
template <typename T, framework::DataLayout layout>
static __global__ void KeBNBackwardData(const T *dy,
                                        const BatchNormParamType<T> *scale,
                                        const BatchNormParamType<T> *variance,
                                        const double epsilon, const int C,
                                        const int HxW, const int num, T *dx) {
  int gid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = gid; i < num; i += stride) {
    const int c = layout == framework::DataLayout::kNCHW ? i / HxW % C : i % C;
    BatchNormParamType<T> inv_var = 1.0 / sqrt(variance[c] + epsilon);
    dx[i] = static_cast<T>(static_cast<BatchNormParamType<T>>(dy[i]) *
                           scale[c] * inv_var);
  }
}

template <typename T>
class BatchNormGradKernel<platform::CUDADeviceContext, T>
    : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
                   "It must use CUDAPlace.");
    double epsilon = static_cast<double>(ctx.Attr<float>("epsilon"));
    const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
    const bool use_global_stats = ctx.Attr<bool>("use_global_stats");

    const DataLayout data_layout =
        framework::StringToDataLayout(data_layout_str);
    const auto *x = ctx.Input<Tensor>("X");
    const auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y"));
    const auto *scale = ctx.Input<Tensor>("Scale");

    const auto &x_dims = x->dims();

    PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5,
                   "The Input dim size should be between 2 and 5");
    int N, C, H, W, D;
    ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D);

    // Initialize the gradient outputs.
    auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto *d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale"));
    auto *d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias"));

    d_x->mutable_data<T>(ctx.GetPlace());
    if (d_scale && d_bias) {
      d_scale->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
      d_bias->mutable_data<BatchNormParamType<T>>(ctx.GetPlace());
    }
    PADDLE_ENFORCE_EQ(scale->dims().size(), 1UL);
    PADDLE_ENFORCE_EQ(scale->dims()[0], C);

    std::vector<int> dims;
    std::vector<int> strides;
    if (data_layout == DataLayout::kNCHW) {
      dims = {N, C, H, W, D};
      strides = {C * H * W * D, H * W * D, W * D, D, 1};
    } else {
      dims = {N, C, H, W, D};
      strides = {H * W * D * C, 1, W * D * C, D * C, C};
    }

    auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    if (!use_global_stats) {
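      // Mirror the forward special case: with a single element per channel
      // the forward pass set y = x, so dx = dy and the scale/bias gradients
      // are zero.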
      if ((N * H * W * D) == 1) {
        framework::TensorCopy(*d_y, ctx.GetPlace(), d_x);
        math::SetConstant<platform::CUDADeviceContext, BatchNormParamType<T>>
            functor;
        functor(dev_ctx, d_scale, static_cast<BatchNormParamType<T>>(0));
        functor(dev_ctx, d_bias, static_cast<BatchNormParamType<T>>(0));
        return;
      }

      // ------------------- cudnn descriptors ---------------------
      cudnnTensorDescriptor_t data_desc_;
      cudnnTensorDescriptor_t bn_param_desc_;
      cudnnBatchNormMode_t mode_;

      CUDNN_ENFORCE(
          platform::dynload::cudnnCreateTensorDescriptor(&data_desc_));
      CUDNN_ENFORCE(
          platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_));
      if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) {
        LOG(ERROR) << "Provided epsilon is smaller than "
                   << "CUDNN_BN_MIN_EPSILON. Setting it to "
                   << "CUDNN_BN_MIN_EPSILON instead.";
      }
      epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON);

      // TODO(dengkaipeng): using PERSISTENT mode in training may cause errors
      // during inference; cuDNN fixed PERSISTENT-mode issues in versions
      // 7.0.2, 7.0.4 and 7.3.0, but we keep this mode disabled for now.
      /* #if CUDNN_VERSION_MIN(7, 0, 0) */
      /*   mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; */
      /* #else */
      mode_ = CUDNN_BATCHNORM_SPATIAL;
      /* #endif */

      CUDNN_ENFORCE(platform::dynload::cudnnSetTensorNdDescriptor(
          data_desc_, CudnnDataType<T>::type,
          x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data()));
      CUDNN_ENFORCE(platform::dynload::cudnnDeriveBNTensorDescriptor(
          bn_param_desc_, data_desc_, mode_));

      const auto *saved_mean = ctx.Input<Tensor>("SavedMean");
      const auto *saved_var = ctx.Input<Tensor>("SavedVariance");
      const void *saved_mean_data =
          saved_mean->template data<BatchNormParamType<T>>();
      const void *saved_var_data =
          saved_var->template data<BatchNormParamType<T>>();

      CUDNN_ENFORCE(platform::dynload::cudnnBatchNormalizationBackward(
          dev_ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(),
          CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(),
          CudnnDataType<T>::kZero(), data_desc_, x->template data<T>(),
          data_desc_, d_y->template data<T>(), data_desc_,
          d_x->template mutable_data<T>(ctx.GetPlace()), bn_param_desc_,
          scale->template data<BatchNormParamType<T>>(),
          d_scale->template mutable_data<BatchNormParamType<T>>(ctx.GetPlace()),
          d_bias->template mutable_data<BatchNormParamType<T>>(ctx.GetPlace()),
          epsilon, saved_mean_data, saved_var_data));

      // Clean up on exit.
      CUDNN_ENFORCE(
          platform::dynload::cudnnDestroyTensorDescriptor(data_desc_));
      CUDNN_ENFORCE(
          platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_));
    } else {
      const auto *running_mean = ctx.Input<Tensor>("Mean");
      const auto *running_var = ctx.Input<Tensor>("Variance");

      const auto *running_mean_data =
          running_mean->template data<BatchNormParamType<T>>();
      const auto *running_var_data =
          running_var->template data<BatchNormParamType<T>>();

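      // Launch configuration: the data kernel covers all elements with a
      // grid-stride loop; the scale/bias kernel uses one block per channel,
      // capped by the device's maximum physical thread count.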
      const int num = x->numel();
      const int block = 512;
      int max_threads = dev_ctx.GetMaxPhysicalThreadCount();
      const int max_blocks = std::max(max_threads / block, 1);
      int grid1 = (num + block - 1) / block;
      int grid2 = std::min(C, max_blocks);

      if (data_layout == framework::DataLayout::kNCHW) {
        if (d_x) {
          KeBNBackwardData<T, framework::DataLayout::kNCHW><<<
              grid1, block, 0, dev_ctx.stream()>>>(
              d_y->data<T>(), scale->data<BatchNormParamType<T>>(),
              running_var_data, epsilon, C, H * W * D, num, d_x->data<T>());
        }
        if (d_scale && d_bias) {
          KeBNBackwardScaleBias<T, block, framework::DataLayout::kNCHW><<<
              grid2, block, 0, dev_ctx.stream()>>>(
              d_y->data<T>(), x->data<T>(), running_mean_data, running_var_data,
              epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(),
              d_bias->data<BatchNormParamType<T>>());
        }
      } else {
        if (d_x) {
          KeBNBackwardData<T, framework::DataLayout::kNHWC><<<
              grid1, block, 0, dev_ctx.stream()>>>(
              d_y->data<T>(), scale->data<BatchNormParamType<T>>(),
              running_var_data, epsilon, C, H * W * D, num, d_x->data<T>());
        }
        if (d_scale && d_bias) {
          KeBNBackwardScaleBias<T, block, framework::DataLayout::kNHWC><<<
              grid2, block, 0, dev_ctx.stream()>>>(
              d_y->data<T>(), x->data<T>(), running_mean_data, running_var_data,
              epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(),
              d_bias->data<BatchNormParamType<T>>());
        }
      }
    }
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;
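
// float16 kernels are registered alongside float and double; their
// parameters and reductions stay in float32 via BatchNormParamType.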
REGISTER_OP_CUDA_KERNEL(
    batch_norm, ops::BatchNormKernel<plat::CUDADeviceContext, float>,
    ops::BatchNormKernel<plat::CUDADeviceContext, double>,
    ops::BatchNormKernel<plat::CUDADeviceContext, plat::float16>);
REGISTER_OP_CUDA_KERNEL(
    batch_norm_grad, ops::BatchNormGradKernel<plat::CUDADeviceContext, float>,
    ops::BatchNormGradKernel<plat::CUDADeviceContext, double>,
    ops::BatchNormGradKernel<plat::CUDADeviceContext, plat::float16>);