/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/batch_norm_op.h"
#include <memory>
#include <string>
#include <unordered_map>
#include "paddle/fluid/framework/data_layout.h"
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

void BatchNormOp::InferShape(framework::InferShapeContext *ctx) const {
  PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
                    platform::errors::InvalidArgument(
                        "Input(X) of BatchNormOp should not be null."));
  PADDLE_ENFORCE_EQ(ctx->HasInput("Scale"), true,
                    platform::errors::InvalidArgument(
                        "Input(Scale) of BatchNormOp should not be null."));
  PADDLE_ENFORCE_EQ(ctx->HasInput("Bias"), true,
                    platform::errors::InvalidArgument(
                        "Input(Bias) of BatchNormOp should not be null."));
  PADDLE_ENFORCE_EQ(ctx->HasInput("Mean"), true,
                    platform::errors::InvalidArgument(
                        "Input(Mean) of BatchNormOp should not be null."));
  PADDLE_ENFORCE_EQ(ctx->HasInput("Variance"), true,
                    platform::errors::InvalidArgument(
                        "Input(Variance) of BatchNormOp should not be null."));
  PADDLE_ENFORCE_EQ(ctx->HasOutput("Y"), true,
                    platform::errors::InvalidArgument(
                        "Output(Y) of BatchNormOp should not be null."));
  bool is_test = ctx->Attrs().Get<bool>("is_test");
  if (!is_test) {
    PADDLE_ENFORCE_EQ(
        ctx->HasOutput("MeanOut"), true,
        platform::errors::InvalidArgument(
            "Output(MeanOut) of BatchNormOp should not be null."));
    PADDLE_ENFORCE_EQ(
        ctx->HasOutput("VarianceOut"), true,
        platform::errors::InvalidArgument(
            "Output(VarianceOut) of BatchNormOp should not be null."));
    PADDLE_ENFORCE_EQ(
        ctx->HasOutput("SavedMean"), true,
        platform::errors::InvalidArgument(
            "Output(SavedMean) of BatchNormOp should not be null."));
    PADDLE_ENFORCE_EQ(
        ctx->HasOutput("SavedVariance"), true,
        platform::errors::InvalidArgument(
            "Output(SavedVariance) of BatchNormOp should not be null."));
  }

  // make sure Mean/MeanOut and Variance/VarianceOut share memory in Python
  PADDLE_ENFORCE_EQ(ctx->Inputs("Mean")[0], ctx->Outputs("MeanOut")[0],
                    platform::errors::InvalidArgument(
                        "Mean and MeanOut should share the same memory"));
  PADDLE_ENFORCE_EQ(ctx->Inputs("Variance")[0], ctx->Outputs("VarianceOut")[0],
                    platform::errors::InvalidArgument(
                        "Variance and VarianceOut should share the same "
                        "memory"));

  const auto x_dims = ctx->GetInputDim("X");
  const DataLayout data_layout = framework::StringToDataLayout(
      ctx->Attrs().Get<std::string>("data_layout"));

  if (ctx->IsRuntime() && ctx->HasInput("MomentumTensor")) {
    auto mom = ctx->Inputs("MomentumTensor");
    PADDLE_ENFORCE_EQ(mom.size(), 1,
                      platform::errors::InvalidArgument(
                          "Input(MomentumTensor) size must be 1"));
  }

  PADDLE_ENFORCE_GE(
      x_dims.size(), 2,
      platform::errors::InvalidArgument(
          "ShapeError: the dimension of input "
          "X must be greater than or equal to 2. But received: the shape of "
          "input X = [%s], the dimension of input X = [%d]",
          x_dims, x_dims.size()));
  PADDLE_ENFORCE_LE(
      x_dims.size(), 5,
      platform::errors::InvalidArgument(
          "ShapeError: the dimension of input X "
          "must be smaller than or equal to 5. But received: the shape of "
          "input X = [%s], the dimension of input X = [%d]",
          x_dims, x_dims.size()));

  // Channel count: axis 1 for NCHW (MKLDNN kernels keep logical NCHW dims),
  // the last axis for NHWC.
  const int64_t C =
      (this->IsMKLDNNType() || data_layout == DataLayout::kNCHW
           ? x_dims[1]
           : x_dims[x_dims.size() - 1]);

  auto scale_dim = ctx->GetInputDim("Scale");
  auto bias_dim = ctx->GetInputDim("Bias");

  PADDLE_ENFORCE_EQ(scale_dim.size(), 1UL,
                    platform::errors::InvalidArgument(
                        "ShapeError: the dimension of scale must be equal "
                        "to 1. But received: the shape of scale is [%s], "
                        "the dimension of scale is [%d]",
                        scale_dim, scale_dim.size()));
  PADDLE_ENFORCE_EQ(bias_dim.size(), 1UL,
                    platform::errors::InvalidArgument(
                        "ShapeError: the dimension of bias must be equal "
                        "to 1. But received: the shape of bias is [%s], "
                        "the dimension of bias is [%d]",
                        bias_dim, bias_dim.size()));

  bool check = true;
  if ((!ctx->IsRuntime()) && (framework::product(scale_dim) <= 0 ||
                              framework::product(bias_dim) <= 0)) {
    check = false;
  }

  if (check) {
    PADDLE_ENFORCE_EQ(scale_dim[0], C,
                      platform::errors::InvalidArgument(
                          "ShapeError: the shape of scale must be equal to "
                          "[%d]. But received: the shape of scale is [%d]",
                          C, scale_dim[0]));
    PADDLE_ENFORCE_EQ(bias_dim[0], C,
                      platform::errors::InvalidArgument(
                          "ShapeError: the shape of bias must be equal to "
                          "[%d]. But received: the shape of bias is [%d]",
                          C, bias_dim[0]));
  }
  ctx->SetOutputDim("Y", x_dims);
  ctx->SetOutputDim("MeanOut", {C});
  ctx->SetOutputDim("VarianceOut", {C});
  ctx->SetOutputDim("SavedMean", {C});
  ctx->SetOutputDim("SavedVariance", {C});
  ctx->ShareLoD("X", "Y");
}

framework::OpKernelType BatchNormOp::GetExpectedKernelType(
    const framework::ExecutionContext &ctx) const {
  auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
  // By default, the type of the scale, bias, mean, and variance tensors
  // should be float (for float or float16 input tensors) or double (for
  // double input tensors).
  auto bn_param_type = framework::proto::VarType::FP32;
  if (input_data_type == framework::proto::VarType::FP64) {
    bn_param_type = framework::proto::VarType::FP64;
  }
  PADDLE_ENFORCE_EQ(
      bn_param_type, ctx.Input<Tensor>("Scale")->type(),
      platform::errors::InvalidArgument("Scale input should be of float type"));
  PADDLE_ENFORCE_EQ(
      bn_param_type, ctx.Input<Tensor>("Bias")->type(),
      platform::errors::InvalidArgument("Bias input should be of float type"));
  PADDLE_ENFORCE_EQ(
      bn_param_type, ctx.Input<Tensor>("Mean")->type(),
      platform::errors::InvalidArgument("Mean input should be of float type"));
  PADDLE_ENFORCE_EQ(bn_param_type, ctx.Input<Tensor>("Variance")->type(),
                    platform::errors::InvalidArgument(
                        "Variance input should be of float type"));

  // TODO(pzelazko-intel): enable MKLDNN layout when it's ready
  framework::LibraryType library = framework::LibraryType::kPlain;
  framework::DataLayout layout = framework::DataLayout::kAnyLayout;
#ifdef PADDLE_WITH_MKLDNN
  if (library == framework::LibraryType::kPlain &&
      platform::CanMKLDNNBeUsed(ctx)) {
    library = framework::LibraryType::kMKLDNN;
    layout = framework::DataLayout::kMKLDNN;
  }
#endif

  return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout,
                                 library);
}

framework::OpKernelType BatchNormOp::GetKernelTypeForVar(
    const std::string &var_name, const Tensor &tensor,
    const framework::OpKernelType &expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  // Only input require reshaping, weights and
  // bias are having shape in NCHW order
  if ((var_name == "X") &&
      (expected_kernel_type.data_layout_ == framework::DataLayout::kMKLDNN) &&
      (tensor.layout() != framework::DataLayout::kMKLDNN)) {
    auto attrs = Attrs();
    auto ar = paddle::framework::AttrReader(attrs);
    const std::string data_layout = ar.Get<std::string>("data_layout");
    auto dl = framework::StringToDataLayout(data_layout);
    // Some models may have intentionally set "AnyLayout" for pool
    // op. Treat this as NCHW (default data_format value)
    if (dl != framework::DataLayout::kAnyLayout) {
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), dl);
    }
  }
#endif
  return framework::OpKernelType(expected_kernel_type.data_type_,
                                 tensor.place(), tensor.layout());
}

void BatchNormOpMaker::Make() {
  AddAttr<bool>("is_test",
                "(bool, default false) Set to true for inference only, false "
                "for training. Some layers may run faster when this is true.")
      .SetDefault(false);
  AddAttr<float>("momentum", "").SetDefault(0.9);
  AddAttr<float>("epsilon", "")
      .SetDefault(1e-5)
      .AddCustomChecker([](const float &epsilon) {
        PADDLE_ENFORCE_GE(
            epsilon, 0.0f,
            platform::errors::InvalidArgument(
                "'epsilon' should be greater than or equal to 0.0."));
        PADDLE_ENFORCE_LE(
            epsilon, 0.001f,
            platform::errors::InvalidArgument(
                "'epsilon' should be less than or equal to 0.001."));
      });
  AddAttr<std::string>("data_layout", "").SetDefault("NCHW");
  AddInput("X", "The input tensor");
  AddInput("Scale",
           "Scale is a 1-dimensional tensor of size C "
           "that is applied to the output");
  AddInput("Bias",
           "Bias is a 1-dimensional tensor of size C "
           "that is applied to the output");
  AddInput("Mean",
           "The global mean (for training) or "
           "estimated mean (for testing)");
  AddInput("Variance",
           "The global variance (for training) "
           "or estimated Variance (for testing)");
  AddInput("MomentumTensor",
           "(Tensor<float32>, optional) If provided, batch_norm will "
           "use this as momentum; it has a higher priority than "
           "attr(momentum). The shape of this tensor MUST BE [1].")
      .AsDispensable();
  AddOutput("Y", "result after normalization");
  AddOutput("MeanOut",
            "Share memory with Mean. "
            "Store the global mean when training");
  AddOutput("VarianceOut",
            "Share memory with Variance. "
            "Store the global Variance when training");
  AddOutput("SavedMean",
            "Mean of the current mini batch, "
            "will apply to output when training")
      .AsIntermediate();
  AddOutput("SavedVariance",
            "Variance of the current mini batch, "
            "will apply to output when training")
      .AsIntermediate();
  AddOutput("ReserveSpace",
            "Reserve GPU space for triggering the new semi-persistent "
            "NHWC kernel")
      .AsDispensable();
  AddAttr<bool>("use_mkldnn",
                "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<bool>("fuse_with_relu",
                "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<bool>("use_global_stats",
                "(bool, default false) Whether to use global mean and "
                "variance. In inference or test mode, set use_global_stats "
                "to true or is_test true. the behavior is equivalent. "
                "In train mode, when setting use_global_stats True, the "
                "global mean and variance are also used during train time, "
                "the BN acts as scaling and shiffting.")
      .SetDefault(false);

  AddComment(R"DOC(
Batch Normalization.

Batch Norm is implemented as described in the paper
https://arxiv.org/pdf/1502.03167.pdf and can be used as a normalizer for
conv2d and fully_connected operations.
The required data format for this layer is one of the following:
1. NHWC `[batch, in_height, in_width, in_channels]`
2. NCHW `[batch, in_channels, in_height, in_width]`
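
In training mode the operator computes, per channel,

    mean = sum(x) / (N * H * W)
    variance = sum((x - mean)^2) / (N * H * W)
    y = scale * (x - mean) / sqrt(variance + epsilon) + bias

and folds the mini-batch statistics into the running statistics:

    MeanOut = MeanOut * momentum + mean * (1 - momentum)
    VarianceOut = VarianceOut * momentum + variance * (1 - momentum)

In inference mode (is_test or use_global_stats set), the running statistics
are used in place of the mini-batch statistics.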

)DOC");
}

template <typename T>
class BatchNormKernel<platform::CPUDeviceContext, T>
    : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    const float epsilon = ctx.Attr<float>("epsilon");
    float momentum = ctx.Attr<float>("momentum");
    const bool is_test = ctx.Attr<bool>("is_test");
    const bool use_global_stats = ctx.Attr<bool>("use_global_stats");

    bool global_stats = is_test || use_global_stats;

    const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
    const DataLayout data_layout =
        framework::StringToDataLayout(data_layout_str);

    const auto *x = ctx.Input<Tensor>("X");
    const auto &x_dims = x->dims();
    PADDLE_ENFORCE_GE(x_dims.size(), 2,
                      platform::errors::InvalidArgument(
                          "The Input X dim size should be larger than 1."));
    PADDLE_ENFORCE_LE(x_dims.size(), 5,
                      platform::errors::InvalidArgument(
                          "The Input X dim size should be less than 6."));
    const int N = x_dims[0];
    const int C =
        (data_layout == DataLayout::kNCHW ? x_dims[1]
                                          : x_dims[x_dims.size() - 1]);
    const int sample_size = x->numel() / N / C;

    auto *y = ctx.Output<Tensor>("Y");

    auto *mean_out = ctx.Output<Tensor>("MeanOut");
    auto *variance_out = ctx.Output<Tensor>("VarianceOut");
    auto *saved_mean = ctx.Output<Tensor>("SavedMean");
    auto *saved_variance = ctx.Output<Tensor>("SavedVariance");

    // alloc memory
    y->mutable_data<T>(ctx.GetPlace());
    mean_out->mutable_data<T>(ctx.GetPlace());
    variance_out->mutable_data<T>(ctx.GetPlace());
    saved_mean->mutable_data<T>(ctx.GetPlace());
    saved_variance->mutable_data<T>(ctx.GetPlace());

    if (!global_stats) {
      // saved_xx is used just for this batch of data
      EigenVectorArrayMap<T> saved_mean_e(
          saved_mean->mutable_data<T>(ctx.GetPlace()), C);
      EigenVectorArrayMap<T> saved_variance_e(
          saved_variance->mutable_data<T>(ctx.GetPlace()), C);
      saved_mean_e.setZero();
      saved_variance_e.setZero();

      EigenVectorArrayMap<T> running_mean_arr(
          mean_out->mutable_data<T>(ctx.GetPlace()), C);
      EigenVectorArrayMap<T> running_var_arr(
          variance_out->mutable_data<T>(ctx.GetPlace()), C);

      if ((N * sample_size) == 1) {
        // Only one element in the normalization dimension: skip the batch
        // norm calculation and let y = x.
        framework::TensorCopy(*x, ctx.GetPlace(), y);
        return;
      }

      switch (data_layout) {
        case DataLayout::kNCHW: {
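          // View x as a (sample_size) x (N * C) column-major matrix: in NCHW
          // layout each (n, c) slab of H * W elements is contiguous.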
          ConstEigenArrayMap<T> x_arr(x->data<T>(), sample_size, N * C);
          for (int nc = 0; nc < N * C; ++nc) {
            saved_mean_e(nc % C) += x_arr.col(nc).sum();
          }
          saved_mean_e /= N * sample_size;
          for (int nc = 0; nc < N * C; ++nc) {
            saved_variance_e(nc % C) +=
                (x_arr.col(nc) - saved_mean_e(nc % C)).matrix().squaredNorm();
          }
          saved_variance_e /= N * sample_size;
          break;
        }
        case DataLayout::kNHWC: {
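          // View x as a C x (N * sample_size) matrix: in NHWC layout each
          // spatial position holds a contiguous vector of C channel values.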
          ConstEigenArrayMap<T> x_arr(x->data<T>(), C, N * sample_size);
          for (int i = 0; i < N * sample_size; ++i) {
            saved_mean_e += x_arr.col(i);
          }
          saved_mean_e /= N * sample_size;
          for (int i = 0; i < N * sample_size; ++i) {
            saved_variance_e +=
                (x_arr.col(i) - saved_mean_e) * (x_arr.col(i) - saved_mean_e);
          }
          saved_variance_e /= N * sample_size;
          break;
        }
        default:
          PADDLE_THROW(platform::errors::InvalidArgument(
              "Unknown storage order: %s", data_layout_str));
      }

      // If MomentumTensor is set, use its value instead of the attribute;
      // momentum is only used in this training branch.
      if (ctx.HasInput("MomentumTensor")) {
        const auto *mom_tensor = ctx.Input<Tensor>("MomentumTensor");
        momentum = mom_tensor->data<float>()[0];
      }

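      // Exponential moving average update: fold the mini-batch statistics
      // into the running statistics with weight (1 - momentum).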
      running_mean_arr =
          running_mean_arr * momentum + saved_mean_e * (1. - momentum);
      running_var_arr =
          running_var_arr * momentum + saved_variance_e * (1. - momentum);
    }

    // use SavedMean and SavedVariance to do the normalization
    Eigen::Array<T, Eigen::Dynamic, 1> inv_std(C);
    if (global_stats) {
      ConstEigenVectorArrayMap<T> var_arr(
          ctx.Input<Tensor>("Variance")->data<T>(), C);
      inv_std = (var_arr + epsilon).sqrt().inverse();
    } else {
      EigenVectorArrayMap<T> saved_inv_std(
          ctx.Output<Tensor>("SavedVariance")->data<T>(), C);
      // Invert SavedVariance first; the gradient kernel will use it too.
      saved_inv_std = (saved_inv_std + epsilon).inverse().sqrt();
      inv_std = saved_inv_std;
    }
    ConstEigenVectorArrayMap<T> mean_arr(
        global_stats ? ctx.Input<Tensor>("Mean")->data<T>()
                     : ctx.Output<Tensor>("SavedMean")->data<T>(),
        C);

    //   (x - est_mean) * inv_var * scale + bias
    //   formula transform ====>
    //   (x * inv_var * scale) + (bias - est_mean * inv_var * scale)
    const auto *scale = ctx.Input<Tensor>("Scale");
    const auto *bias = ctx.Input<Tensor>("Bias");
    ConstEigenVectorArrayMap<T> scale_arr(scale->data<T>(), C);
    ConstEigenVectorArrayMap<T> bias_arr(bias->data<T>(), C);
    Eigen::Array<T, Eigen::Dynamic, 1> new_scale = inv_std * scale_arr;
    Eigen::Array<T, Eigen::Dynamic, 1> new_bias =
        bias_arr - mean_arr * inv_std * scale_arr;

    switch (data_layout) {
      case DataLayout::kNCHW: {
        EigenArrayMap<T> y_arr(y->mutable_data<T>(ctx.GetPlace()), sample_size,
                               N * C);
        ConstEigenArrayMap<T> x_arr(x->data<T>(), sample_size, N * C);
        for (int nc = 0; nc < N * C; ++nc) {
          y_arr.col(nc) = x_arr.col(nc) * new_scale(nc % C) + new_bias(nc % C);
        }
        break;
      }
      case DataLayout::kNHWC: {
        EigenArrayMap<T>(y->mutable_data<T>(ctx.GetPlace()), C,
                         N * sample_size) =
            (ConstEigenArrayMap<T>(x->data<T>(), C, N * sample_size).colwise() *
             new_scale)
                .colwise() +
            new_bias;
        break;
      }
      default:
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Unknown storage order: %s", data_layout_str));
    }
  }
};

void BatchNormGradOp::InferShape(framework::InferShapeContext *ctx) const {
  // check input
  PADDLE_ENFORCE_EQ(
      ctx->HasInput("Scale"), true,
      platform::errors::InvalidArgument("Input(scale) should not be null."));
  PADDLE_ENFORCE_EQ(
      ctx->HasInput(framework::GradVarName("Y")), true,
      platform::errors::InvalidArgument("Input(Y@GRAD) should not be null."));
  PADDLE_ENFORCE_EQ(ctx->HasInput("SavedMean"), true,
                    platform::errors::InvalidArgument(
                        "Input(SavedMean) should not be null."));
  PADDLE_ENFORCE_EQ(ctx->HasInput("SavedVariance"), true,
                    platform::errors::InvalidArgument(
                        "Input(SavedVariance) should not be null"));
Q
qingqing01 已提交
464 465 466

  // check output
  PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")), "");
467 468 469 470 471 472 473 474 475 476 477

  const bool has_scale_grad = ctx->HasOutput(framework::GradVarName("Scale"));
  const bool has_bias_grad = ctx->HasOutput(framework::GradVarName("Bias"));

  PADDLE_ENFORCE_EQ((has_scale_grad == has_bias_grad), true,
                    platform::errors::InvalidArgument(
                        "Output(Scale@GRAD) and Output(Bias@GRAD) must be null "
                        "or not be null at same time. But now, "
                        "has Scale@Grad=[%d], has Bias@GRAD=[%d]",
                        has_scale_grad, has_bias_grad));

  const bool use_global_stats = ctx->Attrs().Get<bool>("use_global_stats");
  if (use_global_stats) {
    PADDLE_ENFORCE_EQ(
        !ctx->Attrs().Get<bool>("use_mkldnn"), true,
        platform::errors::InvalidArgument(
            "Using global stats during training is not supported "
            "in gradient op kernel of batch_norm_mkldnn_op now."));
  }

  // batch_norm_grad with inplace takes Y as input; without inplace it takes
  // X as input. HasInput would throw an exception at compile time, so only
  // infer shape at run time here.
  if (ctx->IsRuntime()) {
    PADDLE_ENFORCE_EQ(ctx->HasInput("X") || ctx->HasInput("Y"), true,
                      platform::errors::InvalidArgument(
                          "Input(X) and Input(Y) should not be all null."));
    auto input_name = "Y";
    if (ctx->HasInput("X")) input_name = "X";
    const auto x_dims = ctx->GetInputDim(input_name);
    const DataLayout data_layout = framework::StringToDataLayout(
        ctx->Attrs().Get<std::string>("data_layout"));

    const int C =
        (this->IsMKLDNNType() || data_layout == DataLayout::kNCHW
             ? x_dims[1]
             : x_dims[x_dims.size() - 1]);

    ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
    // has_scale_grad == has_bias_grad, so checking has_scale_grad is enough
    if (has_scale_grad) {
      ctx->SetOutputDim(framework::GradVarName("Scale"), {C});
      ctx->SetOutputDim(framework::GradVarName("Bias"), {C});
    }
  }
}

framework::OpKernelType BatchNormGradOp::GetExpectedKernelType(
    const framework::ExecutionContext &ctx) const {
  const auto *var = ctx.InputVar(framework::GradVarName("Y"));
  if (var == nullptr) {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Cannot find the gradient variable of Y."));
  }
  const Tensor *t = nullptr;
  if (var->IsType<Tensor>()) {
    t = &var->Get<Tensor>();
  } else if (var->IsType<LoDTensor>()) {
    t = &var->Get<LoDTensor>();
  }
  if (t == nullptr) {
    PADDLE_THROW(platform::errors::InvalidArgument(
        "The gradient variable of Y is empty."));
  }

  // TODO(pzelazko-intel): enable MKLDNN layout when it's ready
  framework::LibraryType library = framework::LibraryType::kPlain;
  framework::DataLayout layout = framework::DataLayout::kAnyLayout;

#ifdef PADDLE_WITH_MKLDNN
  if (library == framework::LibraryType::kPlain &&
      platform::CanMKLDNNBeUsed(ctx)) {
    library = framework::LibraryType::kMKLDNN;
    layout = framework::DataLayout::kMKLDNN;
  }
#endif

  return framework::OpKernelType(
      OperatorWithKernel::IndicateVarDataType(ctx, "X"), ctx.GetPlace(), layout,
      library);
}

framework::OpKernelType BatchNormGradOp::GetKernelTypeForVar(
    const std::string &var_name, const Tensor &tensor,
    const framework::OpKernelType &expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  // Only input require reshaping, weights and
  // bias are having shape in NCHW order
  if (((var_name == "X") || (var_name == framework::GradVarName("Y"))) &&
      (expected_kernel_type.data_layout_ == framework::DataLayout::kMKLDNN) &&
      (tensor.layout() != framework::DataLayout::kMKLDNN)) {
    auto attrs = Attrs();
    auto ar = paddle::framework::AttrReader(attrs);
    const std::string data_layout = ar.Get<std::string>("data_layout");
    auto dl = framework::StringToDataLayout(data_layout);
    // Some models may have intentionally set "AnyLayout" for pool
    // op. Treat this as NCHW (default data_format value)
    if (dl != framework::DataLayout::kAnyLayout) {
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), dl);
    }
  }
#endif
  return framework::OpKernelType(expected_kernel_type.data_type_,
                                 tensor.place(), tensor.layout());
}

template <typename T>
class BatchNormGradKernel<platform::CPUDeviceContext, T>
    : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    const auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y"));
    const auto *scale = ctx.Input<Tensor>("Scale");
    const auto *bias = ctx.Input<Tensor>("Bias");
    const auto *saved_mean = ctx.Input<Tensor>("SavedMean");
    // SavedVariance has already been inverted in the forward operator
    const auto *saved_inv_variance = ctx.Input<Tensor>("SavedVariance");
    const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
    const bool use_global_stats = ctx.Attr<bool>("use_global_stats");
    const bool is_test = ctx.Attr<bool>("is_test");
    const float epsilon = ctx.Attr<float>("epsilon");
    const DataLayout data_layout =
        framework::StringToDataLayout(data_layout_str);

    auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto *d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale"));
    auto *d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias"));

    // batch_norm with inplace as false takes X as grad input, which is the
    // same as the cuDNN batch_norm backward calculation; batch_norm with
    // inplace as true only takes Y as input, and X has to be recomputed by
    // inverting the batch_norm transformation on Y.
    const Tensor *x;
    bool is_inplace;
    if (ctx.HasInput("Y")) {
      x = ctx.Input<Tensor>("Y");
      is_inplace = true;
      PADDLE_ENFORCE_EQ(d_x, d_y,
                        platform::errors::InvalidArgument(
                            "X@GRAD and Y@GRAD should share the same memory "
                            "in inplace mode"));
    } else {
      x = ctx.Input<Tensor>("X");
      is_inplace = false;
      PADDLE_ENFORCE_NE(d_x, d_y,
                        platform::errors::InvalidArgument(
                            "X@GRAD and Y@GRAD should not share the same "
                            "memory in non-inplace mode"));
    }

    PADDLE_ENFORCE_EQ(
        is_test, false,
        platform::errors::InvalidArgument(
            "`is_test = True` CANNOT be used in a train program. If "
            "you want to use the global statistics of a pretrained model, "
            "please set `use_global_stats = True`"));

    // Get the size for each dimension.
    // NCHW [batch_size, in_channels, in_height, in_width]
    const auto &x_dims = x->dims();
    PADDLE_ENFORCE_GE(x_dims.size(), 2,
                      platform::errors::InvalidArgument(
                          "The Input X dim size should be larger than 1."));
    PADDLE_ENFORCE_LE(x_dims.size(), 5,
                      platform::errors::InvalidArgument(
                          "The Input X dim size should be less than 6."));
    const int N = x_dims[0];
    const int C =
        (data_layout == DataLayout::kNCHW ? x_dims[1]
                                          : x_dims[x_dims.size() - 1]);
    const int sample_size = x->numel() / N / C;

    // init output
    d_x->mutable_data<T>(ctx.GetPlace());

    const T *mean_data = saved_mean->data<T>();
    const T *inv_var_data = saved_inv_variance->data<T>();
    Tensor inv_var_tensor;
    if (use_global_stats) {
      const auto *running_mean = ctx.Input<Tensor>("Mean");
      const auto *running_variance = ctx.Input<Tensor>("Variance");
      mean_data = running_mean->data<T>();
      inv_var_tensor.Resize({C});
      T *running_inv_var_data = inv_var_tensor.mutable_data<T>(ctx.GetPlace());
      EigenVectorArrayMap<T> inv_var_tmp(running_inv_var_data, C);
      ConstEigenVectorArrayMap<T> var_arr(running_variance->data<T>(), C);

      inv_var_tmp = (var_arr + epsilon).sqrt().inverse();
      inv_var_data = running_inv_var_data;
    }

    ConstEigenVectorArrayMap<T> scale_arr(scale->data<T>(), C);
    ConstEigenVectorArrayMap<T> bias_arr(bias->data<T>(), C);
    ConstEigenVectorArrayMap<T> mean_arr(mean_data, C);
    ConstEigenVectorArrayMap<T> inv_var_arr(inv_var_data, C);

    T *d_bias_data = nullptr;
    T *d_scale_data = nullptr;
    if (d_scale && d_bias) {
      d_bias_data = d_bias->mutable_data<T>(ctx.GetPlace());
      d_scale_data = d_scale->mutable_data<T>(ctx.GetPlace());
    }

    // d_bias = np.sum(d_y, axis=0)
    // d_scale = np.sum((X - mean) * inv_var * d_y, axis=0)
    // d_x = (1. / N) * scale * inv_var * (N * d_y - np.sum(d_y, axis=0)
    //   - (X - mean) * inv_var * inv_var * np.sum(d_y * (X - mean), axis=0))
    EigenVectorArrayMap<T> d_bias_arr(d_bias_data, C);
    EigenVectorArrayMap<T> d_scale_arr(d_scale_data, C);

    if (d_scale && d_bias) {
      d_bias_arr.setZero();
      d_scale_arr.setZero();
    }

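    // With a single element per normalization group the forward pass was an
    // identity, so the gradient passes straight through.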
    if ((N * sample_size) == 1 && !use_global_stats) {
      framework::TensorCopy(*d_y, ctx.GetPlace(), d_x);
      return;
    }

    // Pre-fold scale * inv_var by the reduction size N * sample_size (1 when
    // using global stats, where the statistics are constants).
    int scale_coeff = use_global_stats ? 1 : N * sample_size;
    const auto scale_inv_var_nhw = scale_arr * inv_var_arr / scale_coeff;

    Tensor dy_sum;
    dy_sum.Resize({C});
    dy_sum.mutable_data<T>(ctx.GetPlace());
    EigenVectorArrayMap<T> dy_sum_arr(dy_sum.mutable_data<T>(ctx.GetPlace()),
                                      C);

    Tensor dy_mul_x_sub_mean_mul_invstd_sum;
    dy_mul_x_sub_mean_mul_invstd_sum.Resize({C});
    dy_mul_x_sub_mean_mul_invstd_sum.mutable_data<T>(ctx.GetPlace());
    EigenVectorArrayMap<T> dy_mul_x_sub_mean_mul_invstd_sum_arr(
        dy_mul_x_sub_mean_mul_invstd_sum.mutable_data<T>(ctx.GetPlace()), C);

    dy_sum_arr.setZero();
    dy_mul_x_sub_mean_mul_invstd_sum_arr.setZero();

    // inplace calculation
    // Y: (x - est_mean) * inv_var * scale + bias
    //   formula transform ====>
    //   (x * inv_var * scale) + (bias - est_mean * inv_var * scale)
    // X: (y - bias) / scale / inv_var + est_mean
    //   formula transform ====>
    //   (y - bias) / (scale * inv_var) + est_mean
    switch (data_layout) {
      case DataLayout::kNCHW: {
        if (is_inplace) {
          auto px = *x;
          EigenArrayMap<T> x_data(px.mutable_data<T>(ctx.GetPlace()),
                                  sample_size, N * C);
          ConstEigenArrayMap<T> y_data(x->data<T>(), sample_size, N * C);
          for (int nc = 0; nc < N * C; ++nc) {
            x_data.col(nc) = (y_data.col(nc) - bias_arr(nc % C)) /
                                 scale_inv_var_nhw(nc % C) / scale_coeff +
                             mean_arr(nc % C);
          }
        }
        ConstEigenArrayMap<T> x_arr(x->data<T>(), sample_size, N * C);
        ConstEigenArrayMap<T> d_y_arr(d_y->data<T>(), sample_size, N * C);
        EigenArrayMap<T> d_x_arr(d_x->mutable_data<T>(ctx.GetPlace()),
                                 sample_size, N * C);

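        // Accumulate the per-channel reductions sum(dy) and
        // sum(dy * (x - mean) * inv_var); these are exactly d_bias and
        // d_scale.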
        for (int nc = 0; nc < N * C; ++nc) {
          int c = nc % C;
          dy_sum_arr(c) += d_y_arr.col(nc).sum();
          dy_mul_x_sub_mean_mul_invstd_sum_arr(c) +=
              ((x_arr.col(nc) - mean_arr(c)) * inv_var_arr(c) * d_y_arr.col(nc))
                  .sum();
        }

        if (d_scale && d_bias) {
          d_bias_arr = dy_sum_arr;
          d_scale_arr = dy_mul_x_sub_mean_mul_invstd_sum_arr;
        }

        if (!use_global_stats) {
          for (int nc = 0; nc < N * C; ++nc) {
            int c = nc % C;
            d_x_arr.col(nc) =
                scale_inv_var_nhw(c) *
                (d_y_arr.col(nc) * N * sample_size - dy_sum_arr(c) -
                 (x_arr.col(nc) - mean_arr[c]) *
                     dy_mul_x_sub_mean_mul_invstd_sum_arr(c) * inv_var_arr(c));
          }
        } else {
          for (int nc = 0; nc < N * C; ++nc) {
            int c = nc % C;
            d_x_arr.col(nc) = scale_inv_var_nhw(c) * d_y_arr.col(nc);
          }
        }
        break;
      }
      case DataLayout::kNHWC: {
        if (is_inplace) {
          auto px = *x;
          EigenArrayMap<T> x_data(px.mutable_data<T>(ctx.GetPlace()), C,
                                  N * sample_size);
          ConstEigenArrayMap<T> y_data(x->data<T>(), C, N * sample_size);
          for (int nhw = 0; nhw < N * sample_size; nhw++) {
            x_data.col(nhw) = (y_data.col(nhw) - bias_arr) / scale_inv_var_nhw /
                                  scale_coeff +
                              mean_arr;
          }
        }
        ConstEigenArrayMap<T> x_arr(x->data<T>(), C, N * sample_size);
        ConstEigenArrayMap<T> d_y_arr(d_y->data<T>(), C, N * sample_size);
        EigenArrayMap<T> d_x_arr(d_x->mutable_data<T>(ctx.GetPlace()), C,
                                 N * sample_size);

        for (int nhw = 0; nhw < N * sample_size; ++nhw) {
          dy_sum_arr += d_y_arr.col(nhw);
          dy_mul_x_sub_mean_mul_invstd_sum_arr +=
              (x_arr.col(nhw) - mean_arr) * inv_var_arr * d_y_arr.col(nhw);
        }

        if (d_scale && d_bias) {
          d_bias_arr = dy_sum_arr;
          d_scale_arr = dy_mul_x_sub_mean_mul_invstd_sum_arr;
        }

        if (!use_global_stats) {
          for (int nhw = 0; nhw < N * sample_size; ++nhw) {
            d_x_arr.col(nhw) =
                scale_inv_var_nhw *
                (d_y_arr.col(nhw) * N * sample_size - dy_sum_arr -
                 (x_arr.col(nhw) - mean_arr) *
                     dy_mul_x_sub_mean_mul_invstd_sum_arr * inv_var_arr);
          }
        } else {
          for (int nhw = 0; nhw < N * sample_size; ++nhw) {
            d_x_arr.col(nhw) = scale_inv_var_nhw * d_y_arr.col(nhw);
          }
        }
        break;
      }
      default:
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Unknown storage order: %s", data_layout_str));
    }
  }
};

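// Describes how to construct batch_norm_grad from the forward op: the
// backward kernel consumes the forward inputs, the saved mini-batch
// statistics, and the gradient of Y.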
template <typename T>
void BatchNormGradMaker<T>::Apply(GradOpPtr<T> op) const {
  op->SetType(this->ForwardOpType() + "_grad");
  op->SetInput("X", this->Input("X"));
  op->SetInput(framework::GradVarName("Y"), this->OutputGrad("Y"));

  op->SetInput("Scale", this->Input("Scale"));
  op->SetInput("Bias", this->Input("Bias"));
  op->SetInput("SavedMean", this->Output("SavedMean"));
  op->SetInput("SavedVariance", this->Output("SavedVariance"));
  if (this->HasOutput("ReserveSpace")) {
    op->SetInput("ReserveSpace", this->Output("ReserveSpace"));
  }

  // used when setting use_global_stats True during training
  if (boost::get<bool>(this->GetAttr("use_global_stats"))) {
    op->SetInput("Mean", this->Output("MeanOut"));
    op->SetInput("Variance", this->Output("VarianceOut"));
  }

  op->SetAttrMap(this->Attrs());

  op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
  op->SetOutput(framework::GradVarName("Scale"), this->InputGrad("Scale"));
  op->SetOutput(framework::GradVarName("Bias"), this->InputGrad("Bias"));
}

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(batch_norm, ops::BatchNormOp, ops::BatchNormOpMaker,
                  ops::BatchNormOpInferVarType,
                  ops::BatchNormGradMaker<paddle::framework::OpDesc>,
                  ops::BatchNormGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(batch_norm_grad, ops::BatchNormGradOp);

REGISTER_OP_CPU_KERNEL(
    batch_norm, ops::BatchNormKernel<paddle::platform::CPUDeviceContext, float>,
    ops::BatchNormKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    batch_norm_grad,
    ops::BatchNormGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::BatchNormGradKernel<paddle::platform::CPUDeviceContext, double>);