/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/batch_norm_op.h"
#include <string>
#include "paddle/fluid/framework/data_layout.h"
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

class BatchNormOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"), "");
    PADDLE_ENFORCE(ctx->HasInput("Scale"), "");
    PADDLE_ENFORCE(ctx->HasInput("Bias"), "");
    PADDLE_ENFORCE(ctx->HasInput("Mean"), "");
    PADDLE_ENFORCE(ctx->HasInput("Variance"), "");
    PADDLE_ENFORCE(ctx->HasOutput("Y"), "");
    PADDLE_ENFORCE(ctx->HasOutput("MeanOut"), "");
    PADDLE_ENFORCE(ctx->HasOutput("VarianceOut"), "");
    PADDLE_ENFORCE(ctx->HasOutput("SavedMean"), "");
    PADDLE_ENFORCE(ctx->HasOutput("SavedVariance"), "");

    // make sure Mean/MeanOut and Variance/VarianceOut share memory in Python
    PADDLE_ENFORCE_EQ(ctx->Inputs("Mean")[0], ctx->Outputs("MeanOut")[0],
                      "Mean and MeanOut should share the same memory");
    PADDLE_ENFORCE_EQ(ctx->Inputs("Variance")[0],
                      ctx->Outputs("VarianceOut")[0],
                      "Variance and VarianceOut should share the same memory");

    const auto x_dims = ctx->GetInputDim("X");
    const DataLayout data_layout = framework::StringToDataLayout(
        ctx->Attrs().Get<std::string>("data_layout"));

    PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5,
                   "Input X must have 2 to 5 dimensions.");

    const int64_t C =
        (data_layout == DataLayout::kNCHW ? x_dims[1]
                                          : x_dims[x_dims.size() - 1]);

    PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1UL);
    PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale")[0], C);
    PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias").size(), 1UL);
    PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias")[0], C);

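    // Y has the same shape as X; all statistics outputs are per-channel
    // vectors of length C.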
    ctx->SetOutputDim("Y", x_dims);
    ctx->SetOutputDim("MeanOut", {C});
    ctx->SetOutputDim("VarianceOut", {C});
    ctx->SetOutputDim("SavedMean", {C});
    ctx->SetOutputDim("SavedVariance", {C});
    ctx->ShareLoD("X", "Y");
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type = ctx.Input<Tensor>("X")->type();
    // By default, the type of the scale, bias, mean, and var tensors should
    // be float (for float or float16 input tensors) or double (for double
    // input tensors).
    auto bn_param_type = framework::proto::VarType::FP32;
    if (input_data_type == framework::proto::VarType::FP64) {
      bn_param_type = framework::proto::VarType::FP64;
    }
    PADDLE_ENFORCE_EQ(bn_param_type, ctx.Input<Tensor>("Scale")->type(),
                      "Scale input should be of float type");
    PADDLE_ENFORCE_EQ(bn_param_type, ctx.Input<Tensor>("Bias")->type(),
                      "Bias input should be of float type");
    PADDLE_ENFORCE_EQ(bn_param_type, ctx.Input<Tensor>("Mean")->type(),
                      "Mean input should be of float type");
    PADDLE_ENFORCE_EQ(bn_param_type, ctx.Input<Tensor>("Variance")->type(),
                      "Variance input should be of float type");

    // TODO(pzelazko-intel): enable MKLDNN layout when it's ready
    framework::LibraryType library = framework::LibraryType::kPlain;
    framework::DataLayout layout = framework::DataLayout::kAnyLayout;
#ifdef PADDLE_WITH_MKLDNN
    if (library == framework::LibraryType::kPlain &&
        platform::CanMKLDNNBeUsed(ctx)) {
      library = framework::LibraryType::kMKLDNN;
      layout = framework::DataLayout::kMKLDNN;
    }
#endif

    return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout,
                                   library);
  }
};

class BatchNormOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddAttr<bool>("is_test",
                  "(bool, default false) Set to true for inference only, false "
                  "for training. Some layers may run faster when this is true.")
        .SetDefault(false);
    AddAttr<float>("momentum",
                   "(float, default 0.9) Exponential decay rate used to "
                   "update the running mean and variance.")
        .SetDefault(0.9);
    AddAttr<float>("epsilon",
                   "(float, default 1e-5) Small constant added to the "
                   "variance for numerical stability.")
        .SetDefault(1e-5)
        .AddCustomChecker([](const float &epsilon) {
          PADDLE_ENFORCE(epsilon >= 0.0f && epsilon <= 0.001f,
                         "'epsilon' should be between 0.0 and 0.001.");
        });
    AddAttr<std::string>("data_layout",
                         "(string, default NCHW) The data layout, NCHW or "
                         "NHWC.")
        .SetDefault("NCHW");
    AddInput("X", "The input tensor");
    AddInput("Scale",
             "Scale is a 1-dimensional tensor of size C "
             "that is applied to the output");
    AddInput("Bias",
             "Bias is a 1-dimensional tensor of size C "
             "that is applied to the output");
    AddInput("Mean",
             "The global mean (for training) or "
             "estimated mean (for testing)");
    AddInput("Variance",
             "The global variance (for training) "
             "or estimated Variance (for testing)");
    AddOutput("Y", "result after normalization");
    AddOutput("MeanOut",
              "Share memory with Mean. "
              "Store the global mean when training");
    AddOutput("VarianceOut",
              "Share memory with Variance. "
              "Store the global Variance when training");
    AddOutput("SavedMean",
              "Mean of the current mini batch, "
              "will apply to output when training")
        .AsIntermediate();
    AddOutput("SavedVariance",
              "Variance of the current mini batch, "
              "will apply to output when training")
        .AsIntermediate();
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false);
    AddAttr<bool>("fuse_with_relu",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false);
    AddComment(R"DOC(
Batch Normalization.

Batch Norm is implemented as described in the paper:
https://arxiv.org/pdf/1502.03167.pdf
It can be used as a normalizer function for conv2d and fully_connected operations.
The required data format for this layer is one of the following:
1. NHWC `[batch, in_height, in_width, in_channels]`
2. NCHW `[batch, in_channels, in_height, in_width]`
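
The per-channel computation is:
    y = scale * (x - mean) / sqrt(variance + epsilon) + bias
where mean and variance are the statistics of the current mini-batch during
training, and the running (moving average) estimates during inference.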

)DOC");
  }
};

class BatchNormOpInferVarType
    : public framework::PassInDtypeAndVarTypeToOutput {
 protected:
  std::unordered_map<std::string, std::string> GetInputOutputWithSameType()
      const override {
    return std::unordered_map<std::string, std::string>{{"X", /*->*/ "Y"}};
  }
};

template <typename T>
class BatchNormKernel<platform::CPUDeviceContext, T>
    : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    const float epsilon = ctx.Attr<float>("epsilon");
    const float momentum = ctx.Attr<float>("momentum");
    const bool is_test = ctx.Attr<bool>("is_test");
    const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
    const DataLayout data_layout =
        framework::StringToDataLayout(data_layout_str);

    const auto *x = ctx.Input<Tensor>("X");
    const auto &x_dims = x->dims();
    PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5,
                   "The Input dim size should be between 2 and 5");
    const int N = x_dims[0];
    const int C =
        (data_layout == DataLayout::kNCHW ? x_dims[1]
                                          : x_dims[x_dims.size() - 1]);
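    // Number of spatial elements per (batch, channel) pair, e.g. H * W for
    // 4-D input.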
    const int sample_size = x->numel() / N / C;

    auto *y = ctx.Output<Tensor>("Y");
    auto *mean_out = ctx.Output<Tensor>("MeanOut");
    auto *variance_out = ctx.Output<Tensor>("VarianceOut");
    auto *saved_mean = ctx.Output<Tensor>("SavedMean");
    auto *saved_variance = ctx.Output<Tensor>("SavedVariance");

    // alloc memory
    y->mutable_data<T>(ctx.GetPlace());
    mean_out->mutable_data<T>(ctx.GetPlace());
    variance_out->mutable_data<T>(ctx.GetPlace());
    saved_mean->mutable_data<T>(ctx.GetPlace());
    saved_variance->mutable_data<T>(ctx.GetPlace());

    if (!is_test) {
      // saved_xx is used only for this batch of data
      EigenVectorArrayMap<T> saved_mean_e(
          saved_mean->mutable_data<T>(ctx.GetPlace()), C);
      EigenVectorArrayMap<T> saved_variance_e(
          saved_variance->mutable_data<T>(ctx.GetPlace()), C);
      saved_mean_e.setZero();
      saved_variance_e.setZero();

      EigenVectorArrayMap<T> running_mean_arr(
          mean_out->mutable_data<T>(ctx.GetPlace()), C);
      EigenVectorArrayMap<T> running_var_arr(
          variance_out->mutable_data<T>(ctx.GetPlace()), C);

      if ((N * sample_size) == 1) {
        LOG(WARNING) << "Only 1 element in normalization dimension, "
                     << "we skip the batch norm calculation, let y = x.";
        framework::TensorCopySync(*x, ctx.GetPlace(), y);
        return;
      }

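      // Compute the per-channel mean and (biased) variance of the current
      // mini-batch in two passes over the data.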
      switch (data_layout) {
        case DataLayout::kNCHW: {
          ConstEigenArrayMap<T> x_arr(x->data<T>(), sample_size, N * C);
          for (int nc = 0; nc < N * C; ++nc) {
            saved_mean_e(nc % C) += x_arr.col(nc).sum();
          }
          saved_mean_e /= N * sample_size;
          for (int nc = 0; nc < N * C; ++nc) {
            saved_variance_e(nc % C) +=
                (x_arr.col(nc) - saved_mean_e(nc % C)).matrix().squaredNorm();
          }
          saved_variance_e /= N * sample_size;
          break;
        }
        case DataLayout::kNHWC: {
          ConstEigenArrayMap<T> x_arr(x->data<T>(), C, N * sample_size);
          for (int i = 0; i < N * sample_size; ++i) {
            saved_mean_e += x_arr.col(i);
          }
          saved_mean_e /= N * sample_size;
          for (int i = 0; i < N * sample_size; ++i) {
            saved_variance_e +=
                (x_arr.col(i) - saved_mean_e) * (x_arr.col(i) - saved_mean_e);
          }
          saved_variance_e /= N * sample_size;
          break;
        }
        default:
          PADDLE_THROW("Unknown storage order: %s", data_layout_str);
      }

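      // Update the running statistics with an exponential moving average:
      // running = momentum * running + (1 - momentum) * mini-batch statistic.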
      running_mean_arr =
          running_mean_arr * momentum + saved_mean_e * (1. - momentum);
      running_var_arr =
          running_var_arr * momentum + saved_variance_e * (1. - momentum);
    }

    // use SavedMean and SavedVariance to do normalize
    Eigen::Array<T, Eigen::Dynamic, 1> inv_std(C);
    if (is_test) {
      ConstEigenVectorArrayMap<T> var_arr(
          ctx.Input<Tensor>("Variance")->data<T>(), C);
      inv_std = (var_arr + epsilon).sqrt().inverse();
    } else {
      EigenVectorArrayMap<T> saved_inv_std(
          ctx.Output<Tensor>("SavedVariance")->data<T>(), C);
      // Invert SavedVariance first; the gradient will use it too.
      saved_inv_std = (saved_inv_std + epsilon).inverse().sqrt();
      inv_std = saved_inv_std;
    }
    ConstEigenVectorArrayMap<T> mean_arr(
        is_test ? ctx.Input<Tensor>("Mean")->data<T>()
                : ctx.Output<Tensor>("SavedMean")->data<T>(),
        C);

    //   (x - est_mean) * inv_var * scale + bias
    //   formula transform ====>
    //   (x * inv_var * scale) + (bias - est_mean * inv_var * scale)
    const auto *scale = ctx.Input<Tensor>("Scale");
    const auto *bias = ctx.Input<Tensor>("Bias");
    ConstEigenVectorArrayMap<T> scale_arr(scale->data<T>(), C);
    ConstEigenVectorArrayMap<T> bias_arr(bias->data<T>(), C);
    Eigen::Array<T, Eigen::Dynamic, 1> new_scale = inv_std * scale_arr;
    Eigen::Array<T, Eigen::Dynamic, 1> new_bias =
        bias_arr - mean_arr * inv_std * scale_arr;

    switch (data_layout) {
      case DataLayout::kNCHW: {
        EigenArrayMap<T> y_arr(y->mutable_data<T>(ctx.GetPlace()), sample_size,
                               N * C);
        ConstEigenArrayMap<T> x_arr(x->data<T>(), sample_size, N * C);
        for (int nc = 0; nc < N * C; ++nc) {
          y_arr.col(nc) = x_arr.col(nc) * new_scale(nc % C) + new_bias(nc % C);
        }
        break;
      }
      case DataLayout::kNHWC: {
        EigenArrayMap<T>(y->mutable_data<T>(ctx.GetPlace()), C,
                         N * sample_size) =
            (ConstEigenArrayMap<T>(x->data<T>(), C, N * sample_size).colwise() *
             new_scale)
                .colwise() +
            new_bias;
        break;
      }
      default:
        PADDLE_THROW("Unknown storage order: %s", data_layout_str);
    }
  }
};

class BatchNormGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    // check input
    PADDLE_ENFORCE(ctx->HasInput("X"));
    PADDLE_ENFORCE(ctx->HasInput("Scale"), "");
    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")), "");
    PADDLE_ENFORCE(ctx->HasInput("SavedMean"), "");
    PADDLE_ENFORCE(ctx->HasInput("SavedVariance"), "");

    // check output
    PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")), "");
    PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("Scale")), "");
    PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("Bias")), "");

    const auto x_dims = ctx->GetInputDim("X");
    const DataLayout data_layout = framework::StringToDataLayout(
        ctx->Attrs().Get<std::string>("data_layout"));
    const int C =
        (data_layout == DataLayout::kNCHW ? x_dims[1]
                                          : x_dims[x_dims.size() - 1]);

    ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
    ctx->SetOutputDim(framework::GradVarName("Scale"), {C});
    ctx->SetOutputDim(framework::GradVarName("Bias"), {C});
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
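    // Y@GRAD must be provided as a Tensor or LoDTensor; the kernel data type
    // follows the type of X.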
    const auto *var = ctx.InputVar(framework::GradVarName("Y"));
    if (var == nullptr) {
      PADDLE_THROW("can't find Y@GRAD");
    }
    const Tensor *t = nullptr;
    if (var->IsType<Tensor>()) {
      t = &var->Get<Tensor>();
    } else if (var->IsType<LoDTensor>()) {
      t = &var->Get<LoDTensor>();
    }
    if (t == nullptr) {
      PADDLE_THROW("can't find Y@GRAD");
    }

    // TODO(pzelazko-intel): enable MKLDNN layout when it's ready
    framework::LibraryType library = framework::LibraryType::kPlain;
    framework::DataLayout layout = framework::DataLayout::kAnyLayout;

#ifdef PADDLE_WITH_MKLDNN
    if (library == framework::LibraryType::kPlain &&
        platform::CanMKLDNNBeUsed(ctx)) {
      library = framework::LibraryType::kMKLDNN;
      layout = framework::DataLayout::kMKLDNN;
    }
#endif

    return framework::OpKernelType(ctx.Input<Tensor>("X")->type(),
                                   ctx.GetPlace(), layout, library);
  }
};

template <typename T>
class BatchNormGradKernel<platform::CPUDeviceContext, T>
    : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    const auto *x = ctx.Input<Tensor>("X");
    const auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y"));
    const auto *scale = ctx.Input<Tensor>("Scale");
    const auto *saved_mean = ctx.Input<Tensor>("SavedMean");
    // SavedVariance has been inverted in the forward operator
    const auto *saved_inv_variance = ctx.Input<Tensor>("SavedVariance");
    const std::string data_layout_str = ctx.Attr<std::string>("data_layout");
    const DataLayout data_layout =
        framework::StringToDataLayout(data_layout_str);

    // Get the size for each dimension.
    // NCHW [batch_size, in_channels, in_height, in_width]
    const auto &x_dims = x->dims();
    PADDLE_ENFORCE(x_dims.size() >= 2 && x_dims.size() <= 5,
                   "The Input dim size should be between 2 and 5");
    const int N = x_dims[0];
    const int C =
        (data_layout == DataLayout::kNCHW ? x_dims[1]
                                          : x_dims[x_dims.size() - 1]);
    const int sample_size = x->numel() / N / C;

    ConstEigenVectorArrayMap<T> scale_arr(scale->data<T>(), C);
    ConstEigenVectorArrayMap<T> mean_arr(saved_mean->data<T>(), C);
    ConstEigenVectorArrayMap<T> inv_var_arr(saved_inv_variance->data<T>(), C);

    // init output
    auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto *d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale"));
    auto *d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias"));

    d_x->mutable_data<T>(ctx.GetPlace());
    d_scale->mutable_data<T>(ctx.GetPlace());
    d_bias->mutable_data<T>(ctx.GetPlace());

    // d_bias = np.sum(d_y, axis=0)
    // d_scale = np.sum((X - mean) * inv_std * dy, axis=0)
    // d_x = (1. / N) * scale * inv_var * (N * d_y - np.sum(d_y, axis=0)
    //   - (X - mean) * inv_var * inv_var * np.sum(d_y * (X - mean), axis=0))

    EigenVectorArrayMap<T> d_bias_arr(d_bias->mutable_data<T>(ctx.GetPlace()),
                                      C);
    EigenVectorArrayMap<T> d_scale_arr(d_scale->mutable_data<T>(ctx.GetPlace()),
                                       C);

    d_bias_arr.setZero();
    d_scale_arr.setZero();

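    // With only one element in the normalization dimension the forward pass
    // copies x to y, so the gradient simply passes through.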
    if ((N * sample_size) == 1) {
      framework::TensorCopySync(*d_y, ctx.GetPlace(), d_x);
      return;
    }

    const auto scale_inv_var_nhw = scale_arr * inv_var_arr / (N * sample_size);

    switch (data_layout) {
      case DataLayout::kNCHW: {
        ConstEigenArrayMap<T> x_arr(x->data<T>(), sample_size, N * C);
        ConstEigenArrayMap<T> d_y_arr(d_y->data<T>(), sample_size, N * C);
        EigenArrayMap<T> d_x_arr(d_x->mutable_data<T>(ctx.GetPlace()),
                                 sample_size, N * C);
        d_x_arr.setZero();

        for (int nc = 0; nc < N * C; ++nc) {
          int c = nc % C;
          d_bias_arr(c) += d_y_arr.col(nc).sum();
          d_scale_arr(c) +=
              ((x_arr.col(nc) - mean_arr(c)) * inv_var_arr(c) * d_y_arr.col(nc))
                  .sum();
        }
        for (int nc = 0; nc < N * C; ++nc) {
          int c = nc % C;
          d_x_arr.col(nc) +=
              scale_inv_var_nhw(c) *
              (d_y_arr.col(nc) * N * sample_size - d_bias_arr(c) -
               (x_arr.col(nc) - mean_arr[c]) * d_scale_arr(c) * inv_var_arr(c));
        }
        break;
      }
      case DataLayout::kNHWC: {
        ConstEigenArrayMap<T> x_arr(x->data<T>(), C, N * sample_size);
        ConstEigenArrayMap<T> d_y_arr(d_y->data<T>(), C, N * sample_size);
        EigenArrayMap<T> d_x_arr(d_x->mutable_data<T>(ctx.GetPlace()), C,
                                 N * sample_size);
        d_x_arr.setZero();

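        // Precompute the per-channel reductions reused for every column in
        // the loop below.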
        const auto d_y_row_sum = d_y_arr.rowwise().sum();
        const auto x_minus_mean = x_arr.colwise() - mean_arr;
        const auto d_y_mul_x_minus_mean_row_sum =
            (d_y_arr * x_minus_mean).rowwise().sum();
        const auto inv_var_sqr = inv_var_arr * inv_var_arr;
        for (int nhw = 0; nhw < N * sample_size; ++nhw) {
          d_bias_arr += d_y_arr.col(nhw);
          d_scale_arr +=
              (x_arr.col(nhw) - mean_arr) * inv_var_arr * d_y_arr.col(nhw);
          d_x_arr.col(nhw) +=
              scale_inv_var_nhw *
              (d_y_arr.col(nhw) * N * sample_size - d_y_row_sum -
               x_minus_mean.col(nhw) * inv_var_sqr *
                   d_y_mul_x_minus_mean_row_sum);
        }
        break;
      }
      default:
        PADDLE_THROW("Unknown storage order: %s", data_layout_str);
    }
  }
};

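// Builds the batch_norm_grad op description from the forward op: it forwards
// X, Scale, Bias, the saved mini-batch statistics, and the gradient of Y, and
// requests gradients for X, Scale, and Bias.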
class BatchNormGradMaker : public framework::SingleGradOpDescMaker {
 public:
  using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;

 protected:
  std::unique_ptr<framework::OpDesc> Apply() const override {
    auto *op = new framework::OpDesc();
    op->SetType("batch_norm_grad");
    op->SetInput("X", Input("X"));
    op->SetInput(framework::GradVarName("Y"), OutputGrad("Y"));

    op->SetInput("Scale", Input("Scale"));
    op->SetInput("Bias", Input("Bias"));
    op->SetInput("SavedMean", Output("SavedMean"));
    op->SetInput("SavedVariance", Output("SavedVariance"));

    op->SetAttrMap(Attrs());

    op->SetOutput(framework::GradVarName("X"), InputGrad("X"));
    op->SetOutput(framework::GradVarName("Scale"), InputGrad("Scale"));
    op->SetOutput(framework::GradVarName("Bias"), InputGrad("Bias"));

    return std::unique_ptr<framework::OpDesc>(op);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(batch_norm, ops::BatchNormOp, ops::BatchNormOpMaker,
                  ops::BatchNormOpInferVarType, ops::BatchNormGradMaker);
REGISTER_OPERATOR(batch_norm_grad, ops::BatchNormGradOp);

REGISTER_OP_CPU_KERNEL(
    batch_norm, ops::BatchNormKernel<paddle::platform::CPUDeviceContext, float>,
    ops::BatchNormKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    batch_norm_grad,
    ops::BatchNormGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::BatchNormGradKernel<paddle::platform::CPUDeviceContext, double>);