/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/batch_norm_op.h"

#include <memory>
#include <string>
#include <unordered_map>

#include "paddle/fluid/framework/data_layout.h"
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/phi/infermeta/multiary.h"

namespace paddle {
namespace operators {

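// Checks that all required inputs/outputs are present and infers the output
// shapes of batch_norm from the shape of X and the data_layout attribute.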
void BatchNormOp::InferShape(framework::InferShapeContext *ctx) const {
  OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "BatchNorm");
  OP_INOUT_CHECK(ctx->HasInput("Scale"), "Input", "Scale", "BatchNorm");
  OP_INOUT_CHECK(ctx->HasInput("Bias"), "Input", "Bias", "BatchNorm");
  OP_INOUT_CHECK(ctx->HasInput("Mean"), "Input", "Mean", "BatchNorm");
  OP_INOUT_CHECK(ctx->HasInput("Variance"), "Input", "Variance", "BatchNorm");
  OP_INOUT_CHECK(ctx->HasOutput("Y"), "Output", "Y", "BatchNorm");

  bool is_test = ctx->Attrs().Get<bool>("is_test");
  bool trainable_stats = ctx->Attrs().Get<bool>("trainable_statistics");
  bool test_mode = is_test && (!trainable_stats);
  if (!test_mode) {
    OP_INOUT_CHECK(ctx->HasOutput("MeanOut"), "Output", "MeanOut", "BatchNorm");
    OP_INOUT_CHECK(
        ctx->HasOutput("VarianceOut"), "Output", "VarianceOut", "BatchNorm");
    OP_INOUT_CHECK(
        ctx->HasOutput("SavedMean"), "Output", "SavedMean", "BatchNorm");
    OP_INOUT_CHECK(ctx->HasOutput("SavedVariance"),
                   "Output",
                   "SavedVariance",
                   "BatchNorm");
  }

  // make sure Mean/MeanOut and Variance/VarianceOut share memory in Python
  PADDLE_ENFORCE_EQ(ctx->Inputs("Mean")[0],
                    ctx->Outputs("MeanOut")[0],
                    platform::errors::InvalidArgument(
                        "Mean and MeanOut should share the same memory"));
  PADDLE_ENFORCE_EQ(
      ctx->Inputs("Variance")[0],
      ctx->Outputs("VarianceOut")[0],
      platform::errors::InvalidArgument(
          "Variance and VarianceOut should share the same memory"));

  const auto x_dims = ctx->GetInputDim("X");

  for (int i = 0; i < x_dims.size(); i++) {
    PADDLE_ENFORCE_EQ(
        (x_dims[i] == -1) || (x_dims[i] > 0),
        true,
        platform::errors::InvalidArgument(
            "Each dimension of input tensor is expected to be -1 or a "
            "positive number, but received %d. Input's shape is [%s].",
            x_dims[i],
            x_dims));
  }

  const DataLayout data_layout = framework::StringToDataLayout(
      ctx->Attrs().Get<std::string>("data_layout"));

  if (ctx->IsRuntime() && ctx->HasInput("MomentumTensor")) {
    auto mom = ctx->Inputs("MomentumTensor");
    PADDLE_ENFORCE_EQ(mom.size(),
                      1,
                      platform::errors::InvalidArgument(
                          "The input tensor MomentumTensor's size must be 1. "
                          "But received: MomentumTensor's size is [%d]",
                          mom.size()));
  }

  PADDLE_ENFORCE_GE(
      x_dims.size(),
      2,
      platform::errors::InvalidArgument(
          "ShapeError: the dimension of input "
          "X must be greater than or equal to 2. But received: the shape of "
          "input X = [%s], the dimension of input X = [%d]",
          x_dims,
          x_dims.size()));
  PADDLE_ENFORCE_LE(
      x_dims.size(),
      5,
      platform::errors::InvalidArgument(
          "ShapeError: the dimension of input X "
          "must be smaller than or equal to 5. But received: the shape of "
          "input X = [%s], the dimension of input X = [%d]",
          x_dims,
          x_dims.size()));
  VLOG(4) << ctx->IsRunMKLDNNKernel();
  VLOG(4) << data_layout;
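  // The channel count C comes from dim 1 for NCHW (also used by the MKL-DNN
  // kernel, which keeps tensors in NCHW order) and from the last dim for NHWC.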
  const int64_t C =
      ((ctx->IsRunMKLDNNKernel() == true) || (data_layout == DataLayout::kNCHW)
           ? x_dims[1]
           : x_dims[x_dims.size() - 1]);
  auto scale_dim = ctx->GetInputDim("Scale");
  auto bias_dim = ctx->GetInputDim("Bias");
  PADDLE_ENFORCE_EQ(
      scale_dim.size(),
      1UL,
      platform::errors::InvalidArgument(
          "ShapeError: the dimension of scale must be equal to 1. "
          "But received: the shape of scale is [%s], the dimension "
          "of scale is [%d]",
          scale_dim,
          scale_dim.size()));
  PADDLE_ENFORCE_EQ(bias_dim.size(),
                    1UL,
                    platform::errors::InvalidArgument(
                        "ShapeError: the dimension of bias must be equal to 1. "
                        "But received: the shape of bias is [%s], the dimension "
                        "of bias is [%d]",
                        bias_dim,
                        bias_dim.size()));
  bool check = true;
  if ((!ctx->IsRuntime()) &&
      (phi::product(scale_dim) <= 0 || phi::product(bias_dim) <= 0)) {
    check = false;
  }

  if (check) {
    PADDLE_ENFORCE_EQ(scale_dim[0],
                      C,
                      platform::errors::InvalidArgument(
                          "ShapeError: the shape of scale must be equal to [%d]. "
                          "But received: the shape of scale is [%d]",
                          C,
                          scale_dim[0]));
    PADDLE_ENFORCE_EQ(bias_dim[0],
                      C,
                      platform::errors::InvalidArgument(
                          "ShapeError: the shape of bias must be equal to [%d]. "
                          "But received: the shape of bias is [%d]",
                          C,
                          bias_dim[0]));
  }
  ctx->SetOutputDim("Y", x_dims);
  VLOG(4) << x_dims;
  ctx->SetOutputDim("MeanOut", {C});
  ctx->SetOutputDim("VarianceOut", {C});
  ctx->SetOutputDim("SavedMean", {C});
  ctx->SetOutputDim("SavedVariance", {C});
  ctx->ShareLoD("X", "Y");
}

framework::OpKernelType BatchNormOp::GetExpectedKernelType(
    const framework::ExecutionContext &ctx) const {
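  // The kernel's data type follows X; the parameter tensors (Scale, Bias,
  // Mean, Variance) are verified below to match the expected param type.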
  auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
  // By default, the type of the scale, bias, mean, and var tensors should
  // all be float (for float or float16 input tensors) or double (for double
  // input tensors).
  auto bn_param_type = framework::proto::VarType::FP32;
  if (input_data_type == framework::proto::VarType::FP64) {
    bn_param_type = framework::proto::VarType::FP64;
  }
  PADDLE_ENFORCE_EQ(
      bn_param_type,
      framework::TransToProtoVarType(ctx.Input<Tensor>("Scale")->dtype()),
      platform::errors::InvalidArgument("Scale input should be of float type"));
  PADDLE_ENFORCE_EQ(
      bn_param_type,
      framework::TransToProtoVarType(ctx.Input<Tensor>("Bias")->dtype()),
      platform::errors::InvalidArgument("Bias input should be of float type"));
  PADDLE_ENFORCE_EQ(
      bn_param_type,
      framework::TransToProtoVarType(ctx.Input<Tensor>("Mean")->dtype()),
      platform::errors::InvalidArgument("Mean input should be of float type"));
  PADDLE_ENFORCE_EQ(
      bn_param_type,
      framework::TransToProtoVarType(ctx.Input<Tensor>("Variance")->dtype()),
      platform::errors::InvalidArgument(
          "Variance input should be of float type"));

  // TODO(pzelazko-intel): enable MKLDNN layout when it's ready
  framework::LibraryType library = framework::LibraryType::kPlain;
  framework::DataLayout layout = framework::DataLayout::kAnyLayout;
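  // Prefer the MKL-DNN (oneDNN) kernel when it is built in and usable for
  // this input; otherwise keep the plain kernel with kAnyLayout.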
#ifdef PADDLE_WITH_MKLDNN
  if (library == framework::LibraryType::kPlain &&
      this->CanMKLDNNBeUsed(ctx, input_data_type)) {
    library = framework::LibraryType::kMKLDNN;
    layout = framework::DataLayout::kMKLDNN;
  }
#endif

  return framework::OpKernelType(
      input_data_type, ctx.GetPlace(), layout, library);
}

framework::OpKernelType BatchNormOp::GetKernelTypeForVar(
    const std::string &var_name,
    const Tensor &tensor,
    const framework::OpKernelType &expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  // Only the input requires reshaping; the weights and bias already
  // have their shape in NCHW order
  if ((var_name == "X") &&
      (expected_kernel_type.data_layout_ == framework::DataLayout::kMKLDNN) &&
      (tensor.layout() != framework::DataLayout::kMKLDNN)) {
    auto attrs = Attrs();
    auto ar = paddle::framework::AttrReader(attrs);
    const std::string data_layout = ar.Get<std::string>("data_layout");
    auto dl = framework::StringToDataLayout(data_layout);
    // Some models may have intentionally set "AnyLayout" for pool
    // op. Treat this as NCHW (default data_format value)
    if (dl != framework::DataLayout::kAnyLayout) {
      return framework::OpKernelType(
          expected_kernel_type.data_type_, tensor.place(), dl);
    }
  }
#endif
  return framework::OpKernelType(
      expected_kernel_type.data_type_, tensor.place(), tensor.layout());
}

void BatchNormOpMaker::Make() {
  AddAttr<bool>("is_test",
                "(bool, default false) Set to true for inference only, false "
                "for training. Some layers may run faster when this is true.")
      .SetDefault(false);
  AddAttr<float>("momentum", "").SetDefault(0.9);
  AddAttr<float>("epsilon", "")
      .SetDefault(1e-5)
      .AddCustomChecker([](const float &epsilon) {
        PADDLE_ENFORCE_GE(
            epsilon,
            0.0f,
            platform::errors::InvalidArgument(
                "'epsilon' should be greater than or equal to 0.0."));
        PADDLE_ENFORCE_LE(epsilon,
                          0.001f,
                          platform::errors::InvalidArgument(
                              "'epsilon' should be less than or equal to 0.001."));
      });
  AddAttr<std::string>("data_layout", "").SetDefault("NCHW");
  AddInput("X", "The input tensor");
  AddInput("Scale",
           "Scale is a 1-dimensional tensor of size C "
           "that is applied to the output");
  AddInput("Bias",
           "Bias is a 1-dimensional tensor of size C "
           "that is applied to the output");
  AddInput("Mean",
           "The global mean (for training) or "
           "estimated mean (for testing)");
  AddInput("Variance",
           "The global variance (for training) "
           "or estimated Variance (for testing)");
  AddInput("MomentumTensor",
           "(Tensor<float32>, optional) If provided, batch_norm will "
           "use this as momentum, this has a higher priority than "
           "attr(momentum), the shape of this tensor MUST BE [1].")
      .AsDispensable();
  AddOutput("Y", "result after normalization");
  AddOutput("MeanOut",
            "Share memory with Mean. "
            "Store the global mean when training");
  AddOutput("VarianceOut",
            "Share memory with Variance. "
            "Store the global Variance when training");
  AddOutput("SavedMean",
            "Mean of the current mini batch, "
            "will apply to output when training")
      .AsIntermediate();
  AddOutput("SavedVariance",
            "Variance of the current mini batch, "
            "will apply to output when training")
      .AsIntermediate();
  AddOutput("ReserveSpace",
            "Reserve GPU space for triggering the new semi-persistent "
            "NHWC kernel")
      .AsDispensable()
      .AsExtra();
  AddAttr<bool>("use_mkldnn",
                "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false)
      .AsExtra();
  AddAttr<bool>("fuse_with_relu",
                "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false)
      .AsExtra();
  AddAttr<bool>("use_global_stats",
                "(bool, default false) Whether to use global mean and "
                "variance. In inference or test mode, set use_global_stats "
                "to true or is_test true. the behavior is equivalent. "
                "In train mode, when setting use_global_stats True, the "
                "global mean and variance are also used during train time, "
                "the BN acts as scaling and shiffting.")
      .SetDefault(false);
  AddAttr<bool>("trainable_statistics",
                "(bool, default false) Whether to calculate mean and variance "
                "in test mode. If setting true in test mode, mean and variace "
                "will be calculated by current batch statistics.")
      .SetDefault(false);
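  // The normalization this op computes, per channel:
  //   y = scale * (x - mean) / sqrt(variance + epsilon) + bias
  // where mean/variance are the mini-batch statistics during training and the
  // moving (global) statistics during inference.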
  AddComment(R"DOC(
Batch Normalization.
Batch Norm has been implemented as discussed in the paper:
https://arxiv.org/pdf/1502.03167.pdf
It can be used as a normalizer function for conv2d and fully_connected operations.
The required data format for this layer is one of the following:
1. NHWC `[batch, in_height, in_width, in_channels]`
2. NCHW `[batch, in_channels, in_height, in_width]`

)DOC");
}

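// Gradient shapes mirror the forward shapes: d(X) has the shape of X, while
// d(Scale) and d(Bias) are 1-D tensors of length C.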
void BatchNormGradOp::InferShape(framework::InferShapeContext *ctx) const {
  // check input
  OP_INOUT_CHECK(ctx->HasInput("Scale"), "Input", "Scale", "BatchNormGrad");
  OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Y")),
                 "Input",
                 framework::GradVarName("Y"),
                 "BatchNormGrad");
  OP_INOUT_CHECK(
      ctx->HasInput("SavedMean"), "Input", "SavedMean", "BatchNormGrad");
  OP_INOUT_CHECK(ctx->HasInput("SavedVariance"),
                 "Input",
                 "SavedVariance",
                 "BatchNormGrad");

  // check output
  const bool has_scale_grad = ctx->HasOutput(framework::GradVarName("Scale"));
  const bool has_bias_grad = ctx->HasOutput(framework::GradVarName("Bias"));
  const bool has_x_grad = ctx->HasOutput(framework::GradVarName("X"));

  PADDLE_ENFORCE_EQ((has_scale_grad == has_bias_grad),
                    true,
                    platform::errors::NotFound(
                        "Output(Scale@GRAD) and Output(Bias@GRAD) must be null "
                        "or not be null at the same time. But now, "
                        "has Scale@Grad=[%d], has Bias@GRAD=[%d]",
                        has_scale_grad,
                        has_bias_grad));

  const bool use_global_stats = ctx->Attrs().Get<bool>("use_global_stats");
  if (use_global_stats) {
    PADDLE_ENFORCE_EQ(
        !ctx->Attrs().Get<bool>("use_mkldnn"),
        true,
        platform::errors::InvalidArgument(
            "Using global stats during training is not supported "
            "in gradient op kernel of batch_norm_mkldnn_op now."));
  }

  OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "BatchNormGrad");
  const auto x_dims = ctx->GetInputDim("X");
  const DataLayout data_layout = framework::StringToDataLayout(
      ctx->Attrs().Get<std::string>("data_layout"));

  const int C =
      ((ctx->IsRunMKLDNNKernel() == true) || (data_layout == DataLayout::kNCHW)
           ? x_dims[1]
           : x_dims[x_dims.size() - 1]);

  // has_scale_grad == has_bias_grad, judge has_scale_grad is enough
  if (has_scale_grad) {
    ctx->SetOutputDim(framework::GradVarName("Scale"), {C});
    ctx->SetOutputDim(framework::GradVarName("Bias"), {C});
  }
  if (has_x_grad) {
    ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
  }
}

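// The gradient kernel's type follows the data type of X; the checks below make
// sure the incoming gradient of Y actually exists and is non-empty.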
framework::OpKernelType BatchNormGradOp::GetExpectedKernelType(
    const framework::ExecutionContext &ctx) const {
  const auto *var = ctx.InputVar(framework::GradVarName("Y"));
  if (var == nullptr) {
    PADDLE_THROW(
        platform::errors::InvalidArgument("can't find gradient variable of Y"));
  }
  const Tensor *t = nullptr;
  if (var->IsType<Tensor>()) {
    t = &var->Get<Tensor>();
  } else if (var->IsType<LoDTensor>()) {
    t = &var->Get<LoDTensor>();
  }
  if (t == nullptr) {
    PADDLE_THROW(
        platform::errors::InvalidArgument("gradient variable of Y is empty"));
  }

  // TODO(pzelazko-intel): enable MKLDNN layout when it's ready
  framework::LibraryType library = framework::LibraryType::kPlain;
  framework::DataLayout layout = framework::DataLayout::kAnyLayout;
  auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");

#ifdef PADDLE_WITH_MKLDNN
  if (library == framework::LibraryType::kPlain &&
      this->CanMKLDNNBeUsed(ctx, data_type)) {
    library = framework::LibraryType::kMKLDNN;
    layout = framework::DataLayout::kMKLDNN;
  }
#endif

  return framework::OpKernelType(data_type, ctx.GetPlace(), layout, library);
}

framework::OpKernelType BatchNormGradOp::GetKernelTypeForVar(
    const std::string &var_name,
    const Tensor &tensor,
    const framework::OpKernelType &expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  // Only the input requires reshaping; the weights and bias already
  // have their shape in NCHW order
  if (((var_name == "X") || (var_name == framework::GradVarName("Y"))) &&
      (expected_kernel_type.data_layout_ == framework::DataLayout::kMKLDNN) &&
      (tensor.layout() != framework::DataLayout::kMKLDNN)) {
    auto attrs = Attrs();
    auto ar = paddle::framework::AttrReader(attrs);
    const std::string data_layout = ar.Get<std::string>("data_layout");
    auto dl = framework::StringToDataLayout(data_layout);
    // Some models may have intentionally set "AnyLayout" for pool
    // op. Treat this as NCHW (default data_format value)
    if (dl != framework::DataLayout::kAnyLayout) {
      return framework::OpKernelType(
          expected_kernel_type.data_type_, tensor.place(), dl);
    }
  }
#endif
  return framework::OpKernelType(
      expected_kernel_type.data_type_, tensor.place(), tensor.layout());
}

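// Builds the batch_norm_grad op from the forward op: the gradient kernel needs
// X, d(Y), Scale, Bias and the saved mini-batch statistics.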
template <typename T>
void BatchNormGradMaker<T>::Apply(GradOpPtr<T> op) const {
  op->SetType(this->ForwardOpType() + "_grad");
  op->SetInput("X", this->Input("X"));
  op->SetInput(framework::GradVarName("Y"), this->OutputGrad("Y"));

  op->SetInput("Scale", this->Input("Scale"));
  op->SetInput("Bias", this->Input("Bias"));
  op->SetInput("SavedMean", this->Output("SavedMean"));
  op->SetInput("SavedVariance", this->Output("SavedVariance"));
  if (this->HasOutput("ReserveSpace")) {
    op->SetInput("ReserveSpace", this->Output("ReserveSpace"));
  }

  // used when setting use_global_stats True during training
  if (PADDLE_GET_CONST(bool, this->GetAttr("use_global_stats")) ||
      PADDLE_GET_CONST(bool, this->GetAttr("is_test"))) {
    op->SetInput("Mean", this->Output("MeanOut"));
    op->SetInput("Variance", this->Output("VarianceOut"));
  }

  op->SetAttrMap(this->Attrs());

  op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
  op->SetOutput(framework::GradVarName("Scale"), this->InputGrad("Scale"));
  op->SetOutput(framework::GradVarName("Bias"), this->InputGrad("Bias"));
}

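// Builds the second-order gradient op (batch_norm_grad_grad): it takes the
// gradients of the first-order outputs (DDX, DDScale, DDBias) plus DY, and
// produces DX, DScale and DDY.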
template <typename T>
void BatchNormDoubleGradMaker<T>::Apply(GradOpPtr<T> op) const {
  op->SetType("batch_norm_grad_grad");
  op->SetInput("X", this->Input("X"));
  op->SetInput("Scale", this->Input("Scale"));
  op->SetInput("SavedMean", this->Input("SavedMean"));
  op->SetInput("SavedVariance", this->Input("SavedVariance"));
  if (PADDLE_GET_CONST(bool, this->GetAttr("use_global_stats"))) {
    op->SetInput("Mean", this->Input("Mean"));
    op->SetInput("Variance", this->Input("Variance"));
  }
  op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
  op->SetInput("DDScale", this->OutputGrad(framework::GradVarName("Scale")));
  op->SetInput("DDBias", this->OutputGrad(framework::GradVarName("Bias")));
  op->SetInput("DY", this->Input(framework::GradVarName("Y")));

  op->SetAttrMap(this->Attrs());
  op->SetOutput("DX", this->InputGrad("X"));
  op->SetOutput("DScale", this->InputGrad("Scale"));
  op->SetOutput("DDY", this->InputGrad(framework::GradVarName("Y")));
}

void BatchNormDoubleGradOp::InferShape(
    framework::InferShapeContext *ctx) const {
  OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "BatchNormDoubleGrad");
  OP_INOUT_CHECK(
      ctx->HasInput("Scale"), "Input", "Scale", "BatchNormDoubleGrad");
  OP_INOUT_CHECK(
      ctx->HasInput("SavedMean"), "Input", "SavedMean", "BatchNormDoubleGrad");
  OP_INOUT_CHECK(ctx->HasInput("SavedVariance"),
                 "Input",
                 "SavedVariance",
                 "BatchNormDoubleGrad");

  const bool use_global_stats = ctx->Attrs().Get<bool>("use_global_stats");
  if (use_global_stats) {
    OP_INOUT_CHECK(ctx->HasInput("Variance"),
                   "Input",
                   "VarianceOut",
                   "BatchNormDoubleGrad");
  }

  OP_INOUT_CHECK(ctx->HasInput("DY"), "Input", "DY", "BatchNormDoubleGrad");

  // check output
  OP_INOUT_CHECK(ctx->HasOutput("DX"), "Output", "DX", "BatchNormDoubleGrad");

  const auto x_dims = ctx->GetInputDim("X");
  const DataLayout data_layout = framework::StringToDataLayout(
      ctx->Attrs().Get<std::string>("data_layout"));
  const int C =
      ((ctx->IsRunMKLDNNKernel() == true) || (data_layout == DataLayout::kNCHW)
           ? x_dims[1]
           : x_dims[x_dims.size() - 1]);

  if (ctx->HasOutput("DX")) {
    ctx->SetOutputDim("DX", x_dims);
  }
  if (ctx->HasOutput("DScale")) {
    ctx->SetOutputDim("DScale", {C});
  }
  if (ctx->HasOutput("DDY")) {
    ctx->ShareDim("X", "DDY");
  }
}

framework::OpKernelType BatchNormDoubleGradOp::GetExpectedKernelType(
    const framework::ExecutionContext &ctx) const {
  const auto *var = ctx.InputVar("DY");
  if (var == nullptr) {
    PADDLE_THROW(
        platform::errors::NotFound("cannot find gradient variable of Y"));
  }
  const Tensor *t = nullptr;
  if (var->IsType<Tensor>()) {
    t = &var->Get<Tensor>();
  } else if (var->IsType<LoDTensor>()) {
    t = &var->Get<LoDTensor>();
  }
  if (t == nullptr) {
    PADDLE_THROW(
        platform::errors::InvalidArgument("gradient variable of Y is empty"));
  }
  return framework::OpKernelType(
      OperatorWithKernel::IndicateVarDataType(ctx, "X"), ctx.GetPlace());
}

DECLARE_INPLACE_OP_INFERER(BatchNormDoubleGradOpInplaceInferer, {"DY", "DDY"});

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

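// Operator registration. A shape-inference functor backed by
// phi::BatchNormInferMeta is declared for the forward op.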
DECLARE_INFER_SHAPE_FUNCTOR(batch_norm,
                            BatchNormInferShapeFunctor,
                            PD_INFER_META(phi::BatchNormInferMeta));

REGISTER_OPERATOR(batch_norm,
                  ops::BatchNormOp,
                  ops::BatchNormOpMaker,
                  ops::BatchNormOpInferVarType,
                  ops::BatchNormGradMaker<paddle::framework::OpDesc>,
                  ops::BatchNormGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(batch_norm_grad,
                  ops::BatchNormGradOp,
                  ops::BatchNormDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::BatchNormDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(batch_norm_grad_grad,
                  ops::BatchNormDoubleGradOp,
                  ops::BatchNormDoubleGradOpInplaceInferer);