/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/batch_norm_op.h"

#include <memory>
#include <string>
#include <unordered_map>

#include "paddle/fluid/framework/data_layout.h"
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/phi/infermeta/multiary.h"

namespace paddle {
namespace operators {

void BatchNormOp::InferShape(framework::InferShapeContext *ctx) const {
  OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "BatchNorm");
  OP_INOUT_CHECK(ctx->HasInput("Scale"), "Input", "Scale", "BatchNorm");
  OP_INOUT_CHECK(ctx->HasInput("Bias"), "Input", "Bias", "BatchNorm");
  OP_INOUT_CHECK(ctx->HasInput("Mean"), "Input", "Mean", "BatchNorm");
  OP_INOUT_CHECK(ctx->HasInput("Variance"), "Input", "Variance", "BatchNorm");
  OP_INOUT_CHECK(ctx->HasOutput("Y"), "Output", "Y", "BatchNorm");

  bool is_test = ctx->Attrs().Get<bool>("is_test");
  bool trainable_stats = ctx->Attrs().Get<bool>("trainable_statistics");
  bool test_mode = is_test && (!trainable_stats);
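  // The statistics outputs below are only required when batch statistics are
  // actually computed: in training, or in test mode when
  // trainable_statistics forces their computation.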
  if (!test_mode) {
    OP_INOUT_CHECK(ctx->HasOutput("MeanOut"), "Output", "MeanOut", "BatchNorm");
    OP_INOUT_CHECK(ctx->HasOutput("VarianceOut"), "Output", "VarianceOut",
                   "BatchNorm");
    OP_INOUT_CHECK(ctx->HasOutput("SavedMean"), "Output", "SavedMean",
                   "BatchNorm");
    OP_INOUT_CHECK(ctx->HasOutput("SavedVariance"), "Output", "SavedVariance",
                   "BatchNorm");
  }

  // make sure Mean/MeanOut and Variance/VarianceOut share memory in Python
  PADDLE_ENFORCE_EQ(ctx->Inputs("Mean")[0], ctx->Outputs("MeanOut")[0],
                    platform::errors::InvalidArgument(
                        "Mean and MeanOut should share the same memory"));
  PADDLE_ENFORCE_EQ(
      ctx->Inputs("Variance")[0], ctx->Outputs("VarianceOut")[0],
      platform::errors::InvalidArgument(
          "Variance and VarianceOut should share the same memory"));

  const auto x_dims = ctx->GetInputDim("X");

  for (int i = 0; i < x_dims.size(); i++) {
    PADDLE_ENFORCE_EQ(
        (x_dims[i] == -1) || (x_dims[i] > 0), true,
        platform::errors::InvalidArgument(
            "Each dimension of input tensor is expected to be -1 or a "
            "positive number, but received %d. Input's shape is [%s].",
            x_dims[i], x_dims));
  }

  const DataLayout data_layout = framework::StringToDataLayout(
      ctx->Attrs().Get<std::string>("data_layout"));

  if (ctx->IsRuntime() && ctx->HasInput("MomentumTensor")) {
    auto mom = ctx->Inputs("MomentumTensor");
    PADDLE_ENFORCE_EQ(mom.size(), 1,
                      platform::errors::InvalidArgument(
                          "The input tensor MomentumTensor's size must be 1. "
                          "But received: MomentumTensor's size is [%d]",
                          mom.size()));
  }

  PADDLE_ENFORCE_GE(
      x_dims.size(), 2,
      platform::errors::InvalidArgument(
          "ShapeError: the dimension of input X "
          "must be greater than or equal to 2. But received: the shape of "
          "input X = [%s], the dimension of input X = [%d]",
          x_dims, x_dims.size()));
  PADDLE_ENFORCE_LE(
      x_dims.size(), 5,
      platform::errors::InvalidArgument(
          "ShapeError: the dimension of input X "
          "must be smaller than or equal to 5. But received: the shape of "
          "input X = [%s], the dimension of input X = [%d]",
          x_dims, x_dims.size()));
  VLOG(4) << "is_run_mkldnn_kernel: " << ctx->IsRunMKLDNNKernel()
          << ", data_layout: " << data_layout;
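  // oneDNN (MKL-DNN) kernels keep tensor dims in NCHW order regardless of
  // data_layout, so the channel count is dims[1] in that case; otherwise it
  // depends on data_layout: dims[1] for NCHW, the last dim for NHWC.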
  const int64_t C =
      ((ctx->IsRunMKLDNNKernel() == true) || (data_layout == DataLayout::kNCHW)
           ? x_dims[1]
           : x_dims[x_dims.size() - 1]);

  auto scale_dim = ctx->GetInputDim("Scale");
  auto bias_dim = ctx->GetInputDim("Bias");

  PADDLE_ENFORCE_EQ(
      scale_dim.size(), 1UL,
      platform::errors::InvalidArgument(
          "ShapeError: the dimension of scale must be equal to 1. "
          "But received: the shape of scale is [%s], the dimension "
          "of scale is [%d]",
          scale_dim, scale_dim.size()));
  PADDLE_ENFORCE_EQ(bias_dim.size(), 1UL,
                    platform::errors::InvalidArgument(
                        "ShapeError: the dimension of bias must be equal to "
                        "1. But received: the shape of bias is [%s], the "
                        "dimension of bias is [%d]",
                        bias_dim, bias_dim.size()));

  bool check = true;
  if ((!ctx->IsRuntime()) &&
      (phi::product(scale_dim) <= 0 || phi::product(bias_dim) <= 0)) {
    check = false;
  }

  if (check) {
    PADDLE_ENFORCE_EQ(scale_dim[0], C,
                      platform::errors::InvalidArgument(
                          "ShapeError: the shape of scale must be equal to "
                          "[%d]. But received: the shape of scale is [%d]",
                          C, scale_dim[0]));
    PADDLE_ENFORCE_EQ(bias_dim[0], C,
                      platform::errors::InvalidArgument(
                          "ShapeError: the shape of bias must be equal to "
                          "[%d]. But received: the shape of bias is [%d]",
                          C, bias_dim[0]));
  }
  ctx->SetOutputDim("Y", x_dims);
  VLOG(4) << "x_dims: " << x_dims;
  ctx->SetOutputDim("MeanOut", {C});
  ctx->SetOutputDim("VarianceOut", {C});
  ctx->SetOutputDim("SavedMean", {C});
  ctx->SetOutputDim("SavedVariance", {C});
  ctx->ShareLoD("X", "Y");
}

framework::OpKernelType BatchNormOp::GetExpectedKernelType(
    const framework::ExecutionContext &ctx) const {
  auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
  // By default, the dtype of the Scale, Bias, Mean, and Variance tensors
  // should be float32 (for float32 or float16 input tensors) or float64
  // (for float64 input tensors).
  auto bn_param_type = framework::proto::VarType::FP32;
  if (input_data_type == framework::proto::VarType::FP64) {
    bn_param_type = framework::proto::VarType::FP64;
  }
  PADDLE_ENFORCE_EQ(
      bn_param_type,
      framework::TransToProtoVarType(ctx.Input<Tensor>("Scale")->dtype()),
      platform::errors::InvalidArgument("Scale input should be of float type"));
  PADDLE_ENFORCE_EQ(
      bn_param_type,
      framework::TransToProtoVarType(ctx.Input<Tensor>("Bias")->dtype()),
      platform::errors::InvalidArgument("Bias input should be of float type"));
  PADDLE_ENFORCE_EQ(
      bn_param_type,
      framework::TransToProtoVarType(ctx.Input<Tensor>("Mean")->dtype()),
      platform::errors::InvalidArgument("Mean input should be of float type"));
  PADDLE_ENFORCE_EQ(bn_param_type, framework::TransToProtoVarType(
                                       ctx.Input<Tensor>("Variance")->dtype()),
                    platform::errors::InvalidArgument(
                        "Variance input should be of float type"));

  // TODO(pzelazko-intel): enable MKLDNN layout when it's ready
  framework::LibraryType library = framework::LibraryType::kPlain;
  framework::DataLayout layout = framework::DataLayout::kAnyLayout;
#ifdef PADDLE_WITH_MKLDNN
  if (library == framework::LibraryType::kPlain &&
      this->CanMKLDNNBeUsed(ctx, input_data_type)) {
    library = framework::LibraryType::kMKLDNN;
    layout = framework::DataLayout::kMKLDNN;
  }
#endif

  return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout,
                                 library);
}

framework::OpKernelType BatchNormOp::GetKernelTypeForVar(
    const std::string &var_name, const Tensor &tensor,
    const framework::OpKernelType &expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  // Only the input requires reshaping; the weights and bias
  // always have their shape in NCHW order.
  if ((var_name == "X") &&
      (expected_kernel_type.data_layout_ == framework::DataLayout::kMKLDNN) &&
      (tensor.layout() != framework::DataLayout::kMKLDNN)) {
    auto attrs = Attrs();
    auto ar = paddle::framework::AttrReader(attrs);
    const std::string data_layout = ar.Get<std::string>("data_layout");
    auto dl = framework::StringToDataLayout(data_layout);
    // Some models may have intentionally set "AnyLayout" for this
    // op. Treat it as NCHW (the default data_format value).
    if (dl != framework::DataLayout::kAnyLayout) {
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), dl);
    }
  }
#endif
  return framework::OpKernelType(expected_kernel_type.data_type_,
                                 tensor.place(), tensor.layout());
}

void BatchNormOpMaker::Make() {
  AddAttr<bool>("is_test",
                "(bool, default false) Set to true for inference only, false "
                "for training. Some layers may run faster when this is true.")
      .SetDefault(false);
  AddAttr<float>("momentum", "").SetDefault(0.9);
  AddAttr<float>("epsilon", "")
      .SetDefault(1e-5)
      .AddCustomChecker([](const float &epsilon) {
        PADDLE_ENFORCE_GE(
            epsilon, 0.0f,
            platform::errors::InvalidArgument(
                "'epsilon' should be greater than or equal to 0.0."));
        PADDLE_ENFORCE_LE(
            epsilon, 0.001f,
            platform::errors::InvalidArgument(
                "'epsilon' should be less than or equal to 0.001."));
      });
  AddAttr<std::string>("data_layout",
                       "(string, default NCHW) Data layout of the input, "
                       "\"NCHW\" or \"NHWC\".")
      .SetDefault("NCHW");
  AddInput("X", "The input tensor");
  AddInput("Scale",
           "Scale is a 1-dimensional tensor of size C "
           "that is applied to the output");
  AddInput("Bias",
           "Bias is a 1-dimensional tensor of size C "
           "that is applied to the output");
  AddInput("Mean",
           "The global mean (for training) or "
           "estimated mean (for testing)");
  AddInput("Variance",
           "The global variance (for training) "
           "or estimated Variance (for testing)");
  AddInput("MomentumTensor",
           "(Tensor<float32>, optional) If provided, batch_norm will "
           "use this as momentum, this has a higher priority than "
           "attr(momentum), the shape of this tensor MUST BE [1].")
      .AsDispensable();
  AddOutput("Y", "result after normalization");
  AddOutput("MeanOut",
            "Share memory with Mean. "
            "Store the global mean when training");
  AddOutput("VarianceOut",
            "Share memory with Variance. "
            "Store the global Variance when training");
  AddOutput("SavedMean",
            "Mean of the current mini batch, "
            "will apply to output when training")
      .AsIntermediate();
  AddOutput("SavedVariance",
            "Variance of the current mini batch, "
            "will apply to output when training")
      .AsIntermediate();
  AddOutput("ReserveSpace",
            "Reserve GPU space for triggering the new semi-persistent "
            "NHWC kernel")
      .AsDispensable()
      .AsExtra();
  AddAttr<bool>("use_mkldnn",
                "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false)
      .AsExtra();
  AddAttr<bool>("fuse_with_relu",
                "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false)
      .AsExtra();
  AddAttr<bool>("use_global_stats",
                "(bool, default false) Whether to use the global mean and "
                "variance. In inference or test mode, setting use_global_stats "
                "to true is equivalent to setting is_test to true. In train "
                "mode, when use_global_stats is true, the global mean and "
                "variance are also used during training, so BN acts as pure "
                "scaling and shifting.")
      .SetDefault(false);
  AddAttr<bool>("trainable_statistics",
                "(bool, default false) Whether to calculate the mean and "
                "variance in test mode. If set to true in test mode, the mean "
                "and variance are computed from the current batch statistics.")
      .SetDefault(false);
  AddComment(R"DOC(
Batch Normalization.

Batch Norm has been implemented as discussed in the paper:
https://arxiv.org/pdf/1502.03167.pdf
It can be used as a normalizer function for conv2d and fully_connected
operations. The required data format for this layer is one of the following:
1. NHWC `[batch, in_height, in_width, in_channels]`
2. NCHW `[batch, in_channels, in_height, in_width]`
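
In outline, with per-channel statistics computed over the remaining
dimensions, training mode performs:

    mu     = mean(X)
    sigma2 = var(X)
    Y      = Scale * (X - mu) / sqrt(sigma2 + epsilon) + Bias

and updates the running statistics as:

    MeanOut     = momentum * Mean     + (1 - momentum) * mu
    VarianceOut = momentum * Variance + (1 - momentum) * sigma2

In test mode (or when use_global_stats is true), the global Mean and
Variance are used in place of the mini-batch statistics.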

)DOC");
}

void BatchNormGradOp::InferShape(framework::InferShapeContext *ctx) const {
  // check input
  OP_INOUT_CHECK(ctx->HasInput("Scale"), "Input", "Scale", "BatchNormGrad");
  OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Y")), "Input",
                 framework::GradVarName("Y"), "BatchNormGrad");
  OP_INOUT_CHECK(ctx->HasInput("SavedMean"), "Input", "SavedMean",
                 "BatchNormGrad");
  OP_INOUT_CHECK(ctx->HasInput("SavedVariance"), "Input", "SavedVariance",
                 "BatchNormGrad");

  // check output
  const bool has_scale_grad = ctx->HasOutput(framework::GradVarName("Scale"));
  const bool has_bias_grad = ctx->HasOutput(framework::GradVarName("Bias"));
  const bool has_x_grad = ctx->HasOutput(framework::GradVarName("X"));

  PADDLE_ENFORCE_EQ((has_scale_grad == has_bias_grad), true,
                    platform::errors::NotFound(
                        "Output(Scale@GRAD) and Output(Bias@GRAD) must both "
                        "be null or both be non-null at the same time. But "
                        "received: has Scale@GRAD = [%d], has Bias@GRAD = "
                        "[%d]",
                        has_scale_grad, has_bias_grad));

  const bool use_global_stats = ctx->Attrs().Get<bool>("use_global_stats");
  if (use_global_stats) {
    PADDLE_ENFORCE_EQ(
        !ctx->Attrs().Get<bool>("use_mkldnn"), true,
        platform::errors::InvalidArgument(
            "Using global stats during training is not supported "
            "by the gradient kernel of batch_norm_mkldnn_op yet."));
  }

  OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "BatchNormGrad");
  const auto x_dims = ctx->GetInputDim("X");
  const DataLayout data_layout = framework::StringToDataLayout(
      ctx->Attrs().Get<std::string>("data_layout"));

  const int C =
      ((ctx->IsRunMKLDNNKernel() == true) || (data_layout == DataLayout::kNCHW)
           ? x_dims[1]
           : x_dims[x_dims.size() - 1]);

  // has_scale_grad == has_bias_grad, so checking has_scale_grad is enough
  if (has_scale_grad) {
    ctx->SetOutputDim(framework::GradVarName("Scale"), {C});
    ctx->SetOutputDim(framework::GradVarName("Bias"), {C});
  }
  if (has_x_grad) {
    ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
  }
}

framework::OpKernelType BatchNormGradOp::GetExpectedKernelType(
    const framework::ExecutionContext &ctx) const {
  const auto *var = ctx.InputVar(framework::GradVarName("Y"));
  if (var == nullptr) {
    PADDLE_THROW(platform::errors::NotFound(
        "cannot find gradient variable of Y"));
  }
  const Tensor *t = nullptr;
  if (var->IsType<Tensor>()) {
    t = &var->Get<Tensor>();
  } else if (var->IsType<LoDTensor>()) {
    t = &var->Get<LoDTensor>();
  }
  if (t == nullptr) {
    PADDLE_THROW(
        platform::errors::InvalidArgument("gradient variable of Y is empty"));
  }

  // TODO(pzelazko-intel): enable MKLDNN layout when it's ready
  framework::LibraryType library = framework::LibraryType::kPlain;
  framework::DataLayout layout = framework::DataLayout::kAnyLayout;
  auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");

#ifdef PADDLE_WITH_MKLDNN
  if (library == framework::LibraryType::kPlain &&
      this->CanMKLDNNBeUsed(ctx, data_type)) {
    library = framework::LibraryType::kMKLDNN;
    layout = framework::DataLayout::kMKLDNN;
  }
#endif

  return framework::OpKernelType(data_type, ctx.GetPlace(), layout, library);
}

framework::OpKernelType BatchNormGradOp::GetKernelTypeForVar(
    const std::string &var_name, const Tensor &tensor,
    const framework::OpKernelType &expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  // Only the input requires reshaping; the weights and bias
  // always have their shape in NCHW order.
  if (((var_name == "X") || (var_name == framework::GradVarName("Y"))) &&
      (expected_kernel_type.data_layout_ == framework::DataLayout::kMKLDNN) &&
      (tensor.layout() != framework::DataLayout::kMKLDNN)) {
    auto attrs = Attrs();
    auto ar = paddle::framework::AttrReader(attrs);
    const std::string data_layout = ar.Get<std::string>("data_layout");
    auto dl = framework::StringToDataLayout(data_layout);
    // Some models may have intentionally set "AnyLayout" for this
    // op. Treat it as NCHW (the default data_format value).
    if (dl != framework::DataLayout::kAnyLayout) {
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), dl);
    }
  }
#endif
  return framework::OpKernelType(expected_kernel_type.data_type_,
                                 tensor.place(), tensor.layout());
}

template <typename T>
void BatchNormGradMaker<T>::Apply(GradOpPtr<T> op) const {
  op->SetType(this->ForwardOpType() + "_grad");
  op->SetInput("X", this->Input("X"));
  op->SetInput(framework::GradVarName("Y"), this->OutputGrad("Y"));

  op->SetInput("Scale", this->Input("Scale"));
  op->SetInput("Bias", this->Input("Bias"));
  op->SetInput("SavedMean", this->Output("SavedMean"));
  op->SetInput("SavedVariance", this->Output("SavedVariance"));
  if (this->HasOutput("ReserveSpace")) {
    op->SetInput("ReserveSpace", this->Output("ReserveSpace"));
  }

  // Used when use_global_stats or is_test is set to true during training,
  // so that the gradient kernel can read the global statistics.
  if (BOOST_GET_CONST(bool, this->GetAttr("use_global_stats")) ||
      BOOST_GET_CONST(bool, this->GetAttr("is_test"))) {
    op->SetInput("Mean", this->Output("MeanOut"));
    op->SetInput("Variance", this->Output("VarianceOut"));
  }

  op->SetAttrMap(this->Attrs());

  op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
  op->SetOutput(framework::GradVarName("Scale"), this->InputGrad("Scale"));
  op->SetOutput(framework::GradVarName("Bias"), this->InputGrad("Bias"));
}

template <typename T>
void BatchNormDoubleGradMaker<T>::Apply(GradOpPtr<T> op) const {
  op->SetType("batch_norm_grad_grad");
  op->SetInput("X", this->Input("X"));
  op->SetInput("Scale", this->Input("Scale"));
  op->SetInput("SavedMean", this->Input("SavedMean"));
  op->SetInput("SavedVariance", this->Input("SavedVariance"));
  if (BOOST_GET_CONST(bool, this->GetAttr("use_global_stats"))) {
    op->SetInput("Mean", this->Input("Mean"));
    op->SetInput("Variance", this->Input("Variance"));
  }
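  // DDX/DDScale/DDBias are the gradients received for the outputs of the
  // first-order grad op (X@GRAD, Scale@GRAD, Bias@GRAD); DX, DScale, and
  // DDY are the second-order outputs this op produces.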
  op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
  op->SetInput("DDScale", this->OutputGrad(framework::GradVarName("Scale")));
  op->SetInput("DDBias", this->OutputGrad(framework::GradVarName("Bias")));
  op->SetInput("DY", this->Input(framework::GradVarName("Y")));

  op->SetAttrMap(this->Attrs());
  op->SetOutput("DX", this->InputGrad("X"));
  op->SetOutput("DScale", this->InputGrad("Scale"));
  op->SetOutput("DDY", this->InputGrad(framework::GradVarName("Y")));
}

void BatchNormDoubleGradOp::InferShape(
    framework::InferShapeContext *ctx) const {
  OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "BatchNormDoubleGrad");
  OP_INOUT_CHECK(ctx->HasInput("Scale"), "Input", "Scale",
                 "BatchNormDoubleGrad");
  OP_INOUT_CHECK(ctx->HasInput("SavedMean"), "Input", "SavedMean",
                 "BatchNormDoubleGrad");
  OP_INOUT_CHECK(ctx->HasInput("SavedVariance"), "Input", "SavedVariance",
                 "BatchNormDoubleGrad");

  const bool use_global_stats = ctx->Attrs().Get<bool>("use_global_stats");
  if (use_global_stats) {
    OP_INOUT_CHECK(ctx->HasInput("Variance"), "Input", "VarianceOut",
                   "BatchNormDoubleGrad");
  }

  OP_INOUT_CHECK(ctx->HasInput("DY"), "Input", "DY", "BatchNormDoubleGrad");

  // check output
  OP_INOUT_CHECK(ctx->HasOutput("DX"), "Output", "DX", "BatchNormDoubleGrad");

  const auto x_dims = ctx->GetInputDim("X");
  const DataLayout data_layout = framework::StringToDataLayout(
      ctx->Attrs().Get<std::string>("data_layout"));
  const int C =
      ((ctx->IsRunMKLDNNKernel() == true) || (data_layout == DataLayout::kNCHW)
           ? x_dims[1]
           : x_dims[x_dims.size() - 1]);

  if (ctx->HasOutput("DX")) {
    ctx->SetOutputDim("DX", x_dims);
  }
  if (ctx->HasOutput("DScale")) {
    ctx->SetOutputDim("DScale", {C});
  }
  if (ctx->HasOutput("DDY")) {
    ctx->ShareDim("X", "DDY");
  }
}

framework::OpKernelType BatchNormDoubleGradOp::GetExpectedKernelType(
    const framework::ExecutionContext &ctx) const {
  const auto *var = ctx.InputVar("DY");
  if (var == nullptr) {
    PADDLE_THROW(
        platform::errors::NotFound("cannot find gradient variable of Y"));
  }
  const Tensor *t = nullptr;
  if (var->IsType<Tensor>()) {
    t = &var->Get<Tensor>();
  } else if (var->IsType<LoDTensor>()) {
    t = &var->Get<LoDTensor>();
  }
  if (t == nullptr) {
    PADDLE_THROW(
        platform::errors::InvalidArgument("gradient variable of Y is empty"));
  }
  return framework::OpKernelType(
      OperatorWithKernel::IndicateVarDataType(ctx, "X"), ctx.GetPlace());
}

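// The inplace inferer below allows the double-grad op to reuse the memory
// of its DY input for the DDY output.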
DECLARE_INPLACE_OP_INFERER(BatchNormDoubleGradOpInplaceInferer, {"DY", "DDY"});

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

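// Declares a shape-inference functor that wraps phi::BatchNormInferMeta,
// so shape inference for batch_norm can be shared with the phi kernel.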
DECLARE_INFER_SHAPE_FUNCTOR(batch_norm, BatchNormInferShapeFunctor,
                            PD_INFER_META(phi::BatchNormInferMeta));

REGISTER_OPERATOR(batch_norm, ops::BatchNormOp, ops::BatchNormOpMaker,
                  ops::BatchNormOpInferVarType,
                  ops::BatchNormGradMaker<paddle::framework::OpDesc>,
                  ops::BatchNormGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(batch_norm_grad, ops::BatchNormGradOp,
                  ops::BatchNormDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::BatchNormDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(batch_norm_grad_grad, ops::BatchNormDoubleGradOp,
                  ops::BatchNormDoubleGradOpInplaceInferer);