/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/batch_norm_op.h"

#include <memory>
#include <string>
#include <unordered_map>

#include "paddle/fluid/framework/data_layout.h"
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/phi/infermeta/multiary.h"

namespace paddle {
namespace operators {

void BatchNormOp::InferShape(framework::InferShapeContext *ctx) const {
  OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "BatchNorm");
  OP_INOUT_CHECK(ctx->HasInput("Scale"), "Input", "Scale", "BatchNorm");
  OP_INOUT_CHECK(ctx->HasInput("Bias"), "Input", "Bias", "BatchNorm");
  OP_INOUT_CHECK(ctx->HasInput("Mean"), "Input", "Mean", "BatchNorm");
  OP_INOUT_CHECK(ctx->HasInput("Variance"), "Input", "Variance", "BatchNorm");
  OP_INOUT_CHECK(ctx->HasOutput("Y"), "Output", "Y", "BatchNorm");
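  // A quick shape example (illustrative only): for an NCHW input X of shape
  // [N, C, H, W] = [8, 16, 32, 32], Scale, Bias, Mean, and Variance are each
  // expected to have shape [C] = [16]; Y comes out as [8, 16, 32, 32], and the
  // C-sized outputs (MeanOut, VarianceOut, SavedMean, SavedVariance) as [16].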

  bool is_test = ctx->Attrs().Get<bool>("is_test");
  bool trainable_stats = ctx->Attrs().Get<bool>("trainable_statistics");
  bool test_mode = is_test && (!trainable_stats);
  if (!test_mode) {
    OP_INOUT_CHECK(ctx->HasOutput("MeanOut"), "Output", "MeanOut", "BatchNorm");
    OP_INOUT_CHECK(
        ctx->HasOutput("VarianceOut"), "Output", "VarianceOut", "BatchNorm");
    OP_INOUT_CHECK(
        ctx->HasOutput("SavedMean"), "Output", "SavedMean", "BatchNorm");
    OP_INOUT_CHECK(ctx->HasOutput("SavedVariance"),
                   "Output",
                   "SavedVariance",
                   "BatchNorm");
  }

  // make sure Mean/MeanOut and Variance/VarianceOut share memory in Python
  PADDLE_ENFORCE_EQ(ctx->Inputs("Mean")[0],
                    ctx->Outputs("MeanOut")[0],
                    platform::errors::InvalidArgument(
                        "Mean and MeanOut should share the same memory"));
  PADDLE_ENFORCE_EQ(
      ctx->Inputs("Variance")[0],
      ctx->Outputs("VarianceOut")[0],
      platform::errors::InvalidArgument(
          "Variance and VarianceOut should share the same memory"));

  const auto x_dims = ctx->GetInputDim("X");

  for (int i = 0; i < x_dims.size(); i++) {
    PADDLE_ENFORCE_EQ(
        (x_dims[i] == -1) || (x_dims[i] > 0),
        true,
        platform::errors::InvalidArgument(
            "Each dimension of input tensor is expected to be -1 or a "
            "positive number, but received %d. Input's shape is [%s].",
            x_dims[i],
            x_dims));
  }

  const DataLayout data_layout =
      phi::StringToDataLayout(ctx->Attrs().Get<std::string>("data_layout"));

  if (ctx->IsRuntime() && ctx->HasInput("MomentumTensor")) {
    auto mom = ctx->Inputs("MomentumTensor");
    PADDLE_ENFORCE_EQ(mom.size(),
                      1,
                      platform::errors::InvalidArgument(
                          "The input tensor MomentumTensor's size must be 1. "
                          "But received: MomentumTensor's size is [%d]",
                          mom.size()));
  }

  PADDLE_ENFORCE_GE(
      x_dims.size(),
      2,
      platform::errors::InvalidArgument(
          "ShapeError: the dimension of input "
          "X must be greater than or equal to 2. But received: the shape of "
          "input X = [%s], the dimension of input X = [%d]",
          x_dims,
          x_dims.size()));
  PADDLE_ENFORCE_LE(
      x_dims.size(),
      5,
      platform::errors::InvalidArgument(
          "ShapeError: the dimension of input X "
          "must be smaller than or equal to 5. But received: the shape of "
          "input X = [%s], the dimension of input X = [%d]",
          x_dims,
          x_dims.size()));
  VLOG(4) << ctx->IsRunMKLDNNKernel();
  VLOG(4) << data_layout;
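  // The channel count C is taken from axis 1 for NCHW (the oneDNN kernel also
  // keeps its dims in NCHW order) and from the last axis for NHWC.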
  const int64_t C =
      ((ctx->IsRunMKLDNNKernel() == true) || (data_layout == DataLayout::kNCHW)
           ? x_dims[1]
           : x_dims[x_dims.size() - 1]);

  auto scale_dim = ctx->GetInputDim("Scale");
  auto bias_dim = ctx->GetInputDim("Bias");

  PADDLE_ENFORCE_EQ(
      scale_dim.size(),
      1UL,
      platform::errors::InvalidArgument(
          "ShapeError: the dimension of scale must be equal to 1. "
          "But received: the shape of scale is [%s], the dimension "
          "of scale is [%d]",
          scale_dim,
          scale_dim.size()));
  PADDLE_ENFORCE_EQ(bias_dim.size(),
                    1UL,
                    platform::errors::InvalidArgument(
                        "ShapeError: the dimension of bias must be equal to "
                        "1. But received: the shape of bias is [%s], the "
                        "dimension of bias is [%d]",
                        bias_dim,
                        bias_dim.size()));

  bool check = true;
  if ((!ctx->IsRuntime()) &&
      (phi::product(scale_dim) <= 0 || phi::product(bias_dim) <= 0)) {
    check = false;
  }

  if (check) {
    PADDLE_ENFORCE_EQ(scale_dim[0],
                      C,
                      platform::errors::InvalidArgument(
                          "ShapeError: the shape of scale must be equal to "
                          "[%d]. But received: the shape of scale is [%d]",
                          C,
                          scale_dim[0]));
    PADDLE_ENFORCE_EQ(bias_dim[0],
                      C,
                      platform::errors::InvalidArgument(
                          "ShapeError: the shape of bias must be equal to "
                          "[%d]. But received: the shape of bias is [%d]",
                          C,
                          bias_dim[0]));
  }
  ctx->SetOutputDim("Y", x_dims);
  ctx->ShareLoD("X", "Y");
  VLOG(4) << x_dims;
  ctx->SetOutputDim("MeanOut", {C});
  ctx->SetOutputDim("VarianceOut", {C});
  if (!test_mode) {
    ctx->SetOutputDim("SavedMean", {C});
    ctx->SetOutputDim("SavedVariance", {C});
  }
  if (ctx->HasOutput("ReserveSpace")) {
    ctx->SetOutputDim("ReserveSpace", {-1});
  }
}

phi::KernelKey BatchNormOp::GetExpectedKernelType(
    const framework::ExecutionContext &ctx) const {
  auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
  // By default, the type of the scale, bias, mean, and var tensors should
  // all be float (for float or float16 input tensors) or double (for double
  // input tensors).
  auto bn_param_type = framework::proto::VarType::FP32;
  if (input_data_type == framework::proto::VarType::FP64) {
    bn_param_type = framework::proto::VarType::FP64;
  }
  PADDLE_ENFORCE_EQ(
      bn_param_type,
      framework::TransToProtoVarType(
          ctx.Input<phi::DenseTensor>("Scale")->dtype()),
      platform::errors::InvalidArgument("Scale input should be of float type"));
  PADDLE_ENFORCE_EQ(
      bn_param_type,
      framework::TransToProtoVarType(
          ctx.Input<phi::DenseTensor>("Bias")->dtype()),
      platform::errors::InvalidArgument("Bias input should be of float type"));
  PADDLE_ENFORCE_EQ(
      bn_param_type,
      framework::TransToProtoVarType(
          ctx.Input<phi::DenseTensor>("Mean")->dtype()),
      platform::errors::InvalidArgument("Mean input should be of float type"));
  PADDLE_ENFORCE_EQ(bn_param_type,
                    framework::TransToProtoVarType(
                        ctx.Input<phi::DenseTensor>("Variance")->dtype()),
                    platform::errors::InvalidArgument(
                        "Variance input should be of float type"));

  return phi::KernelKey(input_data_type, ctx.GetPlace());
}

phi::KernelKey BatchNormOp::GetKernelTypeForVar(
    const std::string &var_name,
    const phi::DenseTensor &tensor,
    const phi::KernelKey &expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  // Only the input requires reshaping; the weights and
  // bias already have their shape in NCHW order
  if ((var_name == "X") &&
      (expected_kernel_type.layout() == phi::DataLayout::ONEDNN) &&
      (tensor.layout() != phi::DataLayout::ONEDNN)) {
    auto attrs = Attrs();
    auto ar = paddle::framework::AttrReader(attrs);
    const std::string data_layout = ar.Get<std::string>("data_layout");
    auto dl = phi::StringToDataLayout(data_layout);
    // Some models may have intentionally set "AnyLayout" for this
    // op. Treat it as NCHW (the default data_format value)
    if (dl != phi::DataLayout::kAnyLayout) {
      return phi::KernelKey(tensor.place(), dl, expected_kernel_type.dtype());
    }
  }
#endif
  return phi::KernelKey(
      tensor.place(), tensor.layout(), expected_kernel_type.dtype());
}

void BatchNormOpMaker::Make() {
  AddAttr<bool>("is_test",
                "(bool, default false) Set to true for inference only, false "
                "for training. Some layers may run faster when this is true.")
      .SetDefault(false);
  AddAttr<float>("momentum", "").SetDefault(0.9);
  AddAttr<float>("epsilon", "")
      .SetDefault(1e-5)
      .AddCustomChecker([](const float &epsilon) {
        PADDLE_ENFORCE_GE(
            epsilon,
            0.0f,
            platform::errors::InvalidArgument(
                "'epsilon' should be greater than or equal to 0.0."));
        PADDLE_ENFORCE_LE(epsilon,
                          0.001f,
                          platform::errors::InvalidArgument(
                              "'epsilon' should be less than or equal to "
                              "0.001."));
      });
  AddAttr<std::string>("data_layout", "").SetDefault("NCHW");
  AddInput("X", "The input tensor");
  AddInput("Scale",
           "Scale is a 1-dimensional tensor of size C "
           "that is applied to the output");
  AddInput("Bias",
           "Bias is a 1-dimensional tensor of size C "
           "that is applied to the output");
  AddInput("Mean",
           "The global mean (for training) or "
           "estimated mean (for testing)");
  AddInput("Variance",
           "The global variance (for training) "
           "or estimated Variance (for testing)");
  AddInput("MomentumTensor",
           "(phi::DenseTensor<float32>, optional) If provided, batch_norm will "
           "use this as momentum, this has a higher priority than "
           "attr(momentum), the shape of this tensor MUST BE [1].")
      .AsDispensable();
  AddOutput("Y", "result after normalization");
  AddOutput("MeanOut",
            "Shares memory with Mean. "
            "Stores the global mean when training");
  AddOutput("VarianceOut",
            "Shares memory with Variance. "
            "Stores the global variance when training");
  AddOutput("SavedMean",
            "Mean of the current mini batch, "
            "applied to the output when training")
      .AsIntermediate();
  AddOutput("SavedVariance",
            "Variance of the current mini batch, "
            "applied to the output when training")
      .AsIntermediate();
  AddOutput("ReserveSpace",
            "Reserve GPU space for triggering the new semi-persistent "
            "NHWC kernel")
      .AsDispensable()
      .AsExtra();
  AddAttr<bool>("use_global_stats",
                "(bool, default false) Whether to use the global mean and "
                "variance. In inference or test mode, setting use_global_stats "
                "to true is equivalent to setting is_test to true. "
                "In train mode, when use_global_stats is set to true, the "
                "global mean and variance are also used during training, "
                "and the BN acts as scaling and shifting.")
      .SetDefault(false);
  AddAttr<bool>("trainable_statistics",
                "(bool, default false) Whether to calculate mean and variance "
                "in test mode. If set to true in test mode, mean and variance "
                "will be calculated by current batch statistics.")
      .SetDefault(false);
  AddComment(R"DOC(
Batch Normalization.

Batch Norm has been implemented as discussed in the paper:
https://arxiv.org/pdf/1502.03167.pdf
Can be used as a normalizer function for conv2d and fully_connected operations.
The required data format for this layer is one of the following:
1. NHWC `[batch, in_height, in_width, in_channels]`
2. NCHW `[batch, in_channels, in_height, in_width]`
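
For each channel c, the training-time computation follows the formula from the
paper above (summarized here for reference):

  y = Scale[c] * (x - batch_mean[c]) / sqrt(batch_var[c] + epsilon) + Bias[c]

In test mode, the estimated Mean and Variance inputs are used in place of the
mini-batch statistics.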

)DOC");
}

void BatchNormGradOp::InferShape(framework::InferShapeContext *ctx) const {
  // check input
  OP_INOUT_CHECK(ctx->HasInput("Scale"), "Input", "Scale", "BatchNormGrad");
  OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Y")),
                 "Input",
                 framework::GradVarName("Y"),
                 "BatchNormGrad");
  OP_INOUT_CHECK(
      ctx->HasInput("SavedMean"), "Input", "SavedMean", "BatchNormGrad");
  OP_INOUT_CHECK(ctx->HasInput("SavedVariance"),
                 "Input",
                 "SavedVariance",
                 "BatchNormGrad");

  // check output
  const bool has_scale_grad = ctx->HasOutput(framework::GradVarName("Scale"));
  const bool has_bias_grad = ctx->HasOutput(framework::GradVarName("Bias"));
  const bool has_x_grad = ctx->HasOutput(framework::GradVarName("X"));

  PADDLE_ENFORCE_EQ((has_scale_grad == has_bias_grad),
                    true,
                    platform::errors::NotFound(
                        "Output(Scale@GRAD) and Output(Bias@GRAD) must both "
                        "be null or both be non-null. But now, "
                        "has Scale@GRAD=[%d], has Bias@GRAD=[%d]",
                        has_scale_grad,
                        has_bias_grad));

  const bool use_global_stats = ctx->Attrs().Get<bool>("use_global_stats");
  if (use_global_stats) {
    PADDLE_ENFORCE_EQ(
        !ctx->Attrs().Get<bool>("use_mkldnn"),
        true,
        platform::errors::InvalidArgument(
            "Using global stats during training is not supported "
            "in the oneDNN version of the batch_norm_gradient kernel now."));
  }

  OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "BatchNormGrad");
  const auto x_dims = ctx->GetInputDim("X");
  const DataLayout data_layout =
      phi::StringToDataLayout(ctx->Attrs().Get<std::string>("data_layout"));

  const int C =
      ((ctx->IsRunMKLDNNKernel() == true) || (data_layout == DataLayout::kNCHW)
           ? x_dims[1]
           : x_dims[x_dims.size() - 1]);

  // has_scale_grad == has_bias_grad here, so checking has_scale_grad is enough
  if (has_scale_grad) {
    ctx->SetOutputDim(framework::GradVarName("Scale"), {C});
    ctx->SetOutputDim(framework::GradVarName("Bias"), {C});
  }
  if (has_x_grad) {
    ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
  }
}

phi::KernelKey BatchNormGradOp::GetExpectedKernelType(
    const framework::ExecutionContext &ctx) const {
  const auto *var = ctx.InputVar(framework::GradVarName("Y"));
  if (var == nullptr) {
    PADDLE_THROW(
        platform::errors::InvalidArgument("can't find gradient variable of Y"));
  }
  const phi::DenseTensor *t = nullptr;
  if (var->IsType<phi::DenseTensor>()) {
    t = &var->Get<phi::DenseTensor>();
  }
  if (t == nullptr) {
    PADDLE_THROW(
        platform::errors::InvalidArgument("gradient variable of Y is empty"));
  }

  auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
  return phi::KernelKey(data_type, ctx.GetPlace());
}

phi::KernelKey BatchNormGradOp::GetKernelTypeForVar(
    const std::string &var_name,
    const phi::DenseTensor &tensor,
    const phi::KernelKey &expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  // Only the inputs require reshaping; the weights and
  // bias already have their shape in NCHW order
  if (((var_name == "X") || (var_name == framework::GradVarName("Y"))) &&
      (expected_kernel_type.layout() == phi::DataLayout::ONEDNN) &&
      (tensor.layout() != phi::DataLayout::ONEDNN)) {
    auto attrs = Attrs();
    auto ar = paddle::framework::AttrReader(attrs);
    const std::string data_layout = ar.Get<std::string>("data_layout");
    auto dl = phi::StringToDataLayout(data_layout);
    // Some models may have intentionally set "AnyLayout" for this
    // op. Treat it as NCHW (the default data_format value)
    if (dl != phi::DataLayout::kAnyLayout) {
      return phi::KernelKey(tensor.place(), dl, expected_kernel_type.dtype());
    }
  }
#endif
  return phi::KernelKey(
      tensor.place(), tensor.layout(), expected_kernel_type.dtype());
}

template <typename T>
void BatchNormGradMaker<T>::Apply(GradOpPtr<T> op) const {
  op->SetType(this->ForwardOpType() + "_grad");
  op->SetInput("X", this->Input("X"));
  op->SetInput(framework::GradVarName("Y"), this->OutputGrad("Y"));

  op->SetInput("Scale", this->Input("Scale"));
  op->SetInput("Bias", this->Input("Bias"));
  op->SetInput("SavedMean", this->Output("SavedMean"));
  op->SetInput("SavedVariance", this->Output("SavedVariance"));
  if (this->HasOutput("ReserveSpace")) {
    op->SetInput("ReserveSpace", this->Output("ReserveSpace"));
  }

  // used when use_global_stats is set to true during training
  if (PADDLE_GET_CONST(bool, this->GetAttr("use_global_stats")) ||
      PADDLE_GET_CONST(bool, this->GetAttr("is_test"))) {
    op->SetInput("Mean", this->Output("MeanOut"));
    op->SetInput("Variance", this->Output("VarianceOut"));
  }

  op->SetAttrMap(this->Attrs());

  op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
  op->SetOutput(framework::GradVarName("Scale"), this->InputGrad("Scale"));
  op->SetOutput(framework::GradVarName("Bias"), this->InputGrad("Bias"));
}

template <typename T>
void BatchNormDoubleGradMaker<T>::Apply(GradOpPtr<T> op) const {
  op->SetType("batch_norm_grad_grad");
  op->SetInput("X", this->Input("X"));
  op->SetInput("Scale", this->Input("Scale"));
  op->SetInput("SavedMean", this->Input("SavedMean"));
  op->SetInput("SavedVariance", this->Input("SavedVariance"));
  if (PADDLE_GET_CONST(bool, this->GetAttr("use_global_stats"))) {
    op->SetInput("Mean", this->Input("Mean"));
    op->SetInput("Variance", this->Input("Variance"));
  }
  op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
  op->SetInput("DDScale", this->OutputGrad(framework::GradVarName("Scale")));
  op->SetInput("DDBias", this->OutputGrad(framework::GradVarName("Bias")));
  op->SetInput("DY", this->Input(framework::GradVarName("Y")));

  op->SetAttrMap(this->Attrs());
  op->SetOutput("DX", this->InputGrad("X"));
  op->SetOutput("DScale", this->InputGrad("Scale"));
  op->SetOutput("DDY", this->InputGrad(framework::GradVarName("Y")));
}
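
// Note on naming: the DD* inputs wired above carry the gradients received by
// the first-order outputs (e.g. DDX is the gradient of X@GRAD), while DX,
// DScale, and DDY are the outputs this double-grad op produces.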

void BatchNormDoubleGradOp::InferShape(
    framework::InferShapeContext *ctx) const {
  OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "BatchNormDoubleGrad");
  OP_INOUT_CHECK(
      ctx->HasInput("Scale"), "Input", "Scale", "BatchNormDoubleGrad");
  OP_INOUT_CHECK(
      ctx->HasInput("SavedMean"), "Input", "SavedMean", "BatchNormDoubleGrad");
  OP_INOUT_CHECK(ctx->HasInput("SavedVariance"),
                 "Input",
                 "SavedVariance",
                 "BatchNormDoubleGrad");

  const bool use_global_stats = ctx->Attrs().Get<bool>("use_global_stats");
  if (use_global_stats) {
    OP_INOUT_CHECK(ctx->HasInput("Variance"),
                   "Input",
                   "Variance",
                   "BatchNormDoubleGrad");
  }

  OP_INOUT_CHECK(ctx->HasInput("DY"), "Input", "DY", "BatchNormDoubleGrad");

  // check output
  OP_INOUT_CHECK(ctx->HasOutput("DX"), "Output", "DX", "BatchNormDoubleGrad");

  const auto x_dims = ctx->GetInputDim("X");
  const DataLayout data_layout =
      phi::StringToDataLayout(ctx->Attrs().Get<std::string>("data_layout"));
  const int C =
      ((ctx->IsRunMKLDNNKernel() == true) || (data_layout == DataLayout::kNCHW)
           ? x_dims[1]
           : x_dims[x_dims.size() - 1]);

  if (ctx->HasOutput("DX")) {
    ctx->SetOutputDim("DX", x_dims);
  }
  if (ctx->HasOutput("DScale")) {
    ctx->SetOutputDim("DScale", {C});
  }
  if (ctx->HasOutput("DDY")) {
    ctx->ShareDim("X", "DDY");
  }
}

phi::KernelKey BatchNormDoubleGradOp::GetExpectedKernelType(
    const framework::ExecutionContext &ctx) const {
  const auto *var = ctx.InputVar("DY");
  if (var == nullptr) {
    PADDLE_THROW(
        platform::errors::NotFound("cannot find gradient variable of Y"));
  }
  const phi::DenseTensor *t = nullptr;
  if (var->IsType<phi::DenseTensor>()) {
    t = &var->Get<phi::DenseTensor>();
  }
  if (t == nullptr) {
    PADDLE_THROW(
        platform::errors::InvalidArgument("gradient variable of Y is empty"));
  }
  return phi::KernelKey(OperatorWithKernel::IndicateVarDataType(ctx, "X"),
                        ctx.GetPlace());
}

DECLARE_INPLACE_OP_INFERER(BatchNormDoubleGradOpInplaceInferer, {"DY", "DDY"});

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

DECLARE_INFER_SHAPE_FUNCTOR(batch_norm,
                            BatchNormInferShapeFunctor,
                            PD_INFER_META(phi::BatchNormInferMeta));

REGISTER_OPERATOR(batch_norm,
                  ops::BatchNormOp,
                  ops::BatchNormOpMaker,
                  ops::BatchNormOpInferVarType,
                  ops::BatchNormGradMaker<paddle::framework::OpDesc>,
                  ops::BatchNormGradMaker<paddle::imperative::OpBase>);

REGISTER_OPERATOR(batch_norm_grad,
                  ops::BatchNormGradOp,
                  ops::BatchNormDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::BatchNormDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(batch_norm_grad_grad,
                  ops::BatchNormDoubleGradOp,
                  ops::BatchNormDoubleGradOpInplaceInferer);