/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/layer_norm_op.h"

#include <memory>
#include <string>

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
using DataLayout = framework::DataLayout;

class LayerNormOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "LayerNorm");
    OP_INOUT_CHECK(ctx->HasOutput("Y"), "Output", "Y", "LayerNorm");
    OP_INOUT_CHECK(ctx->HasOutput("Mean"), "Output", "Mean", "LayerNorm");
    OP_INOUT_CHECK(ctx->HasOutput("Variance"), "Output", "Variance",
                   "LayerNorm");

    auto x_dim = ctx->GetInputDim("X");
    auto begin_norm_axis = ctx->Attrs().Get<int>("begin_norm_axis");
    PADDLE_ENFORCE_LT(
        begin_norm_axis, x_dim.size(),
        platform::errors::InvalidArgument(
            "'begin_norm_axis' must be less than the rank of Input(X), "
            "but received 'begin_norm_axis' is [%d] and the rank of "
            "Input(X) is [%d].",
            begin_norm_axis, x_dim.size()));

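    // Flatten X into a 2-D matrix [left, right]: dimensions before
    // `begin_norm_axis` collapse into the rows and the remaining dimensions
    // into the normalized feature vector. E.g. X with shape [2, 3, 4, 5]
    // and begin_norm_axis = 2 gives matrix_dim = [6, 20].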
    auto matrix_dim = pten::flatten_to_2d(x_dim, begin_norm_axis);
    int left = static_cast<int>(matrix_dim[0]);
    int right = static_cast<int>(matrix_dim[1]);
    if (ctx->HasInput("Scale")) {
      PADDLE_ENFORCE_EQ(ctx->GetInputDim("Scale").size(), 1,
                        platform::errors::InvalidArgument(
                            "The rank of Input(Scale) must be 1, but "
                            "received the rank of Input(Scale) is [%d].",
                            ctx->GetInputDim("Scale").size()));

      if (ctx->IsRuntime()) {
        PADDLE_ENFORCE_EQ(
            ctx->GetInputDim("Scale")[0], right,
            platform::errors::InvalidArgument(
                "The first dimension value of Input(Scale) must equal the "
                "second dimension value of the flattened 2-D matrix of "
                "Input(X), but received the first dimension value of "
                "Input(Scale) is [%d] and the second dimension value of the "
                "flattened 2-D matrix of Input(X) is [%d].",
                ctx->GetInputDim("Scale")[0], right));
      }
    }
    if (ctx->HasInput("Bias")) {
      PADDLE_ENFORCE_EQ(ctx->GetInputDim("Bias").size(), 1,
                        platform::errors::InvalidArgument(
                            "The rank of Input(Bias) must be 1, but "
                            "received the rank of Input(Bias) is [%d].",
                            ctx->GetInputDim("Bias").size()));
      if (ctx->IsRuntime()) {
        PADDLE_ENFORCE_EQ(
            ctx->GetInputDim("Bias")[0], right,
            platform::errors::InvalidArgument(
                "The first dimension value of Input(Bias) must equal the "
                "second dimension value of the flattened 2-D matrix of "
                "Input(X), but received the first dimension value of "
                "Input(Bias) is [%d] and the second dimension value of the "
                "flattened 2-D matrix of Input(X) is [%d].",
                ctx->GetInputDim("Bias")[0], right));
      }
    }

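    // Y keeps the full shape of X; Mean and Variance hold one statistic per
    // feature vector, i.e. one per row of the flattened [left, right] matrix.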
    ctx->SetOutputDim("Y", ctx->GetInputDim("X"));
    ctx->SetOutputDim("Mean", {left});
    ctx->SetOutputDim("Variance", {left});
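    // Y is produced element-wise from X, so it can share X's LoD directly.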
    ctx->ShareLoD("X", "Y");
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
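    // Default to the plain CPU kernel; switch to the MKL-DNN kernel below
    // when the build enables it and CanMKLDNNBeUsed() reports that the
    // runtime context supports it.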
    framework::LibraryType library = framework::LibraryType::kPlain;
    framework::DataLayout layout = framework::DataLayout::kAnyLayout;

#ifdef PADDLE_WITH_MKLDNN
    if (library == framework::LibraryType::kPlain &&
        this->CanMKLDNNBeUsed(ctx, input_data_type)) {
      library = framework::LibraryType::kMKLDNN;
      layout = framework::DataLayout::kMKLDNN;
    }
#endif

    return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout,
                                   library);
  }
};

class LayerNormOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "The input tensor.");
    AddInput("Scale",
             "(optional) Scale is a 1-dimensional tensor of size "
             "H (`begin_norm_axis` splits the tensor `X` into a matrix "
             "[N, H]). It is applied to the output.")
        .AsDispensable();
    AddInput("Bias",
             "(optional) Bias is a 1-dimensional tensor of size "
             "H (`begin_norm_axis` splits the tensor `X` into a matrix "
             "[N, H]). It is applied to the output.")
        .AsDispensable();
    AddOutput("Y", "Result after normalization.");
    AddOutput("Mean", "Mean of the current mini batch.").AsIntermediate();
    AddOutput("Variance", "Variance of the current mini batch.")
        .AsIntermediate();

    AddAttr<float>("epsilon",
                   "Constant for numerical stability [default 1e-5].")
        .SetDefault(1e-5)
        .AddCustomChecker([](const float &epsilon) {
          PADDLE_ENFORCE_EQ(epsilon >= 0.0f && epsilon <= 0.001f, true,
                            platform::errors::InvalidArgument(
                                "'epsilon' in Op(LayerNorm) should be between "
                                "0.0 and 0.001, but received [%s].",
                                epsilon));
        });
    AddAttr<int>("begin_norm_axis",
                 "the axes from `begin_norm_axis` to `Rank(X) - 1` will be "
                 "normalized. `begin_norm_axis` splits the tensor `X` into a "
                 "matrix [N, H]. [default 1].")
        .SetDefault(1)
        .AddCustomChecker([](const int &begin_norm_axis) {
          PADDLE_ENFORCE_GT(begin_norm_axis, 0,
                            platform::errors::InvalidArgument(
                                "'begin_norm_axis' in Op(LayerNorm) should "
                                "be greater than zero, but received [%d].",
                                begin_norm_axis));
        });
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false)
        .AsExtra();
    AddAttr<std::string>(
        "mkldnn_data_type",
        "(string, default \"float32\"). Data type of mkldnn kernel")
        .SetDefault("float32")
        .InEnum({"float32", "bfloat16"})
        .AsExtra();
    AddAttr<bool>("is_test",
                  "(bool, default false) Set to true for inference only, false "
                  "for training. Some layers may run faster when this is true.")
        .SetDefault(false)
        .AsExtra();

    AddComment(R"DOC(
Assume feature vectors exist on dimensions
:attr:`begin_norm_axis ... rank(input)` and calculate the moment statistics
along these dimensions for each feature vector :math:`a` with size
:math:`H`, then normalize each feature vector using the corresponding
statistics. After that, apply learnable gain and bias on the normalized
tensor to scale and shift if :attr:`scale` and :attr:`shift` are set.
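
Concretely, writing one such feature vector as :math:`a = (a_1, \dots, a_H)`
and assuming the standard formulation from the paper cited below, the op
computes

.. math::

    \mu = \frac{1}{H}\sum_{i=1}^{H} a_i, \qquad
    \sigma = \sqrt{\frac{1}{H}\sum_{i=1}^{H}(a_i - \mu)^2 + \epsilon}, \qquad
    y = g \odot \frac{a - \mu}{\sigma} + b

where :math:`g` (Scale) and :math:`b` (Bias) are the optional learnable
parameters and :math:`\epsilon` is the `epsilon` attribute.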

Refer to `Layer Normalization <https://arxiv.org/pdf/1607.06450v1.pdf>`_
)DOC");
  }
};

class LayerNormGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    // check input
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "LayerNormGrad");
    OP_INOUT_CHECK(ctx->HasInput("Mean"), "Input", "Mean", "LayerNormGrad");
    OP_INOUT_CHECK(ctx->HasInput("Variance"), "Input", "Variance",
                   "LayerNormGrad");
    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Y")), "Input",
                   framework::GradVarName("Y"), "LayerNormGrad");

    // check output
    if (ctx->HasOutput(framework::GradVarName("X"))) {
      ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
    }
    if (ctx->HasOutput(framework::GradVarName("Scale"))) {
      ctx->SetOutputDim(framework::GradVarName("Scale"),
                        ctx->GetInputDim("Scale"));
    }
    if (ctx->HasOutput(framework::GradVarName("Bias"))) {
      ctx->SetOutputDim(framework::GradVarName("Bias"),
                        ctx->GetInputDim("Bias"));
    }
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
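    // Y@GRAD must be fed (as a Tensor or LoDTensor) before a kernel type can
    // be chosen; the kernel's data type itself is inferred from X below.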
    const auto *var = ctx.InputVar(framework::GradVarName("Y"));
    PADDLE_ENFORCE_NOT_NULL(var, platform::errors::NotFound(
                                     "Y@GRAD of LayerNorm Op is not found."));
    const Tensor *t = nullptr;
    if (var->IsType<Tensor>()) {
      t = &var->Get<Tensor>();
    } else if (var->IsType<LoDTensor>()) {
      t = &var->Get<LoDTensor>();
    }
    PADDLE_ENFORCE_NOT_NULL(
        t, platform::errors::NotFound("Y@GRAD of LayerNorm Op is not found."));

    framework::LibraryType library = framework::LibraryType::kPlain;
    framework::DataLayout layout = framework::DataLayout::kAnyLayout;

    return framework::OpKernelType(
        OperatorWithKernel::IndicateVarDataType(ctx, "X"), ctx.GetPlace(),
        layout, library);
  }
};

template <typename T>
class LayerNormGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("layer_norm_grad");
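    // The backward op consumes the forward input X, the saved Mean/Variance
    // statistics, and the upstream gradient Y@GRAD; Scale and Bias (and their
    // gradients) are wired up only when the forward op received them.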
    op->SetInput("X", this->Input("X"));
    op->SetInput("Mean", this->Output("Mean"));
    op->SetInput("Variance", this->Output("Variance"));
    if (this->HasInput("Scale")) {
      op->SetInput("Scale", this->Input("Scale"));
      op->SetOutput(framework::GradVarName("Scale"), this->InputGrad("Scale"));
    }

    if (this->HasInput("Bias")) {
      op->SetInput("Bias", this->Input("Bias"));
      op->SetOutput(framework::GradVarName("Bias"), this->InputGrad("Bias"));
    }

    op->SetInput(framework::GradVarName("Y"), this->OutputGrad("Y"));
    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    op->SetAttrMap(this->Attrs());
  }
};

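// layer_norm_grad reads only the shape of Bias (to size Bias@GRAD), never
// its contents, so marking it as a no-need-buffer variable lets the
// framework drop the Bias data buffer during backward.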
DECLARE_NO_NEED_BUFFER_VARS_INFERER(LayerNormGradNoNeedBufferVarInferer,
                                    "Bias");

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(layer_norm, ops::LayerNormOp, ops::LayerNormOpMaker,
                  ops::LayerNormGradOpMaker<paddle::framework::OpDesc>,
                  ops::LayerNormGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(layer_norm_grad, ops::LayerNormGradOp,
                  ops::LayerNormGradNoNeedBufferVarInferer);
REGISTER_OP_CPU_KERNEL(
    layer_norm, ops::LayerNormKernel<paddle::platform::CPUDeviceContext, float>,
    ops::LayerNormKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    layer_norm_grad,
    ops::LayerNormGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::LayerNormGradKernel<paddle::platform::CPUDeviceContext, double>);