/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>  // for max
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

class ElementwiseOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  using Tensor = framework::Tensor;

  void InferShape(framework::InferShapeContext *ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ElementwiseOp");
    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "ElementwiseOp");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "ElementwiseOp");

    PADDLE_ENFORCE_EQ(
        ctx->GetInputsVarType("Y").front(),
        framework::proto::VarType::LOD_TENSOR,
        platform::errors::InvalidArgument(
            "The input var's type should be LoDTensor, but the "
            "received is %s [%s].",
            ctx->GetInputsVarType("Y").front(), ctx->Inputs("Y").front()));

    if (ctx->GetInputsVarType("X").front() ==
        framework::proto::VarType::SELECTED_ROWS) {
      PADDLE_ENFORCE_EQ(
          ctx->GetInputDim("Y").size(), 1u,
          platform::errors::InvalidArgument(
              "For elementwise_op, if X is Sparse(VarType.SELECTED_ROWS"
              "), Y must be scalar, the size of Y should be 1. "
              "But received the size of Y = %s.",
              ctx->GetInputDim("Y").size()));
      PADDLE_ENFORCE_EQ(
          ctx->GetInputDim("Y")[0], 1,
          platform::errors::InvalidArgument(
              "For elementwise_op, if X is Sparse(VarType.SELECTED_ROWS"
              "), Y must be scalar, the first dimension of Y should be 1. "
              "But received the first dimension of Y = %s.",
              ctx->GetInputDim("Y")[0]));
    } else if (ctx->GetInputsVarType("X").front() !=
               framework::proto::VarType::LOD_TENSOR) {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Input X's type[%s] is not supported by elementwise_op. Please set "
          "its type to LOD_TENSOR.",
          ctx->GetInputsVarType("X").front()));
    }

    if (ctx->GetInputDim("X") == ctx->GetInputDim("Y")) {
      ctx->ShareDim("X", /*->*/ "Out");
      ctx->ShareLoD("X", /*->*/ "Out");
    } else {
      auto x_dims = ctx->GetInputDim("X");
      auto y_dims = ctx->GetInputDim("Y");
      int max_dim = std::max(x_dims.size(), y_dims.size());
      int axis = ctx->Attrs().Get<int>("axis");
      axis = (axis == -1 ? std::abs(x_dims.size() - y_dims.size()) : axis);
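      // e.g. shape(X) = (2, 3, 4, 5) and shape(Y) = (4, 5) with the default
      // axis == -1 resolve to axis = |4 - 2| = 2, aligning Y with the
      // trailing dimensions of X.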
      std::vector<int> x_dims_array(max_dim);
      std::vector<int> y_dims_array(max_dim);
      std::vector<int> out_dims_array(max_dim);
      GetBroadcastDimsArrays(x_dims, y_dims, x_dims_array.data(),
                             y_dims_array.data(), out_dims_array.data(),
                             max_dim, axis);
      ctx->SetOutputDim("Out", framework::make_ddim(out_dims_array));
      // TODO: check that sharing X's LoD with Out is valid when broadcasting.
      ctx->ShareLoD("X", /*->*/ "Out");
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");

#ifdef PADDLE_WITH_MKLDNN
    if (platform::CanMKLDNNBeUsed(ctx)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }
};

class ElementwiseOpInferVarType
    : public framework::PassInDtypeAndVarTypeToOutput {
 protected:
  std::unordered_map<std::string, std::string> &GetInputOutputWithSameType()
      const override {
    static std::unordered_map<std::string, std::string> m{{"X", /*->*/ "Out"}};
    return m;
  }
};

class ElementwiseOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() final {
    AddInputX();
    AddInputY();
    AddOpOutput();

    AddAttr<int>("axis",
                 "(int, default -1). If X.dimension != Y.dimension, "
                 "Y's dimensions must be a continuous subsequence of "
                 "X's dimensions, and axis is the start dimension index "
                 "for broadcasting Y onto X.")
        .SetDefault(-1)
        .EqualGreaterThan(-1);
    AddAttr<bool>("use_mkldnn", "(bool, default false). Used by MKLDNN.")
        .SetDefault(false);
    AddAttr<std::string>("x_data_format", "This parameter is no longer used.")
        .SetDefault("");
    AddAttr<std::string>("y_data_format", "This parameter is no longer used.")
        .SetDefault("");
    AddAttr<bool>(
        "use_quantizer",
        "(bool, default false) "
        "This parameter is no longer used. Use 'mkldnn_data_type' instead.")
        .SetDefault(false);
    AddAttr<std::string>(
        "mkldnn_data_type",
        "(string, default \"float32\"). Data type of mkldnn kernel")
        .SetDefault("float32")
        .InEnum({"float32", "int8", "bfloat16"});
    /* int8 parameters */
    AddAttr<float>("Scale_x",
                   "(float, default 1.0f), The quantize scale of X tensor")
        .SetDefault(1.0f);
    AddAttr<float>("Scale_y",
                   "(float, default 1.0f), The quantize scale of Y tensor")
        .SetDefault(1.0f);
    AddAttr<float>("Scale_out",
                   "(float, default 1.0f), The quantize scale of output data")
        .SetDefault(1.0f);
    AddOpComment();
  }

 protected:
  virtual void AddInputX() {
    AddInput("X", "(Tensor), The first input tensor of elementwise op.");
  }
  virtual void AddInputY() {
    AddInput("Y", "(Tensor), The second input tensor of elementwise op.");
  }
  virtual void AddOpOutput() {
    AddOutput("Out",
              "N-dimension tensor. A location into which the result is stored. "
              "It's dimension "
              "equals with x");
  }
  virtual void AddOpComment() { AddComment(GetCommentExamples()); }

  virtual std::string GetOpFuntionality() const { return ""; }

  virtual std::string GetName() const = 0;
  virtual std::string GetEquation() const = 0;

  std::string GetCommentExamples() const {
    return string::Sprintf(R"DOC(
Elementwise %s Operator.

%s

The equation is:

$$%s$$

- $X$: a tensor of any dimension.
- $Y$: a tensor whose dimensions must be less than or equal to the dimensions of $X$.

There are two cases for this operator:

1. The shape of $Y$ is the same as the shape of $X$.
2. The shape of $Y$ is a continuous subsequence of the shape of $X$.

For case 2:

1. Broadcast $Y$ to match the shape of $X$, where $axis$ is the start dimension index
   for broadcasting $Y$ onto $X$.
2. If $axis$ is -1 (default), $axis = rank(X) - rank(Y)$.
3. The trailing dimensions of size 1 for $Y$ will be ignored for the consideration of
   subsequence, such as shape(Y) = (2, 1) => (2).

For example:

  .. code-block:: text

    shape(X) = (2, 3, 4, 5), shape(Y) = (,)
    shape(X) = (2, 3, 4, 5), shape(Y) = (5,)
    shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2
    shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
    shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
    shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0

)DOC",
                           GetName(), GetOpFuntionality(), GetEquation());
  }
};

class ElementwiseOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  using Tensor = framework::Tensor;

  void InferShape(framework::InferShapeContext *ctx) const override {
    auto out_grad_name = framework::GradVarName("Out");
    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "ElementwiseOpGrad");
    OP_INOUT_CHECK(ctx->HasInput(out_grad_name), "Input", out_grad_name,
                   "ElementwiseOpGrad");
    auto x_grad_name = framework::GradVarName("X");
    auto y_grad_name = framework::GradVarName("Y");
    if (ctx->HasOutput(x_grad_name)) {
      ctx->ShareDim("X", /*->*/ x_grad_name);
      ctx->ShareLoD("X", /*->*/ x_grad_name);
    }
    if (ctx->HasOutput(y_grad_name)) {
      ctx->ShareDim("Y", /*->*/ y_grad_name);
      ctx->ShareLoD("Y", /*->*/ y_grad_name);
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type = OperatorWithKernel::IndicateVarDataType(
        ctx, framework::GradVarName("Out"));

#ifdef PADDLE_WITH_MKLDNN
    // If broadcasting is needed, use native implementation
    auto CanMKLDNNElementwiseAddGradBeUsed = [&]() {
      return (ctx.Input<Tensor>("X")->dims() == ctx.Input<Tensor>("Y")->dims());
    };

    if (platform::CanMKLDNNBeUsed(ctx) &&
        (ctx.Type() != "elementwise_add_grad" ||
         CanMKLDNNElementwiseAddGradBeUsed())) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }
};

class ElementwiseOpDoubleGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  using Tensor = framework::Tensor;

  void InferShape(framework::InferShapeContext *ctx) const override {
    auto x_grad_name = framework::GradVarName("X");
    auto y_grad_name = framework::GradVarName("Y");
    if (ctx->HasOutput(x_grad_name)) {
      ctx->ShareDim("X", x_grad_name);
      ctx->ShareLoD("X", x_grad_name);
    }
    if (ctx->HasOutput(y_grad_name)) {
      ctx->ShareDim("Y", y_grad_name);
      ctx->ShareLoD("Y", y_grad_name);
    }
    if (ctx->HasOutput("DDOut")) {
      ctx->ShareDim("DOut", "DDOut");
      ctx->ShareLoD("DOut", "DDOut");
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DOut");

#ifdef PADDLE_WITH_MKLDNN
    if (platform::CanMKLDNNBeUsed(ctx)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }
};

class ElementwiseOpDoubleGradWithoutDXDY
    : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  using Tensor = framework::Tensor;

  void InferShape(framework::InferShapeContext *ctx) const override {
    if (ctx->HasOutput("DDOut")) {
      ctx->ShareDim("DOut", "DDOut");
      ctx->ShareLoD("DOut", "DDOut");
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
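    // Pick the kernel dtype from whichever of DDX / DDY is present;
    // when both are fed, DDX takes precedence.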
    framework::proto::VarType::Type input_data_type;
    if (ctx.HasInput("DDX") == false) {
      OP_INOUT_CHECK(ctx.HasInput("DDY"), "Input", "DDY",
                     "ElementwiseOpDoubleGradWithoutDXDY");
      input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DDY");
    } else if (ctx.HasInput("DDY") == false) {
      OP_INOUT_CHECK(ctx.HasInput("DDX"), "Input", "DDX",
                     "ElementwiseOpDoubleGradWithoutDXDY");
      input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DDX");
    } else {
      input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DDX");
    }

#ifdef PADDLE_WITH_MKLDNN
    if (platform::CanMKLDNNBeUsed(ctx)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }
};

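// Propagates the LoD of dOut to dX; elementwise grad kernels derive from
// this class so the gradient of X keeps its LoD information.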
template <typename T>
class ElemwiseGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    auto *dx =
        context.Output<framework::LoDTensor>(framework::GradVarName("X"));
    if (dx != nullptr) {
      auto &dout =
          *context.Input<framework::LoDTensor>(framework::GradVarName("Out"));
      dx->set_lod(dout.lod());
    }
  }
};

DECLARE_INPLACE_OP_INFERER(ElementwiseOpInplaceInferer, {"X", "Out"});
DECLARE_INPLACE_OP_INFERER(ElementwiseGradOpInplaceInferer,
                           {framework::GradVarName("Out"),
                            framework::GradVarName("X")});
DECLARE_INPLACE_OP_INFERER(ElementwiseDoubleGradOpInplaceInferer,
                           {"DDX", "DDOut"});

DECLARE_NO_NEED_BUFFER_VARS_INFERER(ElementwiseGradNoBufVarsInferer, "X", "Y");
DECLARE_NO_NEED_BUFFER_VARS_INFERER(ElementwiseDoubleGradNoBufVarsInferer, "Y",
                                    "DOut");

}  // namespace operators
}  // namespace paddle
#define REGISTER_ELEMWISE_GRAD_MAKER(kernel_type, op_name)              \
  template <typename T>                                                 \
  class kernel_type##GradMaker                                          \
      : public paddle::framework::SingleGradOpMaker<T> {                \
   public:                                                              \
    using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker; \
                                                                        \
   protected:                                                           \
    void Apply(::paddle::framework::GradOpPtr<T> op) const override {   \
      op->SetType(#kernel_type "_grad");                                \
      op->SetInput("X", this->Input("X"));                              \
      op->SetInput("Y", this->Input("Y"));                              \
      op->SetInput(::paddle::framework::GradVarName("Out"),             \
                   this->OutputGrad("Out"));                            \
      op->SetAttrMap(this->Attrs());                                    \
      op->SetOutput(::paddle::framework::GradVarName("X"),              \
                    this->InputGrad("X"));                              \
      op->SetOutput(::paddle::framework::GradVarName("Y"),              \
                    this->InputGrad("Y"));                              \
    }                                                                   \
  }
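//
// Illustrative expansion (op name hypothetical):
//   REGISTER_ELEMWISE_GRAD_MAKER(elementwise_mul, Mul);
// defines an elementwise_mulGradMaker whose Apply() wires X, Y and
// GradVarName("Out") into a generated "elementwise_mul_grad" op.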

#define REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(op_type, op_name)    \
  REGISTER_OPERATOR(op_type, ::paddle::operators::ElementwiseOp,        \
                    ::paddle::operators::Elementwise##op_name##OpMaker, \
                    ::paddle::operators::ElementwiseOpInferVarType,     \
                    op_type##GradMaker<::paddle::framework::OpDesc>,    \
                    op_type##GradMaker<::paddle::imperative::OpBase>,   \
                    ::paddle::operators::ElementwiseOpInplaceInferer);
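//
// A hedged usage sketch (op and maker names are illustrative, assuming an
// ElementwiseMulOpMaker subclass of ElementwiseOpMaker exists):
//   REGISTER_ELEMWISE_GRAD_MAKER(elementwise_mul, Mul);
//   REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(elementwise_mul, Mul);
// This registers the forward op together with its proto maker, var-type
// inference, the generated grad makers and the in-place inferer above.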