/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>  // for max
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/common_infer_shape_functions.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

class ElementwiseOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  using Tensor = framework::Tensor;

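  // Validates inputs/outputs and variable types, then either shares X's
  // shape and LoD with Out (when X and Y have identical dims) or infers
  // the broadcast output shape.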
  void InferShape(framework::InferShapeContext *ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ElementwiseOp");
    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "ElementwiseOp");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "ElementwiseOp");

    PADDLE_ENFORCE_EQ(
        ctx->GetInputsVarType("Y").front(),
        framework::proto::VarType::LOD_TENSOR,
        platform::errors::InvalidArgument(
            "The input var's type should be LoDTensor, but the "
            "received is %s [%s].",
            ctx->GetInputsVarType("Y").front(), ctx->Inputs("Y").front()));

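    // A sparse (SELECTED_ROWS) X is only supported against a scalar Y,
    // i.e. a 1-D tensor holding a single element.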
    if (ctx->GetInputsVarType("X").front() ==
        framework::proto::VarType::SELECTED_ROWS) {
      PADDLE_ENFORCE_EQ(
          ctx->GetInputDim("Y").size(), 1u,
          platform::errors::InvalidArgument(
              "For elementwise_op, if X is Sparse(VarType.SELECTED_ROWS"
              "), Y must be scalar, the size of Y should be 1. "
              "But reveived the size of Y = %s.",
              ctx->GetInputDim("Y").size()));
      PADDLE_ENFORCE_EQ(
          ctx->GetInputDim("Y")[0], 1,
          platform::errors::InvalidArgument(
              "For elementwise_op, if X is Sparse(VarType.SELECTED_ROWS"
              "), Y must be scalar, the first dimension of Y should be 1. "
              "But reveived the first dimension of Y = %s.",
              ctx->GetInputDim("Y")[0]));
    } else if (ctx->GetInputsVarType("X").front() !=
               framework::proto::VarType::LOD_TENSOR) {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Input X's type[%s] is not supported by elementwise_op. Please set "
          "its type to LOD_TENSOR.",
          ctx->GetInputsVarType("X").front()));
    }

    if (ctx->GetInputDim("X") == ctx->GetInputDim("Y")) {
      ctx->ShareDim("X", /*->*/ "Out");
      ctx->ShareLoD("X", /*->*/ "Out");
    } else {
      auto x_dims = ctx->GetInputDim("X");
      auto y_dims = ctx->GetInputDim("Y");
      int max_dim = std::max(x_dims.size(), y_dims.size());
      int axis = ctx->Attrs().Get<int>("axis");
      PADDLE_ENFORCE_EQ((axis >= (-1 * max_dim)) && (axis < max_dim), true,
                        platform::errors::InvalidArgument(
                            "The axis range must be [%s, %s), but axis is %s. "
                            "Please set the axis again.",
                            -1 * max_dim, max_dim, axis));
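      // A negative axis counts from the rank difference: e.g. with
      // x_dims = (2, 3, 4, 5) and y_dims = (4, 5), axis = -1 resolves to
      // |4 - 2| + (-1) + 1 = 2, aligning Y with X's trailing dimensions.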
      axis = (axis < 0 ? (std::abs(x_dims.size() - y_dims.size()) + axis + 1)
                       : axis);
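      // Align both shapes to max_dim, padding with 1s according to axis,
      // then broadcast each dimension pair: e.g. X = (2, 3, 4, 5),
      // Y = (4, 5) with axis = 2 gives y_dims_array = {1, 1, 4, 5} and
      // Out = (2, 3, 4, 5).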
      std::vector<int> x_dims_array(max_dim);
      std::vector<int> y_dims_array(max_dim);
      std::vector<int> out_dims_array(max_dim);
      GetBroadcastDimsArrays(x_dims, y_dims, x_dims_array.data(),
                             y_dims_array.data(), out_dims_array.data(),
                             max_dim, axis);
      ctx->SetOutputDim("Out", framework::make_ddim(out_dims_array));
      // TODO: check that sharing X's LoD with Out is still correct when
      // the output shape is broadcast from X and Y.
      ctx->ShareLoD("X", /*->*/ "Out");
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
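    // X and Y may carry different dtypes; the kernel runs in their
    // promoted common type (e.g. float32 + complex64 -> complex64).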
    auto input_data_type =
        OperatorWithKernel::IndicateOrPromoteVarDataTypes(ctx, "X", "Y");

#ifdef PADDLE_WITH_MKLDNN
    if (this->CanMKLDNNBeUsed(ctx)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const framework::Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const {
    if (framework::IsComplexType(expected_kernel_type.data_type_)) {
      // only promote inputs’s types when contains complex input
      return framework::OpKernelType(tensor.type(), tensor.place(),
                                     tensor.layout());
    } else {
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), tensor.layout());
    }
  }
};

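// Out inherits X's dtype and variable type: the base class
// PassInDtypeAndVarTypeToOutput consumes the mapping returned below.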
class ElementwiseOpInferVarType
    : public framework::PassInDtypeAndVarTypeToOutput {
 protected:
  std::unordered_map<std::string, std::string> &GetInputOutputWithSameType()
      const override {
    static std::unordered_map<std::string, std::string> m{{"X", /*->*/ "Out"}};
    return m;
  }
};

class ElementwiseOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() final {
    AddInputX();
    AddInputY();
    AddOpOutput();

    AddAttr<int>("axis",
                 "(int, default -1). If X.dimension != Y.dimension,"
                 "Y.dimension must be a subsequence of x.dimension. And axis "
                 "is the start dimension index "
                 "for broadcasting Y onto X. ")
        .SetDefault(-1);
    AddAttr<bool>("use_mkldnn", "(bool, default false). Used by MKLDNN.")
        .SetDefault(false);
    AddAttr<std::string>("x_data_format", "This parameter is no longer used.")
        .SetDefault("");
    AddAttr<std::string>("y_data_format", "This parameter is no longer used.")
        .SetDefault("");
    AddAttr<bool>(
        "use_quantizer",
        "(bool, default false) "
        "This parameter is no longer used. Use 'mkldnn_data_type' instead.")
        .SetDefault(false);
    AddAttr<std::string>(
        "mkldnn_data_type",
        "(string, default \"float32\"). Data type of mkldnn kernel")
        .SetDefault("float32")
        .InEnum({"float32", "int8", "bfloat16"});
    /* int8 parameters */
    AddAttr<float>("Scale_x",
                   "(float, default 1.0f), The quantize scale of X tensor")
        .SetDefault(1.0f);
    AddAttr<float>("Scale_y",
                   "(float, default 1.0f), The quantize scale of Y tensor")
        .SetDefault(1.0f);
    AddAttr<float>("Scale_out",
                   "(float, default 1.0f), The quantize scale of output data")
        .SetDefault(1.0f);
    AddOpComment();
  }

 protected:
  virtual void AddInputX() {
    AddInput("X", "(Tensor), The first input tensor of elementwise op.");
  }
  virtual void AddInputY() {
    AddInput("Y", "(Tensor), The second input tensor of elementwise op.");
  }
  virtual void AddOpOutput() {
    AddOutput("Out",
              "N-dimension tensor. A location into which the result is stored. "
              "It's dimension "
              "equals with x");
  }
  virtual void AddOpComment() { AddComment(GetCommentExamples()); }

  virtual std::string GetOpFuntionality() const { return ""; }

  virtual std::string GetName() const = 0;
  virtual std::string GetEquation() const = 0;

  std::string GetCommentExamples() const {
    return string::Sprintf(R"DOC(
Elementwise %s Operator.

%s

The equation is:

$$%s$$

- $X$: a tensor of any dimension.
- $Y$: a tensor whose dimensions must be less than or equal to the dimensions of $X$.

There are two cases for this operator:

1. The shape of $Y$ is the same as that of $X$.
2. The shape of $Y$ is a contiguous subsequence of that of $X$.

For case 2:

1. Broadcast $Y$ to match the shape of $X$, where $axis$ is the start dimension index
   for broadcasting $Y$ onto $X$.
2. If $axis$ is -1 (default), $axis = rank(X) - rank(Y)$.
3. Trailing dimensions of size 1 in $Y$ are ignored when matching the
   subsequence, e.g. shape(Y) = (2, 1) => (2).

For example:

  .. code-block:: text

    shape(X) = (2, 3, 4, 5), shape(Y) = (,)
    shape(X) = (2, 3, 4, 5), shape(Y) = (5,)
    shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1 (default) or axis=2
    shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
    shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
    shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0

)DOC",
                           GetName(), GetOpFuntionality(), GetEquation());
  }
};

class ElementwiseOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  using Tensor = framework::Tensor;

  void InferShape(framework::InferShapeContext *ctx) const override {
    auto out_grad_name = framework::GradVarName("Out");
    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "ElementwiseOpGrad");
    OP_INOUT_CHECK(ctx->HasInput(out_grad_name), "Input", out_grad_name,
                   "ElementwiseOpGrad");
    auto x_grad_name = framework::GradVarName("X");
    auto y_grad_name = framework::GradVarName("Y");
    if (ctx->HasOutput(x_grad_name)) {
      ctx->ShareDim("X", /*->*/ x_grad_name);
      ctx->ShareLoD("X", /*->*/ x_grad_name);
    }
    if (ctx->HasOutput(y_grad_name)) {
      ctx->ShareDim("Y", /*->*/ y_grad_name);
      ctx->ShareLoD("Y", /*->*/ y_grad_name);
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type = OperatorWithKernel::IndicateVarDataType(
        ctx, framework::GradVarName("Out"));

#ifdef PADDLE_WITH_MKLDNN
    // If broadcasting is needed, use native implementation
    auto CanMKLDNNElementwiseAddGradBeUsed = [&]() {
      return (ctx.Input<Tensor>("X")->dims() == ctx.Input<Tensor>("Y")->dims());
    };

    if (this->CanMKLDNNBeUsed(ctx) && (ctx.Type() != "elementwise_add_grad" ||
                                       CanMKLDNNElementwiseAddGradBeUsed())) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const framework::Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const override {
    if (framework::IsComplexType(expected_kernel_type.data_type_)) {
      // only promote inputs' types when a complex input is involved
      return framework::OpKernelType(tensor.type(), tensor.place(),
                                     tensor.layout());
    } else {
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), tensor.layout());
    }
  }
};

class ElementwiseOpDoubleGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  using Tensor = framework::Tensor;

  void InferShape(framework::InferShapeContext *ctx) const override {
    auto x_grad_name = framework::GradVarName("X");
    auto y_grad_name = framework::GradVarName("Y");
    if (ctx->HasOutput(x_grad_name)) {
      ctx->ShareDim("X", x_grad_name);
      ctx->ShareLoD("X", x_grad_name);
    }
    if (ctx->HasOutput(y_grad_name)) {
      ctx->ShareDim("Y", y_grad_name);
      ctx->ShareLoD("Y", y_grad_name);
    }
    if (ctx->HasOutput("DDOut")) {
      ctx->ShareDim("DOut", "DDOut");
      ctx->ShareLoD("DOut", "DDOut");
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DOut");

#ifdef PADDLE_WITH_MKLDNN
    if (this->CanMKLDNNBeUsed(ctx)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const framework::Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const {
    if (framework::IsComplexType(expected_kernel_type.data_type_)) {
      // only promote inputs’s types when contains complex input
      return framework::OpKernelType(tensor.type(), tensor.place(),
                                     tensor.layout());
    } else {
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), tensor.layout());
    }
  }
};

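// Double-grad variant that emits neither GRAD(X) nor GRAD(Y); only DDOut
// is inferred, taking its shape and LoD from DOut.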
class ElementwiseOpDoubleGradWithoutDXDY
    : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  using Tensor = framework::Tensor;

  void InferShape(framework::InferShapeContext *ctx) const override {
    if (ctx->HasOutput("DDOut")) {
      ctx->ShareDim("DOut", "DDOut");
      ctx->ShareLoD("DOut", "DDOut");
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
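    // DDX and DDY are both optional; infer the kernel dtype from whichever
    // is present, and promote across the two only when both are given.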
    framework::proto::VarType::Type input_data_type;
    if (ctx.HasInput("DDX") == false) {
      OP_INOUT_CHECK(ctx.HasInput("DDY"), "Input", "DDY",
                     "ElementwiseOpDoubleGradWithoutDXDY");
      input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DDY");
    } else if (ctx.HasInput("DDY") == false) {
      OP_INOUT_CHECK(ctx.HasInput("DDX"), "Input", "DDX",
                     "ElementwiseOpDoubleGradWithoutDXDY");
      input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DDX");
    } else {
      input_data_type =
          OperatorWithKernel::IndicateOrPromoteVarDataTypes(ctx, "DDX", "DDY");
    }

#ifdef PADDLE_WITH_MKLDNN
    if (this->CanMKLDNNBeUsed(ctx)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const framework::Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const {
    if (framework::IsComplexType(expected_kernel_type.data_type_)) {
      // only promote inputs’s types when contains complex input
      return framework::OpKernelType(tensor.type(), tensor.place(),
                                     tensor.layout());
    } else {
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), tensor.layout());
    }
  }
};

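// Base kernel for elementwise gradient ops: it propagates the LoD of
// GRAD(Out) to GRAD(X); derived kernels compute the actual gradients.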
template <typename T>
class ElemwiseGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    auto *dx =
        context.Output<framework::LoDTensor>(framework::GradVarName("X"));
    if (dx != nullptr) {
      auto &dout =
          *context.Input<framework::LoDTensor>(framework::GradVarName("Out"));
      dx->set_lod(dout.lod());
    }
  }
};

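// In-place reuse pairs: the forward op may write Out over X, the grad op
// GRAD(X) over GRAD(Out), and the double-grad op DDOut over DDX.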
DECLARE_INPLACE_OP_INFERER(ElementwiseOpInplaceInferer, {"X", "Out"});
DECLARE_INPLACE_OP_INFERER(ElementwiseGradOpInplaceInferer,
                           {framework::GradVarName("Out"),
                            framework::GradVarName("X")});
DECLARE_INPLACE_OP_INFERER(ElementwiseDoubleGradOpInplaceInferer,
                           {"DDX", "DDOut"});

DECLARE_NO_NEED_BUFFER_VARS_INFERER(ElementwiseGradNoBufVarsInferer, "X", "Y");
DECLARE_NO_NEED_BUFFER_VARS_INFERER(ElementwiseDoubleGradNoBufVarsInferer, "Y",
                                    "DOut");

}  // namespace operators
}  // namespace paddle
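
// Stamps out a SingleGradOpMaker for an elementwise op. Invoked as, say,
// REGISTER_ELEMWISE_GRAD_MAKER(elementwise_mul, Mul), it defines
// elementwise_mulGradMaker, whose generated "elementwise_mul_grad" op
// forwards X, Y and GRAD(Out), copies the attributes, and emits GRAD(X)
// and GRAD(Y).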
#define REGISTER_ELEMWISE_GRAD_MAKER(kernel_type, op_name)              \
  template <typename T>                                                 \
  class kernel_type##GradMaker                                          \
      : public paddle::framework::SingleGradOpMaker<T> {                \
   public:                                                              \
    using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker; \
                                                                        \
   protected:                                                           \
444
    void Apply(::paddle::framework::GradOpPtr<T> op) const override {   \
H
hong 已提交
445
      op->SetType(#kernel_type "_grad");                                \
446
      op->SetInput("X", this->Input("X"));                              \
H
hong 已提交
447 448 449 450 451 452 453 454 455
      op->SetInput("Y", this->Input("Y"));                              \
      op->SetInput(::paddle::framework::GradVarName("Out"),             \
                   this->OutputGrad("Out"));                            \
      op->SetAttrMap(this->Attrs());                                    \
      op->SetOutput(::paddle::framework::GradVarName("X"),              \
                    this->InputGrad("X"));                              \
      op->SetOutput(::paddle::framework::GradVarName("Y"),              \
                    this->InputGrad("Y"));                              \
    }                                                                   \
  }

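// Registers the forward op together with its maker, var-type inference,
// grad makers for both static-graph (OpDesc) and imperative (OpBase)
// modes, and the in-place inferer, e.g.
// REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(elementwise_sub, Sub).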
#define REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(op_type, op_name)    \
  REGISTER_OPERATOR(op_type, ::paddle::operators::ElementwiseOp,        \
                    ::paddle::operators::Elementwise##op_name##OpMaker, \
                    ::paddle::operators::ElementwiseOpInferVarType,     \
                    op_type##GradMaker<::paddle::framework::OpDesc>,    \
                    op_type##GradMaker<::paddle::imperative::OpBase>,   \
                    ::paddle::operators::ElementwiseOpInplaceInferer);