/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>  // for max
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/operators/common_infer_shape_functions.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

class ElementwiseOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  using Tensor = framework::Tensor;

  void InferShape(framework::InferShapeContext *ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ElementwiseOp");
    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "ElementwiseOp");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "ElementwiseOp");

    PADDLE_ENFORCE_EQ(
        ctx->GetInputsVarType("Y").front(),
        framework::proto::VarType::LOD_TENSOR,
        platform::errors::InvalidArgument(
            "The input var's type should be LoDTensor, but the "
            "received is %s [%s].",
            ctx->GetInputsVarType("Y").front(), ctx->Inputs("Y").front()));

    if (ctx->GetInputsVarType("X").front() ==
        framework::proto::VarType::SELECTED_ROWS) {
      PADDLE_ENFORCE_EQ(
          ctx->GetInputDim("Y").size(), 1u,
          platform::errors::InvalidArgument(
              "For elementwise_op, if X is Sparse(VarType.SELECTED_ROWS"
              "), Y must be scalar, the size of Y should be 1. "
              "But reveived the size of Y = %s.",
              ctx->GetInputDim("Y").size()));
      PADDLE_ENFORCE_EQ(
          ctx->GetInputDim("Y")[0], 1,
          platform::errors::InvalidArgument(
              "For elementwise_op, if X is Sparse(VarType.SELECTED_ROWS"
              "), Y must be scalar, the first dimension of Y should be 1. "
              "But reveived the first dimension of Y = %s.",
              ctx->GetInputDim("Y")[0]));
    } else if (ctx->GetInputsVarType("X").front() !=
               framework::proto::VarType::LOD_TENSOR) {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Input X's type[%s] is not supported by elementwise_op. Please set "
          "its type to LOD_TENSOR.",
          ctx->GetInputsVarType("X").front()));
    }

    if (ctx->GetInputDim("X") == ctx->GetInputDim("Y")) {
      ctx->ShareDim("X", /*->*/ "Out");
      ctx->ShareLoD("X", /*->*/ "Out");
    } else {
      auto x_dims = ctx->GetInputDim("X");
      auto y_dims = ctx->GetInputDim("Y");
      int max_dim = std::max(x_dims.size(), y_dims.size());
      int axis = ctx->Attrs().Get<int>("axis");
      if (x_dims.size() == y_dims.size()) {
        PADDLE_ENFORCE_EQ((axis == -1) || (axis == 0), true,
                          platform::errors::InvalidArgument(
                              "axis should be -1 or 0 while the dimension of "
                              "tensor X (%s) is equal to the dimension of "
                              "tensor Y (%s), but received axis: %s",
                              x_dims.size(), y_dims.size(), axis));
      }
      PADDLE_ENFORCE_EQ((axis >= (-1 * max_dim)) && (axis < max_dim), true,
                        platform::errors::InvalidArgument(
                            "The axis range must be [%s, %s), but axis is %s. "
                            "Please set the axis again.",
                            -1 * max_dim, max_dim, axis));
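      // Normalize a negative axis, e.g. with shape(X) = (2, 3, 4, 5) and
      // shape(Y) = (4, 5): axis = -1 becomes |4 - 2| + (-1) + 1 = 2, i.e.
      // Y is broadcast starting at dimension 2 of X.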
      axis = (axis < 0 ? (std::abs(x_dims.size() - y_dims.size()) + axis + 1)
                       : axis);
      std::vector<int> x_dims_array(max_dim);
      std::vector<int> y_dims_array(max_dim);
      std::vector<int> out_dims_array(max_dim);
      GetBroadcastDimsArrays(x_dims, y_dims, x_dims_array.data(),
                             y_dims_array.data(), out_dims_array.data(),
                             max_dim, axis);
      ctx->SetOutputDim("Out", framework::make_ddim(out_dims_array));
      // to do
      ctx->ShareLoD("X", /*->*/ "Out");
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type =
        OperatorWithKernel::IndicateOrPromoteVarDataTypes(ctx, "X", "Y");

#ifdef PADDLE_WITH_MKLDNN
    if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const framework::Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const override {
    if (framework::IsComplexType(expected_kernel_type.data_type_)) {
      // only promote the input's own data type when the expected kernel
      // data type is complex
      return framework::OpKernelType(tensor.type(), tensor.place(),
                                     tensor.layout());
    } else {
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), tensor.layout());
    }
  }

  framework::KernelSignature GetExpectedPtenKernelArgs(
      const framework::ExecutionContext &ctx) const override {
    int axis = ctx.Attr<int>("axis");
    if (Type() == "elementwise_add") {
      if (ctx.InputVar("X")->IsType<framework::LoDTensor>()) {
        if (axis == -1) {
          return framework::KernelSignature("add", {"X", "Y"}, {}, {"Out"});
        }
        return framework::KernelSignature("add_raw", {"X", "Y"}, {"axis"},
                                          {"Out"});
      }
    }
    if (Type() == "elementwise_sub") {
      if (ctx.InputVar("X")->IsType<framework::LoDTensor>()) {
        if (axis == -1) {
          return framework::KernelSignature("subtract", {"X", "Y"}, {},
                                            {"Out"});
        }
        return framework::KernelSignature("subtract_raw", {"X", "Y"}, {"axis"},
                                          {"Out"});
      }
    }
    if (Type() == "elementwise_div") {
      if (ctx.InputVar("X")->IsType<framework::LoDTensor>()) {
        if (axis == -1) {
          return framework::KernelSignature("divide", {"X", "Y"}, {}, {"Out"});
        }
        return framework::KernelSignature("divide_raw", {"X", "Y"}, {"axis"},
                                          {"Out"});
      }
    }
    if (Type() == "elementwise_mul") {
      if (ctx.InputVar("X")->IsType<framework::LoDTensor>()) {
        if (axis == -1) {
          return framework::KernelSignature("multiply", {"X", "Y"}, {},
                                            {"Out"});
        }
        return framework::KernelSignature("multiply_raw", {"X", "Y"}, {"axis"},
                                          {"Out"});
      }
    }
    return framework::KernelSignature("None", {"X"}, {}, {"Out"});
  }
};

class ElementwiseOpInferVarType
    : public framework::PassInDtypeAndVarTypeToOutput {
 protected:
  std::unordered_map<std::string, std::string> &GetInputOutputWithSameType()
      const override {
    static std::unordered_map<std::string, std::string> m{{"X", /*->*/ "Out"}};
    return m;
  }
};

class ElementwiseOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() final {
    AddInputX();
    AddInputY();
    AddOpOutput();

    AddAttr<int>("axis",
                 "(int, default -1). If X.dimension != Y.dimension,"
                 "Y.dimension must be a subsequence of x.dimension. And axis "
                 "is the start dimension index "
                 "for broadcasting Y onto X. ")
        .SetDefault(-1);
    AddAttr<bool>("use_mkldnn", "(bool, default false). Used by MKLDNN.")
        .SetDefault(false)
        .AsExtra();
    AddAttr<std::string>("x_data_format", "This parameter is no longer used.")
        .SetDefault("")
        .AsExtra();
    AddAttr<std::string>("y_data_format", "This parameter is no longer used.")
        .SetDefault("")
        .AsExtra();
    AddAttr<bool>(
        "use_quantizer",
        "(bool, default false) "
        "This parameter is no longer used. Use 'mkldnn_data_type' instead.")
        .SetDefault(false)
        .AsExtra();
    AddAttr<std::string>(
        "mkldnn_data_type",
        "(string, default \"float32\"). Data type of mkldnn kernel")
        .SetDefault("float32")
        .InEnum({"float32", "int8", "bfloat16"})
        .AsExtra();
    /* int8 parameters */
    AddAttr<float>("Scale_x",
                   "(float, default 1.0f), The quantize scale of X tensor")
        .SetDefault(1.0f)
        .AsExtra();
    AddAttr<float>("Scale_y",
                   "(float, default 1.0f), The quantize scale of Y tensor")
        .SetDefault(1.0f)
        .AsExtra();
    AddAttr<float>("Scale_out",
                   "(float, default 1.0f), The quantize scale of output data")
        .SetDefault(1.0f)
        .AsExtra();
    AddOpComment();
  }

 protected:
  virtual void AddInputX() {
    AddInput("X", "(Tensor), The first input tensor of elementwise op.");
  }
  virtual void AddInputY() {
    AddInput("Y", "(Tensor), The second input tensor of elementwise op.");
  }
  virtual void AddOpOutput() {
    AddOutput("Out",
              "N-dimension tensor. A location into which the result is stored. "
              "It's dimension "
              "equals with x");
  }
  virtual void AddOpComment() { AddComment(GetCommentExamples()); }

  virtual std::string GetOpFuntionality() const { return ""; }

  virtual std::string GetName() const = 0;
  virtual std::string GetEquation() const = 0;

  std::string GetCommentExamples() const {
    return string::Sprintf(R"DOC(
Elementwise %s Operator.

%s

The equation is:

$$%s$$

- $X$: a tensor of any dimension.
- $Y$: a tensor whose dimensions must be less than or equal to the dimensions of $X$.

There are two cases for this operator:

1. The shape of $Y$ is the same as that of $X$.
2. The shape of $Y$ is a continuous subsequence of $X$.

For case 2:

1. Broadcast $Y$ to match the shape of $X$, where $axis$ is the start dimension index
   for broadcasting $Y$ onto $X$.
2. If $axis$ is -1 (default), $axis = rank(X) - rank(Y)$.
3. The trailing dimensions of size 1 for $Y$ will be ignored when determining the
   subsequence, such as shape(Y) = (2, 1) => (2).

For example:

  .. code-block:: text

    shape(X) = (2, 3, 4, 5), shape(Y) = (,)
    shape(X) = (2, 3, 4, 5), shape(Y) = (5,)
    shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2
    shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
    shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
    shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0

)DOC",
                           GetName(), GetOpFuntionality(), GetEquation());
  }
};
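
// A minimal sketch (illustrative only, not part of this header) of how a
// concrete op provides its maker by overriding GetName() and GetEquation():
//
//   class ElementwiseFooOpMaker : public ElementwiseOpMaker {
//    protected:
//     std::string GetName() const override { return "Foo"; }
//     std::string GetEquation() const override { return "Out = X + Y"; }
//   };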

class ElementwiseOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  using Tensor = framework::Tensor;

  void InferShape(framework::InferShapeContext *ctx) const override {
    auto out_grad_name = framework::GradVarName("Out");
    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "ElementwiseOpGrad");
    OP_INOUT_CHECK(ctx->HasInput(out_grad_name), "Input", out_grad_name,
                   "ElementwiseOpGrad");
    auto x_grad_name = framework::GradVarName("X");
    auto y_grad_name = framework::GradVarName("Y");
    if (ctx->HasOutput(x_grad_name)) {
      ctx->ShareDim("X", /*->*/ x_grad_name);
      ctx->ShareLoD("X", /*->*/ x_grad_name);
    }
    if (ctx->HasOutput(y_grad_name)) {
      ctx->ShareDim("Y", /*->*/ y_grad_name);
      ctx->ShareLoD("Y", /*->*/ y_grad_name);
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type = OperatorWithKernel::IndicateVarDataType(
        ctx, framework::GradVarName("Out"));

#ifdef PADDLE_WITH_MKLDNN
    if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const framework::Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const override {
    if (framework::IsComplexType(expected_kernel_type.data_type_)) {
      // only promote the input's own data type when the expected kernel
      // data type is complex
      return framework::OpKernelType(tensor.type(), tensor.place(),
                                     tensor.layout());
    } else {
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), tensor.layout());
    }
  }
};

class ElementwiseOpDoubleGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  using Tensor = framework::Tensor;

  void InferShape(framework::InferShapeContext *ctx) const override {
    auto x_grad_name = framework::GradVarName("X");
    auto y_grad_name = framework::GradVarName("Y");
    if (ctx->HasOutput(x_grad_name)) {
      ctx->ShareDim("X", x_grad_name);
      ctx->ShareLoD("X", x_grad_name);
    }
    if (ctx->HasOutput(y_grad_name)) {
      ctx->ShareDim("Y", y_grad_name);
      ctx->ShareLoD("Y", y_grad_name);
    }
    if (ctx->HasOutput("DDOut")) {
      ctx->ShareDim("DOut", "DDOut");
      ctx->ShareLoD("DOut", "DDOut");
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DOut");

#ifdef PADDLE_WITH_MKLDNN
    if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const framework::Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const {
    if (framework::IsComplexType(expected_kernel_type.data_type_)) {
      // only promote the input's own data type when the expected kernel
      // data type is complex
      return framework::OpKernelType(tensor.type(), tensor.place(),
                                     tensor.layout());
    } else {
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), tensor.layout());
    }
  }
};

class ElementwiseOpDoubleGradWithoutDXDY
    : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  using Tensor = framework::Tensor;

  void InferShape(framework::InferShapeContext *ctx) const override {
    if (ctx->HasOutput("DDOut")) {
      ctx->ShareDim("DOut", "DDOut");
      ctx->ShareLoD("DOut", "DDOut");
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    framework::proto::VarType::Type input_data_type;
    if (ctx.HasInput("DDX") == false) {
      OP_INOUT_CHECK(ctx.HasInput("DDY"), "Input", "DDY",
                     "ElementwiseOpDoubleGradWithoutDXDY");
      input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DDY");
    } else if (ctx.HasInput("DDY") == false) {
      OP_INOUT_CHECK(ctx.HasInput("DDX"), "Input", "DDX",
                     "ElementwiseOpDoubleGradWithoutDXDY");
      input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DDX");
    } else {
      input_data_type =
          OperatorWithKernel::IndicateOrPromoteVarDataTypes(ctx, "DDX", "DDY");
    }

#ifdef PADDLE_WITH_MKLDNN
    if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const framework::Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const {
    if (framework::IsComplexType(expected_kernel_type.data_type_)) {
      // only promote the input's own data type when the expected kernel
      // data type is complex
      return framework::OpKernelType(tensor.type(), tensor.place(),
                                     tensor.layout());
    } else {
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), tensor.layout());
    }
  }
};

class ElementwiseOpTripleGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  using Tensor = framework::Tensor;

  void InferShape(framework::InferShapeContext *ctx) const override {
    if (ctx->HasOutput("D_DDX")) {
      ctx->ShareDim("DDX", "D_DDX");
      ctx->ShareLoD("DDX", "D_DDX");
    }
    if (ctx->HasOutput("D_DDY")) {
      ctx->ShareDim("DDY", "D_DDY");
      ctx->ShareLoD("DDY", "D_DDY");
    }
    if (ctx->HasOutput("D_X")) {
      ctx->ShareDim("X", "D_X");
      ctx->ShareLoD("X", "D_X");
    }
    if (ctx->HasOutput("D_Y")) {
      ctx->ShareDim("Y", "D_Y");
      ctx->ShareLoD("Y", "D_Y");
    }
    if (ctx->HasOutput("D_DOut")) {
      ctx->ShareDim("DOut", "D_DOut");
      ctx->ShareLoD("DOut", "D_DOut");
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    framework::proto::VarType::Type input_data_type;
    input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "D_DDOut");

#ifdef PADDLE_WITH_MKLDNN
    if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const framework::Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const {
    if (framework::IsComplexType(expected_kernel_type.data_type_)) {
      // only promote the input's own data type when the expected kernel
      // data type is complex
      return framework::OpKernelType(tensor.type(), tensor.place(),
                                     tensor.layout());
    } else {
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), tensor.layout());
    }
  }
};

template <typename T>
class ElemwiseGradKernel : public framework::OpKernel<T> {
 public:
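  // Common base for elementwise grad kernels: Compute() forwards the LoD
  // of dOut to dX when dX is an output (dX matches the shape and LoD of X).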
  void Compute(const framework::ExecutionContext &context) const override {
    auto *dx =
524 525
        context.Output<framework::LoDTensor>(framework::GradVarName("X"));
    if (dx != nullptr) {
      auto &dout =
          *context.Input<framework::LoDTensor>(framework::GradVarName("Out"));
      dx->set_lod(dout.lod());
    }
  }
};

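// The inplace inferers below declare which output may reuse its input's
// buffer (e.g. Out may share X's memory). The no-need-buffer inferers mark
// inputs whose contents are not read by the corresponding grad kernels, so
// only their shape metadata has to be retained.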
DECLARE_INPLACE_OP_INFERER(ElementwiseOpInplaceInferer, {"X", "Out"});
DECLARE_INPLACE_OP_INFERER(ElementwiseGradOpInplaceInferer,
                           {framework::GradVarName("Out"),
                            framework::GradVarName("X")});
DECLARE_INPLACE_OP_INFERER(ElementwiseDoubleGradOpInplaceInferer,
                           {"DDX", "DDOut"});

DECLARE_INPLACE_OP_INFERER(ElementwiseTripleGradOpInplaceInferer,
                           {"D_DDOut", "D_DDX"});

DECLARE_NO_NEED_BUFFER_VARS_INFERER(ElementwiseGradNoBufVarsInferer, "X", "Y");
DECLARE_NO_NEED_BUFFER_VARS_INFERER(ElementwiseDoubleGradNoBufVarsInferer, "Y",
                                    "DOut");
DECLARE_NO_NEED_BUFFER_VARS_INFERER(ElementwiseTripleGradNoBufVarsInferer,
                                    "DDX", "DDY");

}  // namespace operators
}  // namespace paddle
#define REGISTER_ELEMWISE_GRAD_MAKER(kernel_type, op_name)              \
  template <typename T>                                                 \
  class kernel_type##GradMaker                                          \
      : public paddle::framework::SingleGradOpMaker<T> {                \
   public:                                                              \
    using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker; \
                                                                        \
   protected:                                                           \
    void Apply(::paddle::framework::GradOpPtr<T> op) const override {   \
      op->SetType(#kernel_type "_grad");                                \
      op->SetInput("X", this->Input("X"));                              \
      op->SetInput("Y", this->Input("Y"));                              \
      op->SetInput(::paddle::framework::GradVarName("Out"),             \
                   this->OutputGrad("Out"));                            \
      op->SetAttrMap(this->Attrs());                                    \
      op->SetOutput(::paddle::framework::GradVarName("X"),              \
                    this->InputGrad("X"));                              \
      op->SetOutput(::paddle::framework::GradVarName("Y"),              \
                    this->InputGrad("Y"));                              \
    }                                                                   \
  }
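
// Illustrative use (the op name is an example only):
//   REGISTER_ELEMWISE_GRAD_MAKER(elementwise_foo, Foo);
// defines elementwise_fooGradMaker, whose generated grad op has the type
// "elementwise_foo_grad" and takes X, Y and GradVarName("Out") as inputs.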

#define REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(op_type, op_name)    \
  REGISTER_OPERATOR(op_type, ::paddle::operators::ElementwiseOp,        \
                    ::paddle::operators::Elementwise##op_name##OpMaker, \
                    ::paddle::operators::ElementwiseOpInferVarType,     \
                    op_type##GradMaker<::paddle::framework::OpDesc>,    \
                    op_type##GradMaker<::paddle::imperative::OpBase>,   \
                    ::paddle::operators::ElementwiseOpInplaceInferer);
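
// Illustrative use (op names are examples only): a hypothetical
// elementwise_foo op with the maker and grad maker sketched above would be
// registered as:
//   REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(elementwise_foo, Foo);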