/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>  // for max
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/operators/common_infer_shape_functions.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

class ElementwiseOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  using Tensor = framework::Tensor;

  void InferShape(framework::InferShapeContext *ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ElementwiseOp");
    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "ElementwiseOp");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "ElementwiseOp");

    PADDLE_ENFORCE_EQ(
        ctx->GetInputsVarType("Y").front(),
        framework::proto::VarType::LOD_TENSOR,
        platform::errors::InvalidArgument(
            "The input var's type should be LoDTensor, but the "
            "received is %s [%s].",
            ctx->GetInputsVarType("Y").front(), ctx->Inputs("Y").front()));

    if (ctx->GetInputsVarType("X").front() ==
        framework::proto::VarType::SELECTED_ROWS) {
      PADDLE_ENFORCE_EQ(
          ctx->GetInputDim("Y").size(), 1u,
          platform::errors::InvalidArgument(
              "For elementwise_op, if X is Sparse(VarType.SELECTED_ROWS"
              "), Y must be scalar, the size of Y should be 1. "
              "But reveived the size of Y = %s.",
              ctx->GetInputDim("Y").size()));
      PADDLE_ENFORCE_EQ(
          ctx->GetInputDim("Y")[0], 1,
          platform::errors::InvalidArgument(
              "For elementwise_op, if X is Sparse(VarType.SELECTED_ROWS"
              "), Y must be scalar, the first dimension of Y should be 1. "
              "But reveived the first dimension of Y = %s.",
              ctx->GetInputDim("Y")[0]));
    } else if (ctx->GetInputsVarType("X").front() !=
               framework::proto::VarType::LOD_TENSOR) {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Input X's type[%s] is not supported by elementwise_op. Please set "
          "its type to LOD_TENSOR.",
          ctx->GetInputsVarType("X").front()));
    }

    if (ctx->GetInputDim("X") == ctx->GetInputDim("Y")) {
      ctx->ShareDim("X", /*->*/ "Out");
      ctx->ShareLoD("X", /*->*/ "Out");
    } else {
      auto x_dims = ctx->GetInputDim("X");
      auto y_dims = ctx->GetInputDim("Y");
      int max_dim = std::max(x_dims.size(), y_dims.size());
      int axis = ctx->Attrs().Get<int>("axis");
      if (x_dims.size() == y_dims.size()) {
        PADDLE_ENFORCE_EQ((axis == -1) || (axis == 0), true,
                          platform::errors::InvalidArgument(
                              "axis should be -1 or 0 while the dimension of "
                              "tensor X (%s) is equal to the dimension of "
                              "tensor Y (%s), but received axis: %s",
                              x_dims.size(), y_dims.size(), axis));
      }
      PADDLE_ENFORCE_EQ((axis >= (-1 * max_dim)) && (axis < max_dim), true,
                        platform::errors::InvalidArgument(
                            "The axis range must be [%s, %s), but axis is %s. "
                            "Please set the axis again.",
                            -1 * max_dim, max_dim, axis));
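      // Example: with shape(X) = (2, 3, 4, 5) and shape(Y) = (4, 5),
      // max_dim = 4 and the default axis = -1 maps to
      // |4 - 2| + (-1) + 1 = 2, aligning Y with the trailing dimensions of X.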
      axis = (axis < 0 ? (std::abs(x_dims.size() - y_dims.size()) + axis + 1)
                       : axis);
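      // GetBroadcastDimsArrays pads both shapes to max_dim entries starting
      // at axis, e.g. X(2, 3, 4, 5) + Y(4, 5) with axis = 2 yields
      // x_dims_array = (2, 3, 4, 5), y_dims_array = (1, 1, 4, 5) and
      // out_dims_array = (2, 3, 4, 5).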
      std::vector<int> x_dims_array(max_dim);
      std::vector<int> y_dims_array(max_dim);
      std::vector<int> out_dims_array(max_dim);
      GetBroadcastDimsArrays(x_dims, y_dims, x_dims_array.data(),
                             y_dims_array.data(), out_dims_array.data(),
                             max_dim, axis);
      ctx->SetOutputDim("Out", framework::make_ddim(out_dims_array));
      // TODO: the LoD of Out is simply shared from X for now; the broadcast
      // case may need its own LoD inference.
      ctx->ShareLoD("X", /*->*/ "Out");
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type =
        OperatorWithKernel::IndicateOrPromoteVarDataTypes(ctx, "X", "Y");

#ifdef PADDLE_WITH_MKLDNN
    if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const framework::Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const override {
    if (framework::IsComplexType(expected_kernel_type.data_type_)) {
      // Only promote the input types when the inputs contain a complex type.
      return framework::OpKernelType(tensor.type(), tensor.place(),
                                     tensor.layout());
    } else {
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), tensor.layout());
    }
  }

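  // Maps this op to the corresponding pten kernel signature when a dense
  // LoDTensor input is given; the fallback "None" signature below means no
  // pten kernel is matched.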
  framework::KernelSignature GetExpectedPtenKernelArgs(
      const framework::ExecutionContext &ctx) const override {
    if (Type() == "elementwise_add") {
      if (ctx.InputVar("X")->IsType<framework::LoDTensor>()) {
        return framework::KernelSignature("elementwise_add", {"X", "Y"},
                                          {"axis"}, {"Out"});
      }
    }
    if (Type() == "elementwise_sub") {
      if (ctx.InputVar("X")->IsType<framework::LoDTensor>()) {
        return framework::KernelSignature("elementwise_sub", {"X", "Y"},
                                          {"axis"}, {"Out"});
      }
    }
    return framework::KernelSignature("None", {"X"}, {}, {"Out"});
  }
};

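// Propagates X's data type and variable type to Out, following the
// X -> Out mapping returned below.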
class ElementwiseOpInferVarType
    : public framework::PassInDtypeAndVarTypeToOutput {
 protected:
  std::unordered_map<std::string, std::string> &GetInputOutputWithSameType()
      const override {
    static std::unordered_map<std::string, std::string> m{{"X", /*->*/ "Out"}};
    return m;
  }
};

class ElementwiseOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() final {
    AddInputX();
    AddInputY();
    AddOpOutput();

    AddAttr<int>("axis",
                 "(int, default -1). If X.dimension != Y.dimension,"
                 "Y.dimension must be a subsequence of x.dimension. And axis "
                 "is the start dimension index "
                 "for broadcasting Y onto X. ")
        .SetDefault(-1);
    AddAttr<bool>("use_mkldnn", "(bool, default false). Used by MKLDNN.")
        .SetDefault(false)
        .AsExtra();
    AddAttr<std::string>("x_data_format", "This parameter is no longer used.")
        .SetDefault("")
        .AsExtra();
    AddAttr<std::string>("y_data_format", "This parameter is no longer used.")
        .SetDefault("")
        .AsExtra();
    AddAttr<bool>(
        "use_quantizer",
        "(bool, default false) "
        "This parameter is no longer used. Use 'mkldnn_data_type' instead.")
        .SetDefault(false)
        .AsExtra();
    AddAttr<std::string>(
        "mkldnn_data_type",
        "(string, default \"float32\"). Data type of mkldnn kernel")
        .SetDefault("float32")
        .InEnum({"float32", "int8", "bfloat16"})
        .AsExtra();
    /* int8 parameters */
    AddAttr<float>("Scale_x",
                   "(float, default 1.0f), The quantize scale of X tensor")
        .SetDefault(1.0f)
        .AsExtra();
    AddAttr<float>("Scale_y",
                   "(float, default 1.0f), The quantize scale of Y tensor")
        .SetDefault(1.0f)
        .AsExtra();
    AddAttr<float>("Scale_out",
                   "(float, default 1.0f), The quantize scale of output data")
        .SetDefault(1.0f)
        .AsExtra();
    AddOpComment();
  }

 protected:
  virtual void AddInputX() {
    AddInput("X", "(Tensor), The first input tensor of elementwise op.");
  }
  virtual void AddInputY() {
    AddInput("Y", "(Tensor), The second input tensor of elementwise op.");
  }
  virtual void AddOpOutput() {
    AddOutput("Out",
              "N-dimension tensor. A location into which the result is stored. "
              "It's dimension "
              "equals with x");
  }
  virtual void AddOpComment() { AddComment(GetCommentExamples()); }

  virtual std::string GetOpFuntionality() const { return ""; }

  virtual std::string GetName() const = 0;
  virtual std::string GetEquation() const = 0;

  std::string GetCommentExamples() const {
    return string::Sprintf(R"DOC(
Elementwise %s Operator.

%s

The equation is:

$$%s$$

- $X$: a tensor of any dimension.
- $Y$: a tensor whose dimensions must be less than or equal to the dimensions of $X$.

There are two cases for this operator:

1. The shape of $Y$ is the same as the shape of $X$.
2. The shape of $Y$ is a continuous subsequence of the shape of $X$.

For case 2:

1. Broadcast $Y$ to match the shape of $X$, where $axis$ is the start dimension index
   for broadcasting $Y$ onto $X$.
2. If $axis$ is -1 (default), $axis = rank(X) - rank(Y)$.
3. The trailing dimensions of size 1 for $Y$ will be ignored when determining
   the subsequence, e.g. shape(Y) = (2, 1) => (2).

For example:

  .. code-block:: text

    shape(X) = (2, 3, 4, 5), shape(Y) = (,)
    shape(X) = (2, 3, 4, 5), shape(Y) = (5,)
    shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2
    shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
    shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
    shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0

)DOC",
                           GetName(), GetOpFuntionality(), GetEquation());
  }
};

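// A minimal sketch of a concrete maker built on ElementwiseOpMaker (a
// hypothetical example for illustration; the real makers live in the per-op
// .cc files):
//
//   class ElementwiseAddOpMaker : public ElementwiseOpMaker {
//    protected:
//     std::string GetName() const override { return "Add"; }
//     std::string GetEquation() const override { return "Out = X + Y"; }
//   };
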
class ElementwiseOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  using Tensor = framework::Tensor;

  void InferShape(framework::InferShapeContext *ctx) const override {
    auto out_grad_name = framework::GradVarName("Out");
    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "ElementwiseOpGrad");
    OP_INOUT_CHECK(ctx->HasInput(out_grad_name), "Input", out_grad_name,
                   "ElementwiseOpGrad");
    auto x_grad_name = framework::GradVarName("X");
    auto y_grad_name = framework::GradVarName("Y");
    if (ctx->HasOutput(x_grad_name)) {
      ctx->ShareDim("X", /*->*/ x_grad_name);
      ctx->ShareLoD("X", /*->*/ x_grad_name);
    }
    if (ctx->HasOutput(y_grad_name)) {
      ctx->ShareDim("Y", /*->*/ y_grad_name);
      ctx->ShareLoD("Y", /*->*/ y_grad_name);
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type = OperatorWithKernel::IndicateVarDataType(
        ctx, framework::GradVarName("Out"));

#ifdef PADDLE_WITH_MKLDNN
    // If broadcasting is needed, use native implementation
    auto CanMKLDNNElementwiseGradBeUsed = [&]() {
      auto dx_dims = ctx.Input<Tensor>("X")->dims();
      auto dy_dims = ctx.Input<Tensor>("Y")->dims();
      // No broadcast or broadcasting of data on inner dims is supported
      return (dx_dims[dx_dims.size() - 1] == dy_dims[dy_dims.size() - 1]);
    };

    if (this->CanMKLDNNBeUsed(ctx, input_data_type) &&
        CanMKLDNNElementwiseGradBeUsed()) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const framework::Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const override {
    if (framework::IsComplexType(expected_kernel_type.data_type_)) {
      // Only promote the input types when the inputs contain a complex type.
      return framework::OpKernelType(tensor.type(), tensor.place(),
                                     tensor.layout());
    } else {
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), tensor.layout());
    }
  }
};

class ElementwiseOpDoubleGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  using Tensor = framework::Tensor;

  void InferShape(framework::InferShapeContext *ctx) const override {
    auto x_grad_name = framework::GradVarName("X");
    auto y_grad_name = framework::GradVarName("Y");
    if (ctx->HasOutput(x_grad_name)) {
      ctx->ShareDim("X", x_grad_name);
      ctx->ShareLoD("X", x_grad_name);
    }
    if (ctx->HasOutput(y_grad_name)) {
      ctx->ShareDim("Y", y_grad_name);
      ctx->ShareLoD("Y", y_grad_name);
    }
    if (ctx->HasOutput("DDOut")) {
      ctx->ShareDim("DOut", "DDOut");
      ctx->ShareLoD("DOut", "DDOut");
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DOut");

#ifdef PADDLE_WITH_MKLDNN
    if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const framework::Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const {
    if (framework::IsComplexType(expected_kernel_type.data_type_)) {
      // Only promote the input types when the inputs contain a complex type.
      return framework::OpKernelType(tensor.type(), tensor.place(),
                                     tensor.layout());
    } else {
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), tensor.layout());
    }
  }
};

class ElementwiseOpDoubleGradWithoutDXDY
    : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  using Tensor = framework::Tensor;

  void InferShape(framework::InferShapeContext *ctx) const override {
    if (ctx->HasOutput("DDOut")) {
      ctx->ShareDim("DOut", "DDOut");
      ctx->ShareLoD("DOut", "DDOut");
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
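    // Choose the kernel dtype from whichever of DDX / DDY is fed; when both
    // are present, promote their types (e.g. if one of them is complex).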
    framework::proto::VarType::Type input_data_type;
    if (ctx.HasInput("DDX") == false) {
      OP_INOUT_CHECK(ctx.HasInput("DDY"), "Input", "DDY",
                     "ElementwiseOpDoubleGradWithoutDXDY");
      input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DDY");
    } else if (ctx.HasInput("DDY") == false) {
      OP_INOUT_CHECK(ctx.HasInput("DDX"), "Input", "DDX",
                     "ElementwiseOpDoubleGradWithoutDXDY");
      input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DDX");
    } else {
      input_data_type =
          OperatorWithKernel::IndicateOrPromoteVarDataTypes(ctx, "DDX", "DDY");
    }

#ifdef PADDLE_WITH_MKLDNN
    if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const framework::Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const {
    if (framework::IsComplexType(expected_kernel_type.data_type_)) {
      // Only promote the input types when the inputs contain a complex type.
      return framework::OpKernelType(tensor.type(), tensor.place(),
                                     tensor.layout());
    } else {
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), tensor.layout());
    }
  }
};

class ElementwiseOpTripleGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  using Tensor = framework::Tensor;

  void InferShape(framework::InferShapeContext *ctx) const override {
    if (ctx->HasOutput("D_DDX")) {
      ctx->ShareDim("DDX", "D_DDX");
      ctx->ShareLoD("DDX", "D_DDX");
    }
    if (ctx->HasOutput("D_DDY")) {
      ctx->ShareDim("DDY", "D_DDY");
      ctx->ShareLoD("DDY", "D_DDY");
    }
    if (ctx->HasOutput("D_X")) {
      ctx->ShareDim("X", "D_X");
      ctx->ShareLoD("X", "D_X");
    }
    if (ctx->HasOutput("D_Y")) {
      ctx->ShareDim("Y", "D_Y");
      ctx->ShareLoD("Y", "D_Y");
    }
    if (ctx->HasOutput("D_DOut")) {
      ctx->ShareDim("DOut", "D_DOut");
      ctx->ShareLoD("DOut", "D_DOut");
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type =
        OperatorWithKernel::IndicateVarDataType(ctx, "D_DDOut");

#ifdef PADDLE_WITH_MKLDNN
    if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const framework::Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const {
    if (framework::IsComplexType(expected_kernel_type.data_type_)) {
      // Only promote the input types when the inputs contain a complex type.
      return framework::OpKernelType(tensor.type(), tensor.place(),
                                     tensor.layout());
    } else {
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), tensor.layout());
    }
  }
};

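// Base kernel for elementwise grad ops: copies the LoD of dOut to dX so that
// LoD information is preserved through the backward pass.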
template <typename T>
class ElemwiseGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    auto *dx =
        context.Output<framework::LoDTensor>(framework::GradVarName("X"));
    if (dx != nullptr) {
      auto &dout =
          *context.Input<framework::LoDTensor>(framework::GradVarName("Out"));
      dx->set_lod(dout.lod());
    }
  }
};

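// Inplace pairs: the forward op may write Out into X's buffer; the grad ops
// may reuse the listed inputs in the same way.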
DECLARE_INPLACE_OP_INFERER(ElementwiseOpInplaceInferer, {"X", "Out"});
DECLARE_INPLACE_OP_INFERER(ElementwiseGradOpInplaceInferer,
                           {framework::GradVarName("Out"),
                            framework::GradVarName("X")});
DECLARE_INPLACE_OP_INFERER(ElementwiseDoubleGradOpInplaceInferer,
                           {"DDX", "DDOut"});

DECLARE_INPLACE_OP_INFERER(ElementwiseTripleGradOpInplaceInferer,
                           {"D_DDOut", "D_DDX"});

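// The listed inputs are not read by the corresponding grad kernels (only
// their metadata is needed), so their buffers can be freed early.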
DECLARE_NO_NEED_BUFFER_VARS_INFERER(ElementwiseGradNoBufVarsInferer, "X", "Y");
DECLARE_NO_NEED_BUFFER_VARS_INFERER(ElementwiseDoubleGradNoBufVarsInferer, "Y",
                                    "DOut");
DECLARE_NO_NEED_BUFFER_VARS_INFERER(ElementwiseTripleGradNoBufVarsInferer,
                                    "DDX", "DDY");

}  // namespace operators
}  // namespace paddle
#define REGISTER_ELEMWISE_GRAD_MAKER(kernel_type, op_name)              \
  template <typename T>                                                 \
  class kernel_type##GradMaker                                          \
      : public paddle::framework::SingleGradOpMaker<T> {                \
   public:                                                              \
    using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker; \
                                                                        \
   protected:                                                           \
    void Apply(::paddle::framework::GradOpPtr<T> op) const override {   \
      op->SetType(#kernel_type "_grad");                                \
      op->SetInput("X", this->Input("X"));                              \
      op->SetInput("Y", this->Input("Y"));                              \
      op->SetInput(::paddle::framework::GradVarName("Out"),             \
                   this->OutputGrad("Out"));                            \
      op->SetAttrMap(this->Attrs());                                    \
      op->SetOutput(::paddle::framework::GradVarName("X"),              \
                    this->InputGrad("X"));                              \
      op->SetOutput(::paddle::framework::GradVarName("Y"),              \
                    this->InputGrad("Y"));                              \
    }                                                                   \
  }

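// Typical invocation (the op name is a hypothetical example):
//
//   REGISTER_ELEMWISE_GRAD_MAKER(elementwise_mul, Mul);
//
// which defines elementwise_mulGradMaker, wiring X, Y and the gradient of Out
// into an "elementwise_mul_grad" op.
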
#define REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(op_type, op_name)    \
  REGISTER_OPERATOR(op_type, ::paddle::operators::ElementwiseOp,        \
                    ::paddle::operators::Elementwise##op_name##OpMaker, \
                    ::paddle::operators::ElementwiseOpInferVarType,     \
                    op_type##GradMaker<::paddle::framework::OpDesc>,    \
                    op_type##GradMaker<::paddle::imperative::OpBase>,   \
                    ::paddle::operators::ElementwiseOpInplaceInferer);
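
// Typical invocation (the op name is a hypothetical example), paired with a
// grad maker generated by the macro above:
//
//   REGISTER_ELEMWISE_GRAD_MAKER(elementwise_sub, Sub);
//   REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(elementwise_sub, Sub);
//
// This registers the forward op together with its proto maker, var-type
// inference, grad makers for both static (OpDesc) and imperative (OpBase)
// modes, and the inplace inferer.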