/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>  // for max
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/operators/common_infer_shape_functions.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

class ElementwiseOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  using Tensor = framework::Tensor;

  void InferShape(framework::InferShapeContext *ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ElementwiseOp");
    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "ElementwiseOp");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "ElementwiseOp");

    PADDLE_ENFORCE_EQ(
        ctx->GetInputsVarType("Y").front(),
        framework::proto::VarType::LOD_TENSOR,
        platform::errors::InvalidArgument(
            "The input var's type should be LoDTensor, but the "
            "received is %s [%s].",
            ctx->GetInputsVarType("Y").front(), ctx->Inputs("Y").front()));

    if (ctx->GetInputsVarType("X").front() ==
        framework::proto::VarType::SELECTED_ROWS) {
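      // A SELECTED_ROWS X holds only a sparse subset of rows, so general
      // broadcasting is not defined for it; Y must be a single-element
      // scalar tensor.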
      PADDLE_ENFORCE_EQ(
          ctx->GetInputDim("Y").size(), 1u,
          platform::errors::InvalidArgument(
              "For elementwise_op, if X is Sparse(VarType.SELECTED_ROWS"
              "), Y must be scalar, the size of Y should be 1. "
              "But received the size of Y = %s.",
              ctx->GetInputDim("Y").size()));
      PADDLE_ENFORCE_EQ(
          ctx->GetInputDim("Y")[0], 1,
          platform::errors::InvalidArgument(
              "For elementwise_op, if X is Sparse(VarType.SELECTED_ROWS"
              "), Y must be scalar, the first dimension of Y should be 1. "
              "But received the first dimension of Y = %s.",
              ctx->GetInputDim("Y")[0]));
    } else if (ctx->GetInputsVarType("X").front() !=
               framework::proto::VarType::LOD_TENSOR) {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Input X's type[%s] is not supported by elementwise_op. Please set "
          "its type to LOD_TENSOR.",
          ctx->GetInputsVarType("X").front()));
    }

    if (ctx->GetInputDim("X") == ctx->GetInputDim("Y")) {
      ctx->ShareDim("X", /*->*/ "Out");
      ctx->ShareLoD("X", /*->*/ "Out");
    } else {
      auto x_dims = ctx->GetInputDim("X");
      auto y_dims = ctx->GetInputDim("Y");
      int max_dim = std::max(x_dims.size(), y_dims.size());
      int axis = ctx->Attrs().Get<int>("axis");
      if (x_dims.size() == y_dims.size()) {
        PADDLE_ENFORCE_EQ((axis == -1) || (axis == 0), true,
                          platform::errors::InvalidArgument(
                              "axis should be -1 or 0 while the dimension of "
                              "tensor X (%s) is equal to the dimension of "
                              "tensor Y (%s), but received axis: %s",
                              x_dims.size(), y_dims.size(), axis));
      }
      PADDLE_ENFORCE_EQ((axis >= (-1 * max_dim)) && (axis < max_dim), true,
                        platform::errors::InvalidArgument(
                            "The axis range must be [%s, %s), but axis is %s. "
                            "Please set the axis again.",
                            -1 * max_dim, max_dim, axis));
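      // Normalize a negative axis to its equivalent non-negative index on
      // the longer shape. E.g. (illustrative): rank(X) = 4, rank(Y) = 2,
      // axis = -1  =>  axis = |4 - 2| + (-1) + 1 = 2.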
      axis = (axis < 0 ? (std::abs(x_dims.size() - y_dims.size()) + axis + 1)
                       : axis);
      std::vector<int> x_dims_array(max_dim);
      std::vector<int> y_dims_array(max_dim);
      std::vector<int> out_dims_array(max_dim);
#ifdef PADDLE_WITH_MKLDNN
      // (jczaja): Broadcasting of dims has to be done on Paddle shapes (NHWC)
      // if the model uses NHWC and any of the shapes is at least 3-D
      bool should_rotate =
          ctx->IsRunMKLDNNKernel() &&
          (platform::MKLDNNDeviceContext::tls().get_cur_paddle_data_layout() ==
           framework::DataLayout::kNHWC) &&
          (x_dims.size() >= 3 || y_dims.size() >= 3);
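      // e.g. (illustrative): with the NHWC layout active, stored NCHW dims
      // (2, 3, 4, 5) are rotated below to the NHWC order (2, 4, 5, 3), and
      // the broadcast result is rotated back to NCHW afterwards.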
      if (should_rotate) {
        // Pick bigger shape and rotate this one
        bool x_over_y = (x_dims.size() > y_dims.size());
        auto vdims = x_over_y ? phi::vectorize<int>(x_dims)
                              : phi::vectorize<int>(y_dims);
        std::rotate(vdims.begin() + 1, vdims.begin() + 2, vdims.end());
        if (x_over_y) {
          x_dims = phi::make_ddim(vdims);
        } else {
          y_dims = phi::make_ddim(vdims);
        }
      }
#endif

      GetBroadcastDimsArrays(x_dims, y_dims, x_dims_array.data(),
                             y_dims_array.data(), out_dims_array.data(),
                             max_dim, axis);
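      // e.g. (illustrative): for x_dims = (2, 3, 4, 5), y_dims = (4, 5) and
      // axis = 2, the call above fills y_dims_array = {1, 1, 4, 5} and
      // out_dims_array = {2, 3, 4, 5}.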
#ifdef PADDLE_WITH_MKLDNN
      // Now rotate shape back if needed (NHWC -> NCHW)
      if (should_rotate) {
        std::rotate(out_dims_array.begin() + 1, out_dims_array.end() - 1,
                    out_dims_array.end());
      }
#endif
      ctx->SetOutputDim("Out", phi::make_ddim(out_dims_array));
      // to do
      ctx->ShareLoD("X", /*->*/ "Out");
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type =
        OperatorWithKernel::IndicateOrPromoteVarDataTypes(ctx, "X", "Y");

#ifdef PADDLE_WITH_MKLDNN
    if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const framework::Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const override {
    if (framework::IsComplexType(expected_kernel_type.data_type_)) {
      // only promote inputs' types when the op contains complex inputs
      return framework::OpKernelType(
          framework::TransToProtoVarType(tensor.dtype()), tensor.place(),
          tensor.layout());
    } else {
#ifdef PADDLE_WITH_MKLDNN
      // When elementwise is the first oneDNN op (preceded by a non-oneDNN
      // op), we also need to rotate the shape NHWC -> NCHW
      if ((expected_kernel_type.data_layout_ ==
           framework::DataLayout::kMKLDNN) &&
          (tensor.layout() != framework::DataLayout::kMKLDNN) &&
          paddle::platform::MKLDNNDeviceContext::tls()
                  .get_cur_paddle_data_layout() ==
              framework::DataLayout::kNHWC) {
        return framework::OpKernelType(expected_kernel_type.data_type_,
                                       tensor.place(),
                                       framework::DataLayout::kNHWC);
      }
#endif
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), tensor.layout());
    }
  }
};

class ElementwiseOpInferVarType
    : public framework::PassInDtypeAndVarTypeToOutput {
 protected:
  std::unordered_map<std::string, std::string> &GetInputOutputWithSameType()
      const override {
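    // Out inherits its dtype and variable type from X.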
    static std::unordered_map<std::string, std::string> m{{"X", /*->*/ "Out"}};
    return m;
  }
};

class ElementwiseOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() final {
    AddInputX();
    AddInputY();
    AddOpOutput();

    AddAttr<int>("axis",
                 "(int, default -1). If X.dimension != Y.dimension, "
                 "Y.dimension must be a continuous subsequence of "
                 "X.dimension, and axis is the start dimension index "
                 "for broadcasting Y onto X. ")
        .SetDefault(-1);
    AddAttr<bool>("use_mkldnn", "(bool, default false). Used by MKLDNN.")
        .SetDefault(false)
        .AsExtra();
    AddAttr<std::string>("x_data_format", "This parameter is no longer used.")
        .SetDefault("")
        .AsExtra();
    AddAttr<std::string>("y_data_format", "This parameter is no longer used.")
        .SetDefault("")
        .AsExtra();
    AddAttr<bool>(
        "use_quantizer",
        "(bool, default false) "
        "This parameter is no longer used. Use 'mkldnn_data_type' instead.")
        .SetDefault(false)
        .AsExtra();
    AddAttr<std::string>(
        "mkldnn_data_type",
        "(string, default \"float32\"). Data type of mkldnn kernel")
        .SetDefault("float32")
        .InEnum({"float32", "int8", "bfloat16"})
        .AsExtra();
    /* int8 parameters */
    AddAttr<float>("Scale_x",
                   "(float, default 1.0f), The quantize scale of X tensor")
        .SetDefault(1.0f)
        .AsExtra();
    AddAttr<float>("Scale_y",
                   "(float, default 1.0f), The quantize scale of Y tensor")
        .SetDefault(1.0f)
        .AsExtra();
    AddAttr<float>("Scale_out",
                   "(float, default 1.0f), The quantize scale of output data")
        .SetDefault(1.0f)
        .AsExtra();
    AddOpComment();
  }

 protected:
  virtual void AddInputX() {
    AddInput("X", "(Tensor), The first input tensor of elementwise op.");
  }
  virtual void AddInputY() {
    AddInput("Y", "(Tensor), The second input tensor of elementwise op.");
  }
  virtual void AddOpOutput() {
    AddOutput("Out",
              "N-dimension tensor. A location into which the result is stored. "
              "Its dimensions equal those of X.");
  }
  virtual void AddOpComment() { AddComment(GetCommentExamples()); }

  virtual std::string GetOpFuntionality() const { return ""; }

  virtual std::string GetName() const = 0;
  virtual std::string GetEquation() const = 0;

  std::string GetCommentExamples() const {
    return string::Sprintf(R"DOC(
Elementwise %s Operator.

%s

The equation is:

$$%s$$

- $X$: a tensor of any dimension.
- $Y$: a tensor whose dimensions must be less than or equal to the dimensions of $X$.

There are two cases for this operator:

1. The shape of $Y$ is the same as that of $X$.
2. The shape of $Y$ is a continuous subsequence of $X$.

For case 2:

1. Broadcast $Y$ to match the shape of $X$, where $axis$ is the start dimension index
   for broadcasting $Y$ onto $X$.
2. If $axis$ is -1 (default), $axis = rank(X) - rank(Y)$.
3. The trailing dimensions of size 1 for $Y$ will be ignored for the consideration of
   subsequence, such as shape(Y) = (2, 1) => (2).

For example:

  .. code-block:: text

    shape(X) = (2, 3, 4, 5), shape(Y) = (,)
    shape(X) = (2, 3, 4, 5), shape(Y) = (5,)
    shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2
    shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
    shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
    shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0

)DOC",
                           GetName(), GetOpFuntionality(), GetEquation());
  }
};

class ElementwiseOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  using Tensor = framework::Tensor;

C
chengduo 已提交
314
  void InferShape(framework::InferShapeContext *ctx) const override {
315
    auto out_grad_name = framework::GradVarName("Out");
316 317 318
    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "ElementwiseOpGrad");
    OP_INOUT_CHECK(ctx->HasInput(out_grad_name), "Input", out_grad_name,
                   "ElementwiseOpGrad");
Q
    auto y_grad_name = framework::GradVarName("Y");
    if (ctx->HasOutput(x_grad_name)) {
322 323
      ctx->ShareDim("X", /*->*/ x_grad_name);
      ctx->ShareLoD("X", /*->*/ x_grad_name);
G
gongweibao 已提交
324
    }
Q
326 327
      ctx->ShareDim("Y", /*->*/ y_grad_name);
      ctx->ShareLoD("Y", /*->*/ y_grad_name);
G
gongweibao 已提交
328 329
    }
  }
330 331

  framework::OpKernelType GetExpectedKernelType(
C
chengduo 已提交
332
      const framework::ExecutionContext &ctx) const override {
333 334
    auto input_data_type = OperatorWithKernel::IndicateVarDataType(
        ctx, framework::GradVarName("Out"));
335 336

#ifdef PADDLE_WITH_MKLDNN
337
    if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
338 339 340 341 342 343 344
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }
C
chentianyu03 已提交
345 346 347 348 349 350

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const framework::Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const override {
    if (framework::IsComplexType(expected_kernel_type.data_type_)) {
      // only promote inputs' types when the op contains complex inputs
      return framework::OpKernelType(
          framework::TransToProtoVarType(tensor.dtype()), tensor.place(),
          tensor.layout());
    } else {
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), tensor.layout());
    }
  }
};

class ElementwiseOpDoubleGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  using Tensor = framework::Tensor;

  void InferShape(framework::InferShapeContext *ctx) const override {
    auto x_grad_name = framework::GradVarName("X");
    auto y_grad_name = framework::GradVarName("Y");
    if (ctx->HasOutput(x_grad_name)) {
      ctx->ShareDim("X", x_grad_name);
      ctx->ShareLoD("X", x_grad_name);
    }
    if (ctx->HasOutput(y_grad_name)) {
      ctx->ShareDim("Y", y_grad_name);
      ctx->ShareLoD("Y", y_grad_name);
    }
    if (ctx->HasOutput("DDOut")) {
      ctx->ShareDim("DOut", "DDOut");
      ctx->ShareLoD("DOut", "DDOut");
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DOut");

#ifdef PADDLE_WITH_MKLDNN
    if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const framework::Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const override {
    if (framework::IsComplexType(expected_kernel_type.data_type_)) {
      // only promote inputs' types when the op contains complex inputs
      return framework::OpKernelType(
          framework::TransToProtoVarType(tensor.dtype()), tensor.place(),
          tensor.layout());
    } else {
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), tensor.layout());
    }
  }
};

class ElementwiseOpDoubleGradWithoutDXDY
    : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  using Tensor = framework::Tensor;

  void InferShape(framework::InferShapeContext *ctx) const override {
    if (ctx->HasOutput("DDOut")) {
      ctx->ShareDim("DOut", "DDOut");
      ctx->ShareLoD("DOut", "DDOut");
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
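    // Infer the kernel dtype from whichever of DDX / DDY is present; when
    // both exist, promote their dtypes (e.g. for complex inputs).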
    framework::proto::VarType::Type input_data_type;
    if (ctx.HasInput("DDX") == false) {
      OP_INOUT_CHECK(ctx.HasInput("DDY"), "Input", "DDY",
                     "ElementwiseOpDoubleGradWithoutDXDY");
      input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DDY");
    } else if (ctx.HasInput("DDY") == false) {
      OP_INOUT_CHECK(ctx.HasInput("DDX"), "Input", "DDX",
                     "ElementwiseOpDoubleGradWithoutDXDY");
      input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DDX");
    } else {
      input_data_type =
          OperatorWithKernel::IndicateOrPromoteVarDataTypes(ctx, "DDX", "DDY");
    }

#ifdef PADDLE_WITH_MKLDNN
    if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const framework::Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const override {
    if (framework::IsComplexType(expected_kernel_type.data_type_)) {
      // only promote inputs' types when the op contains complex inputs
      return framework::OpKernelType(
          framework::TransToProtoVarType(tensor.dtype()), tensor.place(),
          tensor.layout());
    } else {
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), tensor.layout());
    }
  }
};

class ElementwiseOpTripleGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  using Tensor = framework::Tensor;

  void InferShape(framework::InferShapeContext *ctx) const override {
    if (ctx->HasOutput("D_DDX")) {
      ctx->ShareDim("DDX", "D_DDX");
      ctx->ShareLoD("DDX", "D_DDX");
    }
    if (ctx->HasOutput("D_DDY")) {
      ctx->ShareDim("DDY", "D_DDY");
      ctx->ShareLoD("DDY", "D_DDY");
    }
    if (ctx->HasOutput("D_X")) {
      ctx->ShareDim("X", "D_X");
      ctx->ShareLoD("X", "D_X");
    }
    if (ctx->HasOutput("D_Y")) {
      ctx->ShareDim("Y", "D_Y");
      ctx->ShareLoD("Y", "D_Y");
    }
    if (ctx->HasOutput("D_DOut")) {
      ctx->ShareDim("DOut", "D_DOut");
      ctx->ShareLoD("DOut", "D_DOut");
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    framework::proto::VarType::Type input_data_type;
    input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "D_DDOut");

#ifdef PADDLE_WITH_MKLDNN
    if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const framework::Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const override {
    if (framework::IsComplexType(expected_kernel_type.data_type_)) {
      // only promote inputs' types when the op contains complex inputs
      return framework::OpKernelType(
          framework::TransToProtoVarType(tensor.dtype()), tensor.place(),
          tensor.layout());
    } else {
      return framework::OpKernelType(expected_kernel_type.data_type_,
                                     tensor.place(), tensor.layout());
    }
  }
};

template <typename T>
class ElemwiseGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    auto *dx =
        context.Output<framework::LoDTensor>(framework::GradVarName("X"));
    auto &dout =
        *context.Input<framework::LoDTensor>(framework::GradVarName("Out"));
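    // Shares dout's LoD with dx (when dx exists) before the per-op grad
    // kernel runs.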
    phi::funcs::ElementwiseGradPreProcess(dout, dx);
  }
};

DECLARE_INPLACE_OP_INFERER(ElementwiseOpInplaceInferer, {"X", "Out"});
DECLARE_INPLACE_OP_INFERER(ElementwiseGradOpInplaceInferer,
                           {framework::GradVarName("Out"),
                            framework::GradVarName("X")});
DECLARE_INPLACE_OP_INFERER(ElementwiseDoubleGradOpInplaceInferer,
                           {"DDX", "DDOut"});

DECLARE_INPLACE_OP_INFERER(ElementwiseTripleGradOpInplaceInferer,
                           {"D_DDOut", "D_DDX"});

DECLARE_NO_NEED_BUFFER_VARS_INFERER(ElementwiseGradNoBufVarsInferer, "X", "Y");
DECLARE_NO_NEED_BUFFER_VARS_INFERER(ElementwiseDoubleGradNoBufVarsInferer, "Y",
                                    "DOut");
DECLARE_NO_NEED_BUFFER_VARS_INFERER(ElementwiseTripleGradNoBufVarsInferer,
                                    "DDX", "DDY");

}  // namespace operators
}  // namespace paddle
#define REGISTER_ELEMWISE_GRAD_MAKER(kernel_type, op_name)              \
  template <typename T>                                                 \
  class kernel_type##GradMaker                                          \
      : public paddle::framework::SingleGradOpMaker<T> {                \
   public:                                                              \
    using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker; \
                                                                        \
   protected:                                                           \
    void Apply(::paddle::framework::GradOpPtr<T> op) const override {   \
      op->SetType(#kernel_type "_grad");                                \
      op->SetInput("X", this->Input("X"));                              \
      op->SetInput("Y", this->Input("Y"));                              \
      op->SetInput(::paddle::framework::GradVarName("Out"),             \
                   this->OutputGrad("Out"));                            \
      op->SetAttrMap(this->Attrs());                                    \
      op->SetOutput(::paddle::framework::GradVarName("X"),              \
                    this->InputGrad("X"));                              \
      op->SetOutput(::paddle::framework::GradVarName("Y"),              \
                    this->InputGrad("Y"));                              \
    }                                                                   \
  }
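// Typical use (illustrative): REGISTER_ELEMWISE_GRAD_MAKER(elementwise_mul,
// Mul); declares elementwise_mulGradMaker, whose Apply() emits an
// "elementwise_mul_grad" op reading X, Y and GradVarName("Out") and writing
// GradVarName("X") and GradVarName("Y").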

#define REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(op_type, op_name)    \
  REGISTER_OPERATOR(op_type, ::paddle::operators::ElementwiseOp,        \
                    ::paddle::operators::Elementwise##op_name##OpMaker, \
                    ::paddle::operators::ElementwiseOpInferVarType,     \
                    op_type##GradMaker<::paddle::framework::OpDesc>,    \
                    op_type##GradMaker<::paddle::imperative::OpBase>,   \
                    ::paddle::operators::ElementwiseOpInplaceInferer);
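
// Sketch of how the two macros compose (illustrative; assumes an
// ElementwiseSubOpMaker is defined):
//   REGISTER_ELEMWISE_GRAD_MAKER(elementwise_sub, Sub);
//   REGISTER_ELEMWISE_EXPLICIT_OP_WITHOUT_GRAD(elementwise_sub, Sub);
// This registers the forward op together with its maker, var-type inference,
// the static- and dynamic-graph grad makers, and the in-place inferer.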