minus_op.cc
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/minus_op.h"

#include <memory>
#include <string>
#include <utility>
#include <vector>

namespace paddle {
namespace operators {
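
// MinusOp computes the element-wise difference Out = X - Y. InferShape checks
// that inputs X and Y and output Out exist, enforces that X and Y have
// identical dimensions once both shapes are known, and gives Out the shape
// and LoD of X.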

class MinusOp : public framework::OperatorWithKernel {
 public:
  MinusOp(const std::string &type, const framework::VariableNameMap &inputs,
          const framework::VariableNameMap &outputs,
          const framework::AttributeMap &attrs)
      : OperatorWithKernel(type, inputs, outputs, attrs) {}

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE_EQ(
        ctx->HasInput("X"), true,
        platform::errors::NotFound("Input(X) of MinusOp is not found."));
    PADDLE_ENFORCE_EQ(
        ctx->HasInput("Y"), true,
        platform::errors::NotFound("Input(Y) of MinusOp is not found."));
    PADDLE_ENFORCE_EQ(
        ctx->HasOutput("Out"), true,
        platform::errors::NotFound("Output(Out) of MinusOp is not found."));

    auto x_dims = ctx->GetInputDim("X");
    auto y_dims = ctx->GetInputDim("Y");

    if (ctx->IsRuntime() ||
        (phi::product(x_dims) > 0 && phi::product(y_dims) > 0)) {
      PADDLE_ENFORCE_EQ(
          x_dims, y_dims,
          platform::errors::InvalidArgument(
              "Minus operator must take two tensor with same dim, but received "
              "input X dim is:[%s], Y dim is:[%s]",
              x_dims, y_dims));
    }
    ctx->SetOutputDim("Out", x_dims);
    ctx->ShareLoD("X", /*->*/ "Out");
  }
};
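
// MinusOpMaker declares the operator's interface: inputs X and Y, output Out,
// and the documentation string below.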

class MinusOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "The left tensor of minus operator.");
    AddInput("Y", "The right tensor of minus operator.");
    AddOutput("Out", "The output tensor of minus operator.");

    AddComment(R"DOC(
Minus Operator.

Equation:

    $Out = X - Y$
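
For example, with X = [3, 5, 7] and Y = [1, 2, 3], Out = [2, 3, 4].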

Both of the inputs `X` and `Y` can optionally carry LoD (Level of Details)
information, but the output only shares the LoD information with input `X`.

)DOC");
  }
};

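// MinusGradDescMaker generates the static-graph backward ops. Since
// Out = X - Y, the gradients are dX = dOut and dY = -dOut; each is produced
// by a "scale" op (scale 1.0 for X, -1.0 for Y), and an op is emitted only
// for inputs that actually need a gradient.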
class MinusGradDescMaker : public framework::GradOpDescMakerBase {
 public:
  using framework::GradOpDescMakerBase::GradOpDescMakerBase;

  std::vector<std::unique_ptr<framework::OpDesc>> operator()() const override {
    std::vector<std::unique_ptr<framework::OpDesc>> ops;
    auto x_g = this->InputGrad("X");
    if (!x_g.empty()) {
      auto *x_g_op = new framework::OpDesc();
      x_g_op->SetType("scale");
      x_g_op->SetInput("X", this->OutputGrad("Out"));
      x_g_op->SetOutput("Out", x_g);
      x_g_op->SetAttr("scale", 1.0f);
      ops.emplace_back(x_g_op);
    }

    auto y_g = this->InputGrad("Y");
    if (!y_g.empty()) {
      auto *y_g_op = new framework::OpDesc();
      y_g_op->SetType("scale");
      y_g_op->SetInput("X", this->OutputGrad("Out"));
      y_g_op->SetOutput("Out", y_g);
      y_g_op->SetAttr("scale", -1.0f);
      ops.emplace_back(y_g_op);
    }

    return ops;
  }
};
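
// MinusGradMaker is the imperative (dygraph) counterpart: it traces the same
// pair of "scale" ops (1.0 for dX, -1.0 for dY) onto a single gradient node.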

class MinusGradMaker : public imperative::GradOpBaseMakerBase {
 public:
  using imperative::GradOpBaseMakerBase::GradOpBaseMakerBase;

  std::shared_ptr<imperative::GradOpNode> operator()() const override {
    auto x_g = this->InputGrad("X");
    auto y_g = this->InputGrad("Y");

    auto node = this->NewGradNode();

    if (!x_g.empty()) {
      imperative::TracedGradOp op(node);
      op.SetType("scale");
      op.SetInput("X", this->OutputGrad("Out"));
      op.SetOutput("Out", x_g);
      op.SetAttr("scale", 1.0f);
    }

    if (!y_g.empty()) {
      imperative::TracedGradOp op(node);
      op.SetType("scale");
      op.SetInput("X", this->OutputGrad("Out"));
      op.SetOutput("Out", y_g);
      op.SetAttr("scale", -1.0f);
    }

    return node;
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
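// Register the operator together with both gradient makers; float kernels are
// registered below for CPU and CUDA.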
REGISTER_OPERATOR(minus, ops::MinusOp, ops::MinusOpMaker,
                  ops::MinusGradDescMaker, ops::MinusGradMaker);
REGISTER_OP_CPU_KERNEL(
    minus, ops::MinusKernel<paddle::platform::CPUDeviceContext, float>);

REGISTER_OP_CUDA_KERNEL(
    minus, ops::MinusKernel<paddle::platform::CUDADeviceContext, float>);