/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/minus_op.h"

#include <memory>
#include <string>
#include <utility>
#include <vector>

namespace paddle {
namespace operators {

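// Shape inference for the element-wise minus (subtraction) operator:
// inputs X and Y must both exist and have identical dimensions; Out takes
// X's shape and inherits X's LoD.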
class MinusOp : public framework::OperatorWithKernel {
 public:
  MinusOp(const std::string &type,
          const framework::VariableNameMap &inputs,
          const framework::VariableNameMap &outputs,
          const framework::AttributeMap &attrs)
      : OperatorWithKernel(type, inputs, outputs, attrs) {}

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE_EQ(
        ctx->HasInput("X"),
        true,
        platform::errors::NotFound("Input(X) of MinusOp is not found."));
    PADDLE_ENFORCE_EQ(
        ctx->HasInput("Y"),
        true,
        platform::errors::NotFound("Input(Y) of MinusOp is not found."));
    PADDLE_ENFORCE_EQ(
        ctx->HasOutput("Out"),
        true,
        platform::errors::NotFound("Output(Out) of MinusOp is not found."));

    auto x_dims = ctx->GetInputDim("X");
    auto y_dims = ctx->GetInputDim("Y");

    if (ctx->IsRuntime() ||
        (phi::product(x_dims) > 0 && phi::product(y_dims) > 0)) {
      PADDLE_ENFORCE_EQ(
          x_dims,
          y_dims,
          platform::errors::InvalidArgument(
              "Minus operator must take two tensors with the same dimensions, "
              "but received input X dim is:[%s], Y dim is:[%s]",
              x_dims,
              y_dims));
    }
    ctx->SetOutputDim("Out", x_dims);
    ctx->ShareLoD("X", /*->*/ "Out");
  }
};

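// Declares the operator's interface (inputs X/Y, output Out) and its
// documentation string.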
class MinusOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "The left tensor of minus operator.");
    AddInput("Y", "The right tensor of minus operator.");
    AddOutput("Out", "The output tensor of minus operator.");

    AddComment(R"DOC(
Minus Operator.

Equation:

    $Out = X - Y$

Both inputs `X` and `Y` can carry the LoD (Level of Details) information or not,
but the output only shares the LoD information with input `X`.

)DOC");
  }
};

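// Gradient of Out = X - Y: dX = dOut and dY = -dOut, expressed below as two
// "scale" ops with factors 1.0 and -1.0, one per requested input gradient.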
class MinusGradDescMaker : public framework::GradOpDescMakerBase {
 public:
  using framework::GradOpDescMakerBase::GradOpDescMakerBase;

  std::vector<std::unique_ptr<framework::OpDesc>> operator()() const override {
    std::vector<std::unique_ptr<framework::OpDesc>> ops;
    auto x_g = this->InputGrad("X");
    if (!x_g.empty()) {
      auto *x_g_op = new framework::OpDesc();
      x_g_op->SetType("scale");
      x_g_op->SetInput("X", this->OutputGrad("Out"));
      x_g_op->SetOutput("Out", x_g);
      x_g_op->SetAttr("scale", 1.0f);
      ops.emplace_back(x_g_op);
    }

    auto y_g = this->InputGrad("Y");
    if (!y_g.empty()) {
      auto *y_g_op = new framework::OpDesc();
      y_g_op->SetType("scale");
      y_g_op->SetInput("X", this->OutputGrad("Out"));
      y_g_op->SetOutput("Out", y_g);
      y_g_op->SetAttr("scale", -1.0f);
      ops.emplace_back(y_g_op);
    }

    return ops;
  }
};

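// Imperative (dygraph) counterpart of MinusGradDescMaker: records the same
// pair of "scale" ops on a single gradient node.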
class MinusGradMaker : public imperative::GradOpBaseMakerBase {
 public:
  using imperative::GradOpBaseMakerBase::GradOpBaseMakerBase;

  std::shared_ptr<imperative::GradOpNode> operator()() const override {
    auto x_g = this->InputGrad("X");
    auto y_g = this->InputGrad("Y");

    auto node = this->NewGradNode();

    if (!x_g.empty()) {
      imperative::TracedGradOp op(node);
      op.SetType("scale");
      op.SetInput("X", this->OutputGrad("Out"));
      op.SetOutput("Out", x_g);
      op.SetAttr("scale", 1.0f);
      op.SetDefaultAttrsMap(DefaultAttrsMap());
    }

    if (!y_g.empty()) {
      imperative::TracedGradOp op(node);
      op.SetType("scale");
      op.SetInput("X", this->OutputGrad("Out"));
      op.SetOutput("Out", y_g);
      op.SetAttr("scale", -1.0f);
      op.SetDefaultAttrsMap(DefaultAttrsMap());
    }

    return node;
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
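// Register the operator together with its proto maker and the static-graph
// and dygraph gradient makers; float kernels for CPU and GPU follow below.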
REGISTER_OPERATOR(minus,
                  ops::MinusOp,
                  ops::MinusOpMaker,
                  ops::MinusGradDescMaker,
                  ops::MinusGradMaker);
REGISTER_OP_CPU_KERNEL(minus, ops::MinusKernel<phi::CPUContext, float>);

REGISTER_OP_CUDA_KERNEL(minus, ops::MinusKernel<phi::GPUContext, float>);