/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/prim/api/composite_backward/composite_backward_api.h"
#include "paddle/fluid/prim/utils/static/composite_grad_desc_maker.h"
#include "paddle/fluid/prim/utils/static/desc_tensor.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"

namespace paddle {
namespace operators {

class CumOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  phi::KernelKey GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    auto input_data_type =
        framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");
    return phi::KernelKey(input_data_type, ctx.GetPlace());
  }
};

class CumGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "cumsum");
    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")),
                   "Input",
                   "Out@GRAD",
                   "cumsum");
    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
  }

  phi::KernelKey GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    auto input_data_type =
        framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");
    return phi::KernelKey(input_data_type, ctx.GetPlace());
  }
};

class CumsumOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of cumsum operator");
    AddOutput("Out", "Output of cumsum operator");
    AddAttr<int>("axis",
                 "The dimension to accumulate along. -1 means the last "
                 "dimension [default -1].")
        .SetDefault(-1)
        .SupportTensor();
    AddAttr<bool>("flatten",
                  "Whether to compute the cumsum over the flattened array. "
                  "[default false].")
        .SetDefault(false);
    AddAttr<bool>("exclusive",
                  "Whether to perform exclusive cumsum. [default false].")
        .SetDefault(false);
    AddAttr<bool>("reverse",
                  "If true, the cumsum is performed in the reversed direction. "
                  "[default false].")
        .SetDefault(false);
    AddComment(R"DOC(
The cumulative sum of the elements along a given axis.
By default, the first element of the result is the same as the first element of
the input. If exclusive is true, the first element of the result is 0.
)DOC");
  }
};
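
// For reference, a hand-worked example of the attribute semantics documented
// above (illustrative values only; assumes a 1-D input X = [1, 2, 3]):
//
//   default:                           Out = [1, 3, 6]
//   exclusive = true:                  Out = [0, 1, 3]
//   reverse = true:                    Out = [6, 5, 3]
//   exclusive = true, reverse = true:  Out = [5, 3, 0]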

template <typename T>
class CumsumGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> grad_op) const override {
    grad_op->SetType("cumsum_grad");
    grad_op->SetInput("X", this->Input("X"));
    grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    grad_op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    grad_op->SetAttrMap(this->Attrs());
    grad_op->SetAttr("reverse",
                     PADDLE_GET_CONST(bool, this->GetAttr("reverse")));
  }
};

class CumsumCompositeGradOpMaker : public prim::CompositeGradOpMakerBase {
  using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase;

 public:
  void Apply() override {
    paddle::experimental::Tensor x = this->GetSingleForwardInput("X");
    paddle::experimental::Tensor out_grad = this->GetSingleOutputGrad("Out");
    paddle::experimental::Tensor dx = this->GetSingleInputGrad("X");
    auto* dx_ptr = this->GetOutputPtr(&dx);
    std::string dx_name = this->GetOutputName(dx);
    int axis = static_cast<int>(this->Attr<int>("axis"));
    bool flatten = static_cast<bool>(this->Attr<bool>("flatten"));
    bool exclusive = static_cast<bool>(this->Attr<bool>("exclusive"));
    bool reverse = static_cast<bool>(this->Attr<bool>("reverse"));
    VLOG(6) << "Running cumsum_grad composite func";
    prim::cumsum_grad<prim::DescTensor>(
        x, out_grad, axis, flatten, exclusive, reverse, dx_ptr);
    this->RecoverOutputName(dx, dx_name);
  }
};
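
// For reference, the chain rule for an inclusive forward cumsum reduces to a
// cumsum of the output gradient in the opposite direction (hand-worked
// sketch only; the prim::cumsum_grad internals are not shown in this file):
//
//   Out_i = sum_{j <= i} X_j   =>   dL/dX_j = sum_{i >= j} dL/dOut_i
//   e.g. out_grad = [g0, g1, g2]  =>  x_grad = [g0+g1+g2, g1+g2, g2]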

class LogcumsumexpOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of logcumsumexp operator");
    AddOutput("Out", "Output of logcumsumexp operator");
    AddAttr<int>("axis",
                 "The dimension to accumulate along. -1 means the last "
                 "dimension [default -1].")
        .SetDefault(-1);
    AddAttr<bool>(
        "flatten",
        "Whether to compute the logcumsumexp over the flattened array. "
        "[default false].")
        .SetDefault(false);
    AddAttr<bool>("exclusive",
                  "Whether to perform exclusive logcumsumexp. [default false].")
        .SetDefault(false);
    AddAttr<bool>(
        "reverse",
        "If true, the logcumsumexp is performed in the reversed direction. "
        "[default false].")
        .SetDefault(false);
    AddComment(R"DOC(
Returns the logarithm of the cumulative summation of the exponentiation of elements of input along the given axis.
By default, the first element of the result is the same as the first element of
the input. If exclusive is true, the first element of the result is the lowest finite value of the dtype of output tensor.
)DOC");
  }
};
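
// For reference, the element-wise definition implied by the comment above
// (inclusive, forward case; hand-worked values only):
//
//   Out_i = log(exp(X_0) + exp(X_1) + ... + exp(X_i))
//   e.g. X = [0, 0, 0]  =>  Out = [log 1, log 2, log 3]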

class LogcumsumexpGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "logcumsumexp");
    OP_INOUT_CHECK(ctx->HasInput("Out"), "Input", "Out", "logcumsumexp");
    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")),
                   "Input",
                   "Out@GRAD",
                   "logcumsumexp");
    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
  }
};

template <typename T>
class LogcumsumexpGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> grad_op) const override {
    grad_op->SetType("logcumsumexp_grad");
    grad_op->SetInput("X", this->Input("X"));
    grad_op->SetInput("Out", this->Output("Out"));
    grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    grad_op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    grad_op->SetAttr("axis", PADDLE_GET_CONST(int, this->GetAttr("axis")));
    grad_op->SetAttr("flatten",
                     PADDLE_GET_CONST(bool, this->GetAttr("flatten")));
    grad_op->SetAttr("exclusive",
                     PADDLE_GET_CONST(bool, this->GetAttr("exclusive")));
    grad_op->SetAttr("reverse",
                     PADDLE_GET_CONST(bool, this->GetAttr("reverse")));
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
using CPU = phi::CPUContext;
DECLARE_INFER_SHAPE_FUNCTOR(cumsum,
                            CumsumInferShapeFunctor,
                            PD_INFER_META(phi::CumScalarAxisInferMeta));

DECLARE_INFER_SHAPE_FUNCTOR(logcumsumexp,
                            LogcumsumexpInferShapeFunctor,
                            PD_INFER_META(phi::CumInferMeta));
REGISTER_OPERATOR(cumsum,
                  ops::CumOp,
                  ops::CumsumOpMaker,
                  ops::CumsumCompositeGradOpMaker,
                  ops::CumsumGradMaker<paddle::framework::OpDesc>,
                  ops::CumsumGradMaker<paddle::imperative::OpBase>,
                  CumsumInferShapeFunctor);
REGISTER_OPERATOR(logcumsumexp,
                  ops::CumOp,
                  ops::LogcumsumexpOpMaker,
                  ops::LogcumsumexpGradMaker<paddle::framework::OpDesc>,
                  ops::LogcumsumexpGradMaker<paddle::imperative::OpBase>,
                  LogcumsumexpInferShapeFunctor);
REGISTER_OPERATOR(logcumsumexp_grad, ops::LogcumsumexpGradOp);
REGISTER_OPERATOR(cumsum_grad, ops::CumGradOp);

REGISTER_OP_VERSION(cumsum).AddCheckpoint(
    R"ROC(
      Upgrade cumsum, add a new attribute [flatten].
    )ROC",
    paddle::framework::compatible::OpVersionDesc().NewAttr(
        "flatten",
        "In order to compute the cumsum over the flattened array when the "
        "argument `axis` in python API is None.",
        false));
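
// Illustration of the [flatten] upgrade registered above (hand-worked
// example; assumes a 2-D input):
//
//   X = [[1, 2, 3],
//        [4, 5, 6]],  flatten = true
//   => Out = [1, 3, 6, 10, 15, 21]   (cumsum over the flattened
//      [1, 2, 3, 4, 5, 6]; used when `axis` is None in the Python API)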