/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"

namespace paddle {
namespace operators {

class CumOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
W
WangZhen 已提交
27 28 29 30 31 32 33

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    auto input_data_type =
        framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }
E
emailweixu 已提交
34 35 36 37
};

// Proto maker for the "cumsum" op: declares the single input/output pair and
// the axis/flatten/exclusive/reverse attributes.
class CumsumOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of cumsum operator");
    AddOutput("Out", "Output of cumsum operator");

    // `axis` may also be supplied as a runtime tensor (SupportTensor).
    AddAttr<int>("axis",
                 "The dimension to accumulate along. -1 means the last "
                 "dimension [default -1].")
        .SetDefault(-1)
        .SupportTensor();

    AddAttr<bool>("flatten",
                  "Whether to compute the cumsum over the flattened array. "
                  "[default false].")
        .SetDefault(false);

    AddAttr<bool>("exclusive",
                  "Whether to perform exclusive cumsum. [default false].")
        .SetDefault(false);

    AddAttr<bool>("reverse",
                  "If true, the cumsum is performed in the reversed direction. "
                  "[default false].")
        .SetDefault(false);

    AddComment(R"DOC(
The cumulative sum of the elements along a given axis.
By default, the first element of the result is the same of the first element of
the input. If exclusive is true, the first element of the result is 0.
)DOC");
  }
};

// Builds the backward op for cumsum. No dedicated grad kernel is needed:
// the gradient of a cumsum is the cumsum of the upstream gradient taken in
// the opposite direction, so a forward "cumsum" op is emitted with the
// `reverse` attribute flipped.
template <typename T>
class CumsumGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> grad_op) const override {
    // Pull the forward op's attributes once, up front.
    const int axis = PADDLE_GET_CONST(int, this->GetAttr("axis"));
    const bool flatten = PADDLE_GET_CONST(bool, this->GetAttr("flatten"));
    const bool reverse = PADDLE_GET_CONST(bool, this->GetAttr("reverse"));
    const bool exclusive = PADDLE_GET_CONST(bool, this->GetAttr("exclusive"));

    grad_op->SetType("cumsum");
    // dOut feeds in as X; the result is dX.
    grad_op->SetInput("X", this->OutputGrad("Out"));
    grad_op->SetOutput("Out", this->InputGrad("X"));
    grad_op->SetAttr("axis", axis);
    grad_op->SetAttr("flatten", flatten);
    grad_op->SetAttr("reverse", !reverse);  // run in the opposite direction
    grad_op->SetAttr("exclusive", exclusive);
  }
};

// Proto maker for the "logcumsumexp" op. Mirrors CumsumOpMaker, except that
// `axis` here does not support a runtime tensor.
class LogcumsumexpOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of logcumsumexp operator");
    AddOutput("Out", "Output of logcumsumexp operator");

    AddAttr<int>("axis",
                 "The dimension to accumulate along. -1 means the last "
                 "dimension [default -1].")
        .SetDefault(-1);

    AddAttr<bool>("flatten",
                  "Whether to compute the logcumsumexp over the flattened "
                  "array. [default false].")
        .SetDefault(false);

    AddAttr<bool>("exclusive",
                  "Whether to perform exclusive logcumsumexp. [default false].")
        .SetDefault(false);

    AddAttr<bool>("reverse",
                  "If true, the logcumsumexp is performed in the reversed "
                  "direction. [default false].")
        .SetDefault(false);

    AddComment(R"DOC(
Returns the logarithm of the cumulative summation of the exponentiation of elements of input along the given axis.
By default, the first element of the result is the same of the first element of
the input. If exclusive is true, the first element of the result is the lowest finite value of the dtype of output tensor.
)DOC");
  }
};

// Explicit gradient op for logcumsumexp: dX has the same shape as the
// forward input X.
class LogcumsumexpGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "logcumsumexp");
    OP_INOUT_CHECK(ctx->HasInput("Out"), "Input", "Out", "logcumsumexp");
    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")),
                   "Input",
                   "Out@GRAD",
                   "logcumsumexp");
    // Fix: the original validated every input but wrote X@GRAD's dim without
    // first checking the output var exists; check it like the inputs above.
    OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")),
                   "Output",
                   "X@GRAD",
                   "logcumsumexp");
    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
  }
};

// Builds the backward op for logcumsumexp. Unlike cumsum, the gradient
// kernel needs the forward input, the forward output, and the upstream
// gradient, so a dedicated "logcumsumexp_grad" op is emitted.
template <typename T>
class LogcumsumexpGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> grad_op) const override {
    // Forward attributes are copied through unchanged.
    const int axis = PADDLE_GET_CONST(int, this->GetAttr("axis"));
    const bool flatten = PADDLE_GET_CONST(bool, this->GetAttr("flatten"));
    const bool exclusive = PADDLE_GET_CONST(bool, this->GetAttr("exclusive"));
    const bool reverse = PADDLE_GET_CONST(bool, this->GetAttr("reverse"));

    grad_op->SetType("logcumsumexp_grad");
    grad_op->SetInput("X", this->Input("X"));
    grad_op->SetInput("Out", this->Output("Out"));
    grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    grad_op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    grad_op->SetAttr("axis", axis);
    grad_op->SetAttr("flatten", flatten);
    grad_op->SetAttr("exclusive", exclusive);
    grad_op->SetAttr("reverse", reverse);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
L
Leo Chen 已提交
156
using CPU = phi::CPUContext;
157 158
DECLARE_INFER_SHAPE_FUNCTOR(cumsum,
                            CumsumInferShapeFunctor,
W
WangZhen 已提交
159
                            PD_INFER_META(phi::CumScalarAxisInferMeta));
160 161
DECLARE_INFER_SHAPE_FUNCTOR(logcumsumexp,
                            LogcumsumexpInferShapeFunctor,
162
                            PD_INFER_META(phi::CumInferMeta));
163 164 165
REGISTER_OPERATOR(cumsum,
                  ops::CumOp,
                  ops::CumsumOpMaker,
H
hong 已提交
166
                  ops::CumsumGradMaker<paddle::framework::OpDesc>,
167 168
                  ops::CumsumGradMaker<paddle::imperative::OpBase>,
                  CumsumInferShapeFunctor);
169 170 171
REGISTER_OPERATOR(logcumsumexp,
                  ops::CumOp,
                  ops::LogcumsumexpOpMaker,
172 173 174 175
                  ops::LogcumsumexpGradMaker<paddle::framework::OpDesc>,
                  ops::LogcumsumexpGradMaker<paddle::imperative::OpBase>,
                  LogcumsumexpInferShapeFunctor);
REGISTER_OPERATOR(logcumsumexp_grad, ops::LogcumsumexpGradOp);
176

177 178
REGISTER_OP_VERSION(cumsum).AddCheckpoint(
    R"ROC(
179 180
      Upgrade cumsum add a new attribute [flatten].
    )ROC",
181 182 183 184 185
    paddle::framework::compatible::OpVersionDesc().NewAttr(
        "flatten",
        "In order to compute the cumsum over the flattened array when the "
        "argument `axis` in python API is None.",
        false));