/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"

namespace paddle {
namespace operators {

class CumOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
W
WangZhen 已提交
27

28
  phi::KernelKey GetExpectedKernelType(
W
WangZhen 已提交
29 30 31
      const framework::ExecutionContext& ctx) const override {
    auto input_data_type =
        framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");
32
    return phi::KernelKey(input_data_type, ctx.GetPlace());
W
WangZhen 已提交
33
  }
E
emailweixu 已提交
34 35
};

36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56
class CumGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "cumsum");
    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")),
                   "Input",
                   "Out@GRAD",
                   "cumsum");
    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
  }

  phi::KernelKey GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    auto input_data_type =
        framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");
    return phi::KernelKey(input_data_type, ctx.GetPlace());
  }
};

E
emailweixu 已提交
57 58
class CumsumOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
Y
Yu Yang 已提交
59
  void Make() override {
Y
yuyang18 已提交
60 61
    AddInput("X", "Input of cumsum operator");
    AddOutput("Out", "Output of cumsum operator");
E
emailweixu 已提交
62
    AddAttr<int>("axis",
T
tianshuo78520a 已提交
63 64
                 "The dimension to accumulate along. -1 means the last "
                 "dimension [default -1].")
W
WangZhen 已提交
65 66
        .SetDefault(-1)
        .SupportTensor();
67 68 69 70
    AddAttr<bool>("flatten",
                  "Whether to compute the cumsum over the flattened array. "
                  "[default false].")
        .SetDefault(false);
E
emailweixu 已提交
71
    AddAttr<bool>("exclusive",
Y
yuyang18 已提交
72
                  "Whether to perform exclusive cumsum. [default false].")
E
emailweixu 已提交
73 74
        .SetDefault(false);
    AddAttr<bool>("reverse",
Y
yuyang18 已提交
75 76
                  "If true, the cumsum is performed in the reversed direction. "
                  "[default false].")
E
emailweixu 已提交
77 78 79 80
        .SetDefault(false);
    AddComment(R"DOC(
The cumulative sum of the elements along a given axis.
By default, the first element of the result is the same of the first element of
81
the input. If exclusive is true, the first element of the result is 0.
E
emailweixu 已提交
82 83 84 85
)DOC");
  }
};

H
hong 已提交
86 87
template <typename T>
class CumsumGradMaker : public framework::SingleGradOpMaker<T> {
E
emailweixu 已提交
88
 public:
H
hong 已提交
89
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
E
emailweixu 已提交
90 91

 protected:
92
  void Apply(GradOpPtr<T> grad_op) const override {
93 94 95 96
    grad_op->SetType("cumsum_grad");
    grad_op->SetInput("X", this->Input("X"));
    grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    grad_op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
97
    grad_op->SetAttrMap(this->Attrs());
98
    grad_op->SetAttr("reverse",
99
                     PADDLE_GET_CONST(bool, this->GetAttr("reverse")));
E
emailweixu 已提交
100 101 102
  }
};

103 104 105 106 107 108 109 110 111
class LogcumsumexpOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of logcumsumexp operator");
    AddOutput("Out", "Output of logcumsumexp operator");
    AddAttr<int>("axis",
                 "The dimension to accumulate along. -1 means the last "
                 "dimension [default -1].")
        .SetDefault(-1);
112 113 114 115
    AddAttr<bool>(
        "flatten",
        "Whether to compute the logcumsumexp over the flattened array. "
        "[default false].")
116 117 118 119
        .SetDefault(false);
    AddAttr<bool>("exclusive",
                  "Whether to perform exclusive logcumsumexp. [default false].")
        .SetDefault(false);
120 121 122 123
    AddAttr<bool>(
        "reverse",
        "If true, the logcumsumexp is performed in the reversed direction. "
        "[default false].")
124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139
        .SetDefault(false);
    AddComment(R"DOC(
Returns the logarithm of the cumulative summation of the exponentiation of elements of input along the given axis.
By default, the first element of the result is the same of the first element of
the input. If exclusive is true, the first element of the result is the lowest finite value of the dtype of output tensor.
)DOC");
  }
};

// Gradient operator for logcumsumexp: requires X, Out and Out@GRAD, and
// propagates the shape of "X" to its gradient output.
class LogcumsumexpGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "logcumsumexp");
    OP_INOUT_CHECK(ctx->HasInput("Out"), "Input", "Out", "logcumsumexp");
    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")),
                   "Input",
                   "Out@GRAD",
                   "logcumsumexp");
    // dX has the same shape as the forward input X.
    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
  }
};

// Grad-op maker for logcumsumexp: builds the "logcumsumexp_grad" op. The
// grad kernel needs X, Out and Out@GRAD, plus the forward attributes, which
// are forwarded one by one below.
template <typename T>
class LogcumsumexpGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> grad_op) const override {
    grad_op->SetType("logcumsumexp_grad");
    grad_op->SetInput("X", this->Input("X"));
    grad_op->SetInput("Out", this->Output("Out"));
    grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    grad_op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    grad_op->SetAttr("axis", PADDLE_GET_CONST(int, this->GetAttr("axis")));
    grad_op->SetAttr("flatten",
                     PADDLE_GET_CONST(bool, this->GetAttr("flatten")));
    grad_op->SetAttr("exclusive",
                     PADDLE_GET_CONST(bool, this->GetAttr("exclusive")));
    grad_op->SetAttr("reverse",
                     PADDLE_GET_CONST(bool, this->GetAttr("reverse")));
  }
};

E
emailweixu 已提交
170 171 172 173
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
L
Leo Chen 已提交
174
using CPU = phi::CPUContext;
175 176
DECLARE_INFER_SHAPE_FUNCTOR(cumsum,
                            CumsumInferShapeFunctor,
W
WangZhen 已提交
177
                            PD_INFER_META(phi::CumScalarAxisInferMeta));
178

179 180
DECLARE_INFER_SHAPE_FUNCTOR(logcumsumexp,
                            LogcumsumexpInferShapeFunctor,
181
                            PD_INFER_META(phi::CumInferMeta));
182 183 184
REGISTER_OPERATOR(cumsum,
                  ops::CumOp,
                  ops::CumsumOpMaker,
H
hong 已提交
185
                  ops::CumsumGradMaker<paddle::framework::OpDesc>,
186 187
                  ops::CumsumGradMaker<paddle::imperative::OpBase>,
                  CumsumInferShapeFunctor);
188 189 190
REGISTER_OPERATOR(logcumsumexp,
                  ops::CumOp,
                  ops::LogcumsumexpOpMaker,
191 192 193 194
                  ops::LogcumsumexpGradMaker<paddle::framework::OpDesc>,
                  ops::LogcumsumexpGradMaker<paddle::imperative::OpBase>,
                  LogcumsumexpInferShapeFunctor);
REGISTER_OPERATOR(logcumsumexp_grad, ops::LogcumsumexpGradOp);
195
REGISTER_OPERATOR(cumsum_grad, ops::CumGradOp);
196

197 198
REGISTER_OP_VERSION(cumsum).AddCheckpoint(
    R"ROC(
199 200
      Upgrade cumsum add a new attribute [flatten].
    )ROC",
201 202 203 204 205
    paddle::framework::compatible::OpVersionDesc().NewAttr(
        "flatten",
        "In order to compute the cumsum over the flattened array when the "
        "argument `axis` in python API is None.",
        false));