Unverified commit 891cf433 authored by Wang Xin, committed by GitHub

add autogen code support for logcumsumexp op (#52682)

Parent aa35331f
@@ -123,74 +123,6 @@ class CumsumCompositeGradOpMaker : public prim::CompositeGradOpMakerBase {
    this->RecoverOutputName(dx, dx_name);
  }
};
class LogcumsumexpOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Input of logcumsumexp operator");
    AddOutput("Out", "Output of logcumsumexp operator");
    AddAttr<int>("axis",
                 "The dimension to accumulate along. -1 means the last "
                 "dimension [default -1].")
        .SetDefault(-1);
    AddAttr<bool>(
        "flatten",
        "Whether to compute the logcumsumexp over the flattened array. "
        "[default false].")
        .SetDefault(false);
    AddAttr<bool>("exclusive",
                  "Whether to perform exclusive logcumsumexp. [default false].")
        .SetDefault(false);
    AddAttr<bool>(
        "reverse",
        "If true, the logcumsumexp is performed in the reversed direction. "
        "[default false].")
        .SetDefault(false);
    AddComment(R"DOC(
Returns the logarithm of the cumulative summation of the exponentiation of elements of input along the given axis.
By default, the first element of the result is the same as the first element of
the input. If exclusive is true, the first element of the result is the lowest finite value of the dtype of the output tensor.
)DOC");
  }
};
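For reference, the semantics spelled out in the AddComment above can be mirrored by a small NumPy sketch (a naive illustration only, not Paddle's kernel: it ignores the flatten attribute and is not numerically stabilized):

import numpy as np

def logcumsumexp_ref(x, axis=-1, exclusive=False, reverse=False):
    # out[i] = log(sum_{j <= i} exp(x[j])) along `axis`; exclusive shifts the
    # window to j < i and seeds the first slot with the lowest finite value.
    x = np.asarray(x, dtype=np.float64)
    if reverse:
        x = np.flip(x, axis=axis)
    out = np.log(np.cumsum(np.exp(x), axis=axis))
    if exclusive:
        out = np.roll(out, 1, axis=axis)
        index = [slice(None)] * out.ndim
        index[axis] = 0
        out[tuple(index)] = np.finfo(out.dtype).min
    if reverse:
        out = np.flip(out, axis=axis)
    return out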
class LogcumsumexpGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "logcumsumexp");
    OP_INOUT_CHECK(ctx->HasInput("Out"), "Input", "Out", "logcumsumexp");
    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")),
                   "Input",
                   "Out@GRAD",
                   "logcumsumexp");
    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
  }
};
template <typename T>
class LogcumsumexpGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> grad_op) const override {
    grad_op->SetType("logcumsumexp_grad");
    grad_op->SetInput("X", this->Input("X"));
    grad_op->SetInput("Out", this->Output("Out"));
    grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    grad_op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    grad_op->SetAttr("axis", PADDLE_GET_CONST(int, this->GetAttr("axis")));
    grad_op->SetAttr("flatten",
                     PADDLE_GET_CONST(bool, this->GetAttr("flatten")));
    grad_op->SetAttr("exclusive",
                     PADDLE_GET_CONST(bool, this->GetAttr("exclusive")));
    grad_op->SetAttr("reverse",
                     PADDLE_GET_CONST(bool, this->GetAttr("reverse")));
  }
};

}  // namespace operators
}  // namespace paddle
@@ -200,9 +132,6 @@ DECLARE_INFER_SHAPE_FUNCTOR(cumsum,
                            CumsumInferShapeFunctor,
                            PD_INFER_META(phi::CumScalarAxisInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(logcumsumexp,
                            LogcumsumexpInferShapeFunctor,
                            PD_INFER_META(phi::CumInferMeta));
REGISTER_OPERATOR(cumsum,
                  ops::CumOp,
                  ops::CumsumOpMaker,
@@ -210,13 +139,6 @@ REGISTER_OPERATOR(cumsum,
                  ops::CumsumGradMaker<paddle::framework::OpDesc>,
                  ops::CumsumGradMaker<paddle::imperative::OpBase>,
                  CumsumInferShapeFunctor);
REGISTER_OPERATOR(logcumsumexp,
                  ops::CumOp,
                  ops::LogcumsumexpOpMaker,
                  ops::LogcumsumexpGradMaker<paddle::framework::OpDesc>,
                  ops::LogcumsumexpGradMaker<paddle::imperative::OpBase>,
                  LogcumsumexpInferShapeFunctor);
REGISTER_OPERATOR(logcumsumexp_grad, ops::LogcumsumexpGradOp);
REGISTER_OPERATOR(cumsum_grad, ops::CumGradOp);
REGISTER_OP_VERSION(cumsum).AddCheckpoint(
......
@@ -966,6 +966,16 @@
    func : log_softmax_grad
    data_type : out_grad

- backward_op : logcumsumexp_grad
  forward : logcumsumexp(Tensor x, int axis=-1, bool flatten=false, bool exclusive=false, bool reverse=false) -> Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  args : (Tensor x, Tensor out, Tensor out_grad, int axis, bool flatten, bool exclusive, bool reverse)
  output : Tensor(x_grad)
  kernel :
    func : logcumsumexp_grad
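The infer_meta block reuses UnchangedInferMeta with param : [x], i.e. x_grad simply inherits x's shape and dtype. A quick sketch of that contract through the Python API (assuming a Paddle build where paddle.logcumsumexp is available):

import paddle

x = paddle.randn([3, 4])
x.stop_gradient = False
paddle.logcumsumexp(x, axis=1).sum().backward()
assert x.grad.shape == x.shape  # gradient shape mirrors the input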
- backward_op : logit_grad
  forward : logit (Tensor x, float eps = 1e-6f) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, float eps)
......
@@ -556,16 +556,6 @@
  no_need_buffer : bias
  optional : scale, bias

- backward_op : logcumsumexp_grad
  forward : logcumsumexp(Tensor x, int axis, bool flatten, bool exclusive, bool reverse) -> Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  args : (Tensor x, Tensor out, Tensor out_grad, int axis, bool flatten, bool exclusive, bool reverse)
  output : Tensor(x_grad)
  kernel :
    func : logcumsumexp_grad

- backward_op : logsumexp_grad
  forward : logsumexp(Tensor x, int64_t[] axis, bool keepdim, bool reduce_all) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] axis, bool keepdim, bool reduce_all)
......
@@ -800,15 +800,6 @@
  data_type : dtype
  backend : place

- op : logcumsumexp
  args : (Tensor x, int axis, bool flatten, bool exclusive, bool reverse)
  output : Tensor(out)
  infer_meta :
    func : CumInferMeta
  kernel :
    func : logcumsumexp
  backward : logcumsumexp_grad

- op : logspace
  args : (Tensor start, Tensor stop, Tensor num, Tensor base, DataType dtype, Place place={})
  output : Tensor(out)
......
@@ -1247,6 +1247,13 @@
  extra :
    attrs : [bool use_mkldnn = false]

- op : logcumsumexp
  backward : logcumsumexp_grad
  inputs :
    x : X
  outputs :
    out : Out
- op : logical_and
  inputs :
    {x : X, y : Y}
......
@@ -1003,6 +1003,15 @@
  data_type : x
  backward : log_softmax_grad

- op : logcumsumexp
  args : (Tensor x, int axis=-1, bool flatten=false, bool exclusive=false, bool reverse=false)
  output : Tensor(out)
  infer_meta :
    func : CumInferMeta
  kernel :
    func : logcumsumexp
  backward : logcumsumexp_grad
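This entry is what the autogen pipeline consumes in place of the hand-written LogcumsumexpOpMaker and registration removed above. A minimal forward-usage sketch through the Python API (printed values are approximate):

import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0])
print(paddle.logcumsumexp(x))  # ~= [1.0, 2.3133, 3.4076]: an inclusive log-sum-exp scan over the last axis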
- op : logical_and
  args : (Tensor x, Tensor y)
  output : Tensor(out)
......
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature LogcumsumexpOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("logcumsumexp",
{"X"},
{"axis", "flatten", "exclusive", "reverse"},
{"Out"});
}
KernelSignature LogcumsumexpGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("logcumsumexp_grad",
{"X", "Out", "Out@GRAD"},
{"axis", "flatten", "exclusive", "reverse"},
{"X@GRAD"});
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(logcumsumexp, phi::LogcumsumexpOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(logcumsumexp_grad,
phi::LogcumsumexpGradOpArgumentMapping);
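Under the autogen path, the legacy-to-phi name mapping this deleted file expressed in C++ is now carried by the op_compat.yaml entry above (x : X, out : Out). Purely as an illustration of the correspondence (the dict below is hypothetical, not a Paddle data structure):

# Hypothetical illustration only -- not a Paddle API.
logcumsumexp_compat = {
    "inputs":  {"x": "X"},     # phi argument -> legacy fluid name
    "outputs": {"out": "Out"},
    "attrs":   ["axis", "flatten", "exclusive", "reverse"],  # passed through unchanged
}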