From 891cf433e29455cd5740a6b5e59dad5109d826c8 Mon Sep 17 00:00:00 2001
From: Wang Xin
Date: Mon, 10 Apr 2023 11:40:30 +0800
Subject: [PATCH] add autogen code support for logcumsumexp op (#52682)

---
 paddle/fluid/operators/cum_op.cc          | 78 -----------------------
 paddle/phi/api/yaml/backward.yaml         | 10 +++
 paddle/phi/api/yaml/legacy_backward.yaml  | 10 ---
 paddle/phi/api/yaml/legacy_ops.yaml       |  9 ---
 paddle/phi/api/yaml/op_compat.yaml        |  7 ++
 paddle/phi/api/yaml/ops.yaml              |  9 +++
 paddle/phi/ops/compat/logcumsumexp_sig.cc | 39 ------------
 7 files changed, 26 insertions(+), 136 deletions(-)
 delete mode 100644 paddle/phi/ops/compat/logcumsumexp_sig.cc

diff --git a/paddle/fluid/operators/cum_op.cc b/paddle/fluid/operators/cum_op.cc
index 2c42280c6d4..a886e0dbbe9 100644
--- a/paddle/fluid/operators/cum_op.cc
+++ b/paddle/fluid/operators/cum_op.cc
@@ -123,74 +123,6 @@ class CumsumCompositeGradOpMaker : public prim::CompositeGradOpMakerBase {
     this->RecoverOutputName(dx, dx_name);
   }
 };
-
-class LogcumsumexpOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  void Make() override {
-    AddInput("X", "Input of logcumsumexp operator");
-    AddOutput("Out", "Output of logcumsumexp operator");
-    AddAttr<int>("axis",
-                 "The dimension to accumulate along. -1 means the last "
-                 "dimension [default -1].")
-        .SetDefault(-1);
-    AddAttr<bool>(
-        "flatten",
-        "Whether to compute the logcumsumexp over the flattened array. "
-        "[default false].")
-        .SetDefault(false);
-    AddAttr<bool>("exclusive",
-                  "Whether to perform exclusive logcumsumexp. [default false].")
-        .SetDefault(false);
-    AddAttr<bool>(
-        "reverse",
-        "If true, the logcumsumexp is performed in the reversed direction. "
-        "[default false].")
-        .SetDefault(false);
-    AddComment(R"DOC(
-Returns the logarithm of the cumulative summation of the exponentiation of elements of input along the given axis.
-By default, the first element of the result is the same of the first element of
-the input. If exclusive is true, the first element of the result is the lowest finite value of the dtype of output tensor.
-)DOC");
-  }
-};
-
-class LogcumsumexpGradOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
-  void InferShape(framework::InferShapeContext* ctx) const override {
-    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "logcumsumexp");
-    OP_INOUT_CHECK(ctx->HasInput("Out"), "Input", "Out", "logcumsumexp");
-    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input",
-                   "Out@GRAD",
-                   "logcumsumexp");
-    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
-  }
-};
-
-template <typename T>
-class LogcumsumexpGradMaker : public framework::SingleGradOpMaker<T> {
- public:
-  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
-
- protected:
-  void Apply(GradOpPtr<T> grad_op) const override {
-    grad_op->SetType("logcumsumexp_grad");
-    grad_op->SetInput("X", this->Input("X"));
-    grad_op->SetInput("Out", this->Output("Out"));
-    grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
-    grad_op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
-    grad_op->SetAttr("axis", PADDLE_GET_CONST(int, this->GetAttr("axis")));
-    grad_op->SetAttr("flatten",
-                     PADDLE_GET_CONST(bool, this->GetAttr("flatten")));
-    grad_op->SetAttr("exclusive",
-                     PADDLE_GET_CONST(bool, this->GetAttr("exclusive")));
-    grad_op->SetAttr("reverse",
-                     PADDLE_GET_CONST(bool, this->GetAttr("reverse")));
-  }
-};
-
 }  // namespace operators
 }  // namespace paddle
 
@@ -200,9 +132,6 @@ DECLARE_INFER_SHAPE_FUNCTOR(cumsum,
                             CumsumInferShapeFunctor,
                             PD_INFER_META(phi::CumScalarAxisInferMeta));
 
-DECLARE_INFER_SHAPE_FUNCTOR(logcumsumexp,
-                            LogcumsumexpInferShapeFunctor,
-                            PD_INFER_META(phi::CumInferMeta));
 REGISTER_OPERATOR(cumsum,
                   ops::CumOp,
                   ops::CumsumOpMaker,
@@ -210,13 +139,6 @@ REGISTER_OPERATOR(cumsum,
                   ops::CumsumGradMaker<paddle::framework::OpDesc>,
                   ops::CumsumGradMaker<paddle::imperative::OpBase>,
                   CumsumInferShapeFunctor);
-REGISTER_OPERATOR(logcumsumexp,
-                  ops::CumOp,
-                  ops::LogcumsumexpOpMaker,
-                  ops::LogcumsumexpGradMaker<paddle::framework::OpDesc>,
-                  ops::LogcumsumexpGradMaker<paddle::imperative::OpBase>,
-                  LogcumsumexpInferShapeFunctor);
-REGISTER_OPERATOR(logcumsumexp_grad, ops::LogcumsumexpGradOp);
 REGISTER_OPERATOR(cumsum_grad, ops::CumGradOp);
 
 REGISTER_OP_VERSION(cumsum).AddCheckpoint(
diff --git a/paddle/phi/api/yaml/backward.yaml b/paddle/phi/api/yaml/backward.yaml
index e0a12e13fb4..0a6062dd829 100644
--- a/paddle/phi/api/yaml/backward.yaml
+++ b/paddle/phi/api/yaml/backward.yaml
@@ -966,6 +966,16 @@
     func : log_softmax_grad
     data_type : out_grad
 
+- backward_op : logcumsumexp_grad
+  forward : logcumsumexp(Tensor x, int axis=-1, bool flatten=false, bool exclusive=false, bool reverse=false) -> Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  args : (Tensor x, Tensor out, Tensor out_grad, int axis, bool flatten, bool exclusive, bool reverse)
+  output : Tensor(x_grad)
+  kernel :
+    func : logcumsumexp_grad
+
 - backward_op : logit_grad
   forward : logit (Tensor x, float eps = 1e-6f) -> Tensor(out)
   args : (Tensor x, Tensor out_grad, float eps)
diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml
index 1e11bc54b3f..b655f379cf7 100755
--- a/paddle/phi/api/yaml/legacy_backward.yaml
+++ b/paddle/phi/api/yaml/legacy_backward.yaml
@@ -556,16 +556,6 @@
   no_need_buffer : bias
   optional : scale, bias
 
-- backward_op : logcumsumexp_grad
-  forward : logcumsumexp(Tensor x, int axis, bool flatten, bool exclusive, bool reverse) -> Tensor(out)
-  infer_meta :
-    func : UnchangedInferMeta
-    param : [x]
-  args : (Tensor x, Tensor out, Tensor out_grad, int axis, bool flatten, bool exclusive, bool reverse)
-  output : Tensor(x_grad)
-  kernel :
-    func : logcumsumexp_grad
-
 - backward_op : logsumexp_grad
   forward : logsumexp(Tensor x, int64_t[] axis, bool keepdim, bool reduce_all) -> Tensor(out)
   args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] axis, bool keepdim, bool reduce_all)
diff --git a/paddle/phi/api/yaml/legacy_ops.yaml b/paddle/phi/api/yaml/legacy_ops.yaml
index a689fbc17df..d827e7eabbf 100755
--- a/paddle/phi/api/yaml/legacy_ops.yaml
+++ b/paddle/phi/api/yaml/legacy_ops.yaml
@@ -800,15 +800,6 @@
     data_type : dtype
     backend : place
 
-- op : logcumsumexp
-  args : (Tensor x, int axis, bool flatten, bool exclusive, bool reverse)
-  output : Tensor(out)
-  infer_meta :
-    func : CumInferMeta
-  kernel :
-    func : logcumsumexp
-  backward : logcumsumexp_grad
-
 - op : logspace
   args : (Tensor start, Tensor stop, Tensor num, Tensor base, DataType dtype, Place place={})
   output : Tensor(out)
diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml
index 8a2ce29511f..552895cf25f 100644
--- a/paddle/phi/api/yaml/op_compat.yaml
+++ b/paddle/phi/api/yaml/op_compat.yaml
@@ -1247,6 +1247,13 @@
   extra :
     attrs : [bool use_mkldnn = false]
 
+- op : logcumsumexp
+  backward : logcumsumexp_grad
+  inputs :
+    x : X
+  outputs :
+    out : Out
+
 - op : logical_and
   inputs :
     {x : X, y : Y}
diff --git a/paddle/phi/api/yaml/ops.yaml b/paddle/phi/api/yaml/ops.yaml
index 110fc1838ab..40e47845fe9 100644
--- a/paddle/phi/api/yaml/ops.yaml
+++ b/paddle/phi/api/yaml/ops.yaml
@@ -1003,6 +1003,15 @@
     data_type : x
   backward : log_softmax_grad
 
+- op : logcumsumexp
+  args : (Tensor x, int axis=-1, bool flatten=false, bool exclusive=false, bool reverse=false)
+  output : Tensor(out)
+  infer_meta :
+    func : CumInferMeta
+  kernel :
+    func : logcumsumexp
+  backward : logcumsumexp_grad
+
 - op : logical_and
   args : (Tensor x, Tensor y)
   output : Tensor(out)
diff --git a/paddle/phi/ops/compat/logcumsumexp_sig.cc b/paddle/phi/ops/compat/logcumsumexp_sig.cc
deleted file mode 100644
index 2c790903b63..00000000000
--- a/paddle/phi/ops/compat/logcumsumexp_sig.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "paddle/phi/core/compat/op_utils.h"
-
-namespace phi {
-
-KernelSignature LogcumsumexpOpArgumentMapping(
-    const ArgumentMappingContext& ctx) {
-  return KernelSignature("logcumsumexp",
-                         {"X"},
-                         {"axis", "flatten", "exclusive", "reverse"},
-                         {"Out"});
-}
-
-KernelSignature LogcumsumexpGradOpArgumentMapping(
-    const ArgumentMappingContext& ctx) {
-  return KernelSignature("logcumsumexp_grad",
-                         {"X", "Out", "Out@GRAD"},
-                         {"axis", "flatten", "exclusive", "reverse"},
-                         {"X@GRAD"});
-}
-
-}  // namespace phi
-
-PD_REGISTER_ARG_MAPPING_FN(logcumsumexp, phi::LogcumsumexpOpArgumentMapping);
-PD_REGISTER_ARG_MAPPING_FN(logcumsumexp_grad,
-                           phi::LogcumsumexpGradOpArgumentMapping);
--
GitLab
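
For reviewers who want to sanity-check the semantics being moved to the autogenerated path: logcumsumexp computes out[i] = log(sum over j <= i of exp(x[j])) along the chosen axis, per the deleted DOC comment above. Below is a minimal standalone C++ sketch of the 1-D recurrence in its usual numerically stable log-add-exp form; the helper name logcumsumexp_1d is invented for illustration and this is not the phi kernel's actual implementation.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <limits>
#include <vector>

// Running logcumsumexp over a 1-D sequence:
//   out[i] = log(exp(x[0]) + exp(x[1]) + ... + exp(x[i]))
// accumulated via logaddexp(a, b) = max(a, b) + log1p(exp(min(a, b) - max(a, b))),
// which avoids overflow in the intermediate exp() calls.
std::vector<double> logcumsumexp_1d(const std::vector<double>& x) {
  std::vector<double> out(x.size());
  double acc = -std::numeric_limits<double>::infinity();  // log(0): empty sum
  for (std::size_t i = 0; i < x.size(); ++i) {
    const double hi = std::max(acc, x[i]);
    const double lo = std::min(acc, x[i]);
    acc = hi + std::log1p(std::exp(lo - hi));  // acc = logaddexp(acc, x[i])
    out[i] = acc;
  }
  return out;
}

int main() {
  // logcumsumexp([0, 1, 2]) ~= [0.0000, 1.3133, 2.4076]
  for (double v : logcumsumexp_1d({0.0, 1.0, 2.0})) std::printf("%.4f ", v);
  std::printf("\n");
  return 0;
}

With exclusive=true the first output is instead the lowest finite value of the output dtype and the accumulation is shifted by one step; reverse=true runs the same recurrence from the end of the axis; flatten=true applies it over the flattened tensor. None of that changes in this patch, which only swaps the handwritten C++ registration for the YAML-generated one.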