diff --git a/paddle/fluid/operators/cum_op.cc b/paddle/fluid/operators/cum_op.cc
index 2c42280c6d45c11bace71ea453759a8268c92c42..a886e0dbbe99b5ebff74f7dcea066e4956bdb07e 100644
--- a/paddle/fluid/operators/cum_op.cc
+++ b/paddle/fluid/operators/cum_op.cc
@@ -123,74 +123,6 @@ class CumsumCompositeGradOpMaker : public prim::CompositeGradOpMakerBase {
     this->RecoverOutputName(dx, dx_name);
   }
 };
-
-class LogcumsumexpOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  void Make() override {
-    AddInput("X", "Input of logcumsumexp operator");
-    AddOutput("Out", "Output of logcumsumexp operator");
-    AddAttr<int>("axis",
-                 "The dimension to accumulate along. -1 means the last "
-                 "dimension [default -1].")
-        .SetDefault(-1);
-    AddAttr<bool>(
-        "flatten",
-        "Whether to compute the logcumsumexp over the flattened array. "
-        "[default false].")
-        .SetDefault(false);
-    AddAttr<bool>("exclusive",
-                  "Whether to perform exclusive logcumsumexp. [default false].")
-        .SetDefault(false);
-    AddAttr<bool>(
-        "reverse",
-        "If true, the logcumsumexp is performed in the reversed direction. "
-        "[default false].")
-        .SetDefault(false);
-    AddComment(R"DOC(
-Returns the logarithm of the cumulative summation of the exponentiation of elements of input along the given axis.
-By default, the first element of the result is the same of the first element of
-the input. If exclusive is true, the first element of the result is the lowest finite value of the dtype of output tensor.
-)DOC");
-  }
-};
-
-class LogcumsumexpGradOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
-  void InferShape(framework::InferShapeContext* ctx) const override {
-    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "logcumsumexp");
-    OP_INOUT_CHECK(ctx->HasInput("Out"), "Input", "Out", "logcumsumexp");
-    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input",
-                   "Out@GRAD",
-                   "logcumsumexp");
-    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
-  }
-};
-
-template <typename T>
-class LogcumsumexpGradMaker : public framework::SingleGradOpMaker<T> {
- public:
-  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
-
- protected:
-  void Apply(GradOpPtr<T> grad_op) const override {
-    grad_op->SetType("logcumsumexp_grad");
-    grad_op->SetInput("X", this->Input("X"));
-    grad_op->SetInput("Out", this->Output("Out"));
-    grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
-    grad_op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
-    grad_op->SetAttr("axis", PADDLE_GET_CONST(int, this->GetAttr("axis")));
-    grad_op->SetAttr("flatten",
-                     PADDLE_GET_CONST(bool, this->GetAttr("flatten")));
-    grad_op->SetAttr("exclusive",
-                     PADDLE_GET_CONST(bool, this->GetAttr("exclusive")));
-    grad_op->SetAttr("reverse",
-                     PADDLE_GET_CONST(bool, this->GetAttr("reverse")));
-  }
-};
-
 }  // namespace operators
 }  // namespace paddle
 
@@ -200,9 +132,6 @@
 DECLARE_INFER_SHAPE_FUNCTOR(cumsum,
                             CumsumInferShapeFunctor,
                             PD_INFER_META(phi::CumScalarAxisInferMeta));
-DECLARE_INFER_SHAPE_FUNCTOR(logcumsumexp,
-                            LogcumsumexpInferShapeFunctor,
-                            PD_INFER_META(phi::CumInferMeta));
 REGISTER_OPERATOR(cumsum,
                   ops::CumOp,
                   ops::CumsumOpMaker,
@@ -210,13 +139,6 @@ REGISTER_OPERATOR(cumsum,
                   ops::CumsumGradMaker<paddle::framework::OpDesc>,
                   ops::CumsumGradMaker<paddle::imperative::OpBase>,
                   CumsumInferShapeFunctor);
-REGISTER_OPERATOR(logcumsumexp,
-                  ops::CumOp,
-                  ops::LogcumsumexpOpMaker,
-                  ops::LogcumsumexpGradMaker<paddle::framework::OpDesc>,
-                  ops::LogcumsumexpGradMaker<paddle::imperative::OpBase>,
-                  LogcumsumexpInferShapeFunctor);
-REGISTER_OPERATOR(logcumsumexp_grad, ops::LogcumsumexpGradOp);
 REGISTER_OPERATOR(cumsum_grad, ops::CumGradOp);
 
 REGISTER_OP_VERSION(cumsum).AddCheckpoint(
diff --git a/paddle/phi/api/yaml/backward.yaml b/paddle/phi/api/yaml/backward.yaml
index e0a12e13fb4e94de5240adf62f120e5309da21fc..0a6062dd8294c52febb3c82a15861feaaf155136 100644
--- a/paddle/phi/api/yaml/backward.yaml
+++ b/paddle/phi/api/yaml/backward.yaml
@@ -966,6 +966,16 @@
     func : log_softmax_grad
     data_type : out_grad
 
+- backward_op : logcumsumexp_grad
+  forward : logcumsumexp(Tensor x, int axis=-1, bool flatten=false, bool exclusive=false, bool reverse=false) -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad, int axis, bool flatten, bool exclusive, bool reverse)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : logcumsumexp_grad
+
 - backward_op : logit_grad
   forward : logit (Tensor x, float eps = 1e-6f) -> Tensor(out)
   args : (Tensor x, Tensor out_grad, float eps)
diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml
index 1e11bc54b3f51c102201520d9c8ed3fbaf2e72f7..b655f379cf71ebe31ecc2345e725086e5a008640 100755
--- a/paddle/phi/api/yaml/legacy_backward.yaml
+++ b/paddle/phi/api/yaml/legacy_backward.yaml
@@ -556,16 +556,6 @@
   no_need_buffer : bias
   optional : scale, bias
 
-- backward_op : logcumsumexp_grad
-  forward : logcumsumexp(Tensor x, int axis, bool flatten, bool exclusive, bool reverse) -> Tensor(out)
-  args : (Tensor x, Tensor out, Tensor out_grad, int axis, bool flatten, bool exclusive, bool reverse)
-  output : Tensor(x_grad)
-  infer_meta :
-    func : UnchangedInferMeta
-    param : [x]
-  kernel :
-    func : logcumsumexp_grad
-
 - backward_op : logsumexp_grad
   forward : logsumexp(Tensor x, int64_t[] axis, bool keepdim, bool reduce_all) -> Tensor(out)
   args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] axis, bool keepdim, bool reduce_all)
diff --git a/paddle/phi/api/yaml/legacy_ops.yaml b/paddle/phi/api/yaml/legacy_ops.yaml
index a689fbc17dfaf4c1c17d77d29b5597ebdaa0f48c..d827e7eabbfa7b0ab38073bcead528c249826fb2 100755
--- a/paddle/phi/api/yaml/legacy_ops.yaml
+++ b/paddle/phi/api/yaml/legacy_ops.yaml
@@ -800,15 +800,6 @@
     data_type : dtype
     backend : place
 
-- op : logcumsumexp
-  args : (Tensor x, int axis, bool flatten, bool exclusive, bool reverse)
-  output : Tensor(out)
-  infer_meta :
-    func : CumInferMeta
-  kernel :
-    func : logcumsumexp
-  backward : logcumsumexp_grad
-
 - op : logspace
   args : (Tensor start, Tensor stop, Tensor num, Tensor base, DataType dtype, Place place={})
   output : Tensor(out)
diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml
index 8a2ce29511f06cb9495087444577c03fbf0bd88b..552895cf25fde4d86fe97801f3c99606a4aeb9e1 100644
--- a/paddle/phi/api/yaml/op_compat.yaml
+++ b/paddle/phi/api/yaml/op_compat.yaml
@@ -1247,6 +1247,13 @@
   extra :
     attrs : [bool use_mkldnn = false]
 
+- op : logcumsumexp
+  backward : logcumsumexp_grad
+  inputs :
+    x : X
+  outputs :
+    out : Out
+
 - op : logical_and
   inputs :
     {x : X, y : Y}
diff --git a/paddle/phi/api/yaml/ops.yaml b/paddle/phi/api/yaml/ops.yaml
index 110fc1838aba102564c6d864f742bb33c121afac..40e47845fe90022246d2253d7c0802a5ee29383e 100644
--- a/paddle/phi/api/yaml/ops.yaml
+++ b/paddle/phi/api/yaml/ops.yaml
@@ -1003,6 +1003,15 @@
     data_type : x
   backward : log_softmax_grad
 
+- op : logcumsumexp
+  args : (Tensor x, int axis=-1, bool flatten=false, bool exclusive=false, bool reverse=false)
+  output : Tensor(out)
+  infer_meta :
+    func : CumInferMeta
+  kernel :
+    func : logcumsumexp
+  backward : logcumsumexp_grad
+
 - op : logical_and
   args : (Tensor x, Tensor y)
   output : Tensor(out)
diff --git a/paddle/phi/ops/compat/logcumsumexp_sig.cc b/paddle/phi/ops/compat/logcumsumexp_sig.cc
deleted file mode 100644
index 2c790903b633301cf9c42d872e0609c33a2ab606..0000000000000000000000000000000000000000
--- a/paddle/phi/ops/compat/logcumsumexp_sig.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "paddle/phi/core/compat/op_utils.h"
-
-namespace phi {
-
-KernelSignature LogcumsumexpOpArgumentMapping(
-    const ArgumentMappingContext& ctx) {
-  return KernelSignature("logcumsumexp",
-                         {"X"},
-                         {"axis", "flatten", "exclusive", "reverse"},
-                         {"Out"});
-}
-
-KernelSignature LogcumsumexpGradOpArgumentMapping(
-    const ArgumentMappingContext& ctx) {
-  return KernelSignature("logcumsumexp_grad",
-                         {"X", "Out", "Out@GRAD"},
-                         {"axis", "flatten", "exclusive", "reverse"},
-                         {"X@GRAD"});
-}
-
-}  // namespace phi
-
-PD_REGISTER_ARG_MAPPING_FN(logcumsumexp, phi::LogcumsumexpOpArgumentMapping);
-PD_REGISTER_ARG_MAPPING_FN(logcumsumexp_grad,
-                           phi::LogcumsumexpGradOpArgumentMapping);