From ad9b88adf215baca8e58f03f34953df90905fe28 Mon Sep 17 00:00:00 2001
From: RedContritio
Date: Tue, 28 Mar 2023 14:52:43 +0800
Subject: [PATCH] [Static graph operator auto-generation] support auto
 generate for log_softmax (#52036)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* support auto generate for log_softmax

* add data_type
---
 paddle/fluid/operators/log_softmax_op.cc | 126 -----------------------
 paddle/phi/api/yaml/backward.yaml        |  11 ++
 paddle/phi/api/yaml/legacy_backward.yaml |  10 --
 paddle/phi/api/yaml/legacy_ops.yaml      |   9 --
 paddle/phi/api/yaml/op_compat.yaml       |   4 +
 paddle/phi/api/yaml/ops.yaml             |  10 ++
 paddle/phi/ops/compat/log_softmax_sig.cc |  28 -----
 7 files changed, 25 insertions(+), 173 deletions(-)
 delete mode 100644 paddle/fluid/operators/log_softmax_op.cc
 delete mode 100644 paddle/phi/ops/compat/log_softmax_sig.cc

diff --git a/paddle/fluid/operators/log_softmax_op.cc b/paddle/fluid/operators/log_softmax_op.cc
deleted file mode 100644
index eb3ee5b7cd9..00000000000
--- a/paddle/fluid/operators/log_softmax_op.cc
+++ /dev/null
@@ -1,126 +0,0 @@
-/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include <string>
-#include <unordered_map>
-
-#include "paddle/fluid/framework/infershape_utils.h"
-#include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/operators/common_infer_shape_functions.h"
-#include "paddle/phi/core/infermeta_utils.h"
-#include "paddle/phi/infermeta/unary.h"
-
-namespace paddle {
-namespace operators {
-
-class LogSoftmaxOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
- protected:
-  phi::KernelKey GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const override {
-    auto input_data_type =
-        framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");
-    return phi::KernelKey(input_data_type, ctx.GetPlace());
-  }
-};
-
-class LogSoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  void Make() override {
-    AddInput("X",
-             "The input tensor of softmax, "
-             "whose dimension :attr:`axis` is the input_feature_dimensions.");
-    AddOutput("Out", "The normalized values with the same shape as X.");
-    AddAttr<int>("axis",
-                 "The dimension index of Input(x) to perform log_softmax,"
-                 "default -1 for last dimension")
-        .SetDefault(-1);
-    AddComment(R"DOC(
-LogSoftmax Operator.
-
-)DOC");
-  }
-};
-
-class LogSoftmaxOpInferVarType
-    : public framework::PassInDtypeAndVarTypeToOutput {
- protected:
-  std::unordered_map<std::string, std::string>& GetInputOutputWithSameType()
-      const override {
-    static std::unordered_map<std::string, std::string> m{{"X", /*->*/ "Out"}};
-    return m;
-  }
-};
-
-class LogSoftmaxGradOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
-  void InferShape(framework::InferShapeContext* ctx) const override {
-    OP_INOUT_CHECK(ctx->HasInput("Out"), "Input", "Out", "log_softmax_grad");
-    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input",
-                   "Out@grad",
-                   "log_softmax_grad");
-    PADDLE_ENFORCE_EQ(
-        ctx->GetInputDim("Out"),
-        ctx->GetInputDim(framework::GradVarName("Out")),
-        platform::errors::InvalidArgument("Input(Out) and its gradients "
-                                          "should have the same shape."));
-
-    ctx->SetOutputDim(framework::GradVarName("X"),
-                      ctx->GetInputDim(framework::GradVarName("Out")));
-  }
-
- protected:
-  phi::KernelKey GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const override {
-    return phi::KernelKey(OperatorWithKernel::IndicateVarDataType(
-                              ctx, framework::GradVarName("Out")),
-                          ctx.device_context().GetPlace());
-  }
-};
-
-template <typename T>
-class LogSoftmaxGradOpMaker : public framework::SingleGradOpMaker<T> {
- public:
-  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
-
- protected:
-  void Apply(GradOpPtr<T> op) const override {
-    op->SetType("log_softmax_grad");
-    op->SetInput("Out", this->Output("Out"));
-    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
-    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
-    op->SetAttrMap(this->Attrs());
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
-
-namespace ops = paddle::operators;
-DECLARE_INFER_SHAPE_FUNCTOR(log_softmax,
-                            LogSoftmaxInferShapeFunctor,
-                            PD_INFER_META(phi::UnchangedInferMetaCheckAxis));
-REGISTER_OPERATOR(log_softmax,
-                  ops::LogSoftmaxOp,
-                  ops::LogSoftmaxOpMaker,
-                  ops::LogSoftmaxOpInferVarType,
-                  ops::LogSoftmaxGradOpMaker<paddle::framework::OpDesc>,
-                  ops::LogSoftmaxGradOpMaker<paddle::imperative::OpBase>,
-                  LogSoftmaxInferShapeFunctor);
-REGISTER_OPERATOR(log_softmax_grad, ops::LogSoftmaxGradOp);
diff --git a/paddle/phi/api/yaml/backward.yaml b/paddle/phi/api/yaml/backward.yaml
index b93926641c2..342a2488dc8 100644
--- a/paddle/phi/api/yaml/backward.yaml
+++ b/paddle/phi/api/yaml/backward.yaml
@@ -891,6 +891,17 @@
   kernel :
     func : log_loss_grad
 
+- backward_op : log_softmax_grad
+  forward : log_softmax(Tensor x, int axis = -1) -> Tensor(out)
+  args : (Tensor out, Tensor out_grad, int axis)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [out]
+  kernel :
+    func : log_softmax_grad
+    data_type : out_grad
+
 - backward_op : logit_grad
   forward : logit (Tensor x, float eps = 1e-6f) -> Tensor(out)
   args : (Tensor x, Tensor out_grad, float eps)
diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml
index 6f6da654533..d856ddb6999 100755
--- a/paddle/phi/api/yaml/legacy_backward.yaml
+++ b/paddle/phi/api/yaml/legacy_backward.yaml
@@ -622,16 +622,6 @@
   no_need_buffer : bias
   optional : scale, bias
 
-- backward_op : log_softmax_grad
-  forward : log_softmax(Tensor x, int axis) -> Tensor(out)
-  args : (Tensor out, Tensor out_grad, int axis)
-  output : Tensor(x_grad)
-  infer_meta :
-    func : UnchangedInferMeta
-    param: [out]
-  kernel :
-    func : log_softmax_grad
-
 - backward_op : logcumsumexp_grad
   forward : logcumsumexp(Tensor x, int axis, bool flatten, bool exclusive, bool reverse) -> Tensor(out)
   infer_meta :
diff --git a/paddle/phi/api/yaml/legacy_ops.yaml b/paddle/phi/api/yaml/legacy_ops.yaml
index 54329cef0ad..ca92fab1bba 100755
--- a/paddle/phi/api/yaml/legacy_ops.yaml
+++ b/paddle/phi/api/yaml/legacy_ops.yaml
@@ -892,15 +892,6 @@
     data_type : dtype
     backend : place
 
-- op : log_softmax
-  args : (Tensor x, int axis)
-  output : Tensor(out)
-  infer_meta :
-    func : UnchangedInferMetaCheckAxis
-  kernel :
-    func : log_softmax
-  backward : log_softmax_grad
-
 - op : logcumsumexp
   args : (Tensor x, int axis, bool flatten, bool exclusive, bool reverse)
   output : Tensor(out)
diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml
index 39491979018..61e0c5708d1 100644
--- a/paddle/phi/api/yaml/op_compat.yaml
+++ b/paddle/phi/api/yaml/op_compat.yaml
@@ -1103,6 +1103,10 @@
 
 - op : log_softmax
   backward : log_softmax_grad
+  inputs :
+    x : X
+  outputs :
+    out : Out
   extra :
     attrs : [bool use_mkldnn = false]
 
diff --git a/paddle/phi/api/yaml/ops.yaml b/paddle/phi/api/yaml/ops.yaml
index dee3dea12bb..3902cf6b0db 100644
--- a/paddle/phi/api/yaml/ops.yaml
+++ b/paddle/phi/api/yaml/ops.yaml
@@ -894,6 +894,16 @@
     func : log_loss
   backward : log_loss_grad
 
+- op : log_softmax
+  args : (Tensor x, int axis = -1)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMetaCheckAxis
+  kernel :
+    func : log_softmax
+    data_type : x
+  backward : log_softmax_grad
+
 - op : logit
   args : (Tensor x, float eps = 1e-6f)
   output : Tensor
diff --git a/paddle/phi/ops/compat/log_softmax_sig.cc b/paddle/phi/ops/compat/log_softmax_sig.cc
deleted file mode 100644
index 20635c89875..00000000000
--- a/paddle/phi/ops/compat/log_softmax_sig.cc
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "paddle/phi/core/compat/op_utils.h"
-
-namespace phi {
-
-KernelSignature LogSoftmaxGradOpArgumentMapping(
-    const ArgumentMappingContext& ctx) {
-  return KernelSignature(
-      "log_softmax_grad", {"Out", "Out@GRAD"}, {"axis"}, {"X@GRAD"});
-}
-
-}  // namespace phi
-
-PD_REGISTER_ARG_MAPPING_FN(log_softmax_grad,
-                           phi::LogSoftmaxGradOpArgumentMapping);
-- 
GitLab
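
Reviewer note (not part of the patch): this change only moves the op registration from handwritten C++ to the YAML-generated path, so the public Python API should behave identically before and after. Below is a minimal sanity sketch, assuming a Paddle build with this patch applied and run in dynamic graph mode; paddle.nn.functional.log_softmax is the existing public entry point, and the shapes/tolerances are illustrative choices:

    # Sanity sketch: forward result honors the default axis = -1 now declared
    # in ops.yaml, and a backward pass exercises the generated log_softmax_grad.
    import numpy as np
    import paddle
    import paddle.nn.functional as F

    x = paddle.to_tensor(np.random.rand(2, 3, 4).astype("float32"))
    x.stop_gradient = False

    out = F.log_softmax(x)  # default axis=-1, matching `int axis = -1` in ops.yaml

    # exp(log_softmax(x)) must sum to 1 along the softmax axis.
    np.testing.assert_allclose(
        paddle.exp(out).sum(axis=-1).numpy(), np.ones((2, 3)), rtol=1e-5)

    # A scalar backward pass dispatches log_softmax_grad, now generated from
    # the backward.yaml entry added in this patch.
    out.sum().backward()
    assert x.grad.shape == x.shape

The exp-sum invariant is a cheap check of the forward kernel and the default attribute value, while the backward pass confirms the grad signature (Out, Out@GRAD -> X@GRAD) that previously lived in log_softmax_sig.cc is still wired up correctly.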