diff --git a/paddle/fluid/operators/log_softmax_op.cc b/paddle/fluid/operators/log_softmax_op.cc
deleted file mode 100644
index eb3ee5b7cd9be2dac0b1bd5e21a4d57cf221da98..0000000000000000000000000000000000000000
--- a/paddle/fluid/operators/log_softmax_op.cc
+++ /dev/null
@@ -1,126 +0,0 @@
-/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include <string>
-#include <unordered_map>
-
-#include "paddle/fluid/framework/infershape_utils.h"
-#include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/operators/common_infer_shape_functions.h"
-#include "paddle/phi/core/infermeta_utils.h"
-#include "paddle/phi/infermeta/unary.h"
-
-namespace paddle {
-namespace operators {
-
-class LogSoftmaxOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
- protected:
-  phi::KernelKey GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const override {
-    auto input_data_type =
-        framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");
-    return phi::KernelKey(input_data_type, ctx.GetPlace());
-  }
-};
-
-class LogSoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  void Make() override {
-    AddInput("X",
-             "The input tensor of softmax, "
-             "whose dimension :attr:`axis` is the input_feature_dimensions.");
-    AddOutput("Out", "The normalized values with the same shape as X.");
-    AddAttr<int>("axis",
-                 "The dimension index of Input(x) to perform log_softmax,"
-                 "default -1 for last dimension")
-        .SetDefault(-1);
-    AddComment(R"DOC(
-LogSoftmax Operator.
-
-)DOC");
-  }
-};
-
-class LogSoftmaxOpInferVarType
-    : public framework::PassInDtypeAndVarTypeToOutput {
- protected:
-  std::unordered_map<std::string, std::string>& GetInputOutputWithSameType()
-      const override {
-    static std::unordered_map<std::string, std::string> m{{"X", /*->*/ "Out"}};
-    return m;
-  }
-};
-
-class LogSoftmaxGradOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
-  void InferShape(framework::InferShapeContext* ctx) const override {
-    OP_INOUT_CHECK(ctx->HasInput("Out"), "Input", "Out", "log_softmax_grad");
-    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input",
-                   "Out@grad",
-                   "log_softmax_grad");
-    PADDLE_ENFORCE_EQ(
-        ctx->GetInputDim("Out"),
-        ctx->GetInputDim(framework::GradVarName("Out")),
-        platform::errors::InvalidArgument("Input(Out) and its gradients "
-                                          "should have the same shape."));
-
-    ctx->SetOutputDim(framework::GradVarName("X"),
-                      ctx->GetInputDim(framework::GradVarName("Out")));
-  }
-
- protected:
-  phi::KernelKey GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const override {
-    return phi::KernelKey(OperatorWithKernel::IndicateVarDataType(
-                              ctx, framework::GradVarName("Out")),
-                          ctx.device_context().GetPlace());
-  }
-};
-
-template <typename T>
-class LogSoftmaxGradOpMaker : public framework::SingleGradOpMaker<T> {
- public:
-  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
-
- protected:
-  void Apply(GradOpPtr<T> op) const override {
-    op->SetType("log_softmax_grad");
-    op->SetInput("Out", this->Output("Out"));
-    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
-    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
-    op->SetAttrMap(this->Attrs());
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
-
-namespace ops = paddle::operators;
-DECLARE_INFER_SHAPE_FUNCTOR(log_softmax,
-                            LogSoftmaxInferShapeFunctor,
-                            PD_INFER_META(phi::UnchangedInferMetaCheckAxis));
-REGISTER_OPERATOR(log_softmax,
-                  ops::LogSoftmaxOp,
-                  ops::LogSoftmaxOpMaker,
-                  ops::LogSoftmaxOpInferVarType,
-                  ops::LogSoftmaxGradOpMaker<paddle::framework::OpDesc>,
-                  ops::LogSoftmaxGradOpMaker<paddle::imperative::OpBase>,
-                  LogSoftmaxInferShapeFunctor);
-REGISTER_OPERATOR(log_softmax_grad, ops::LogSoftmaxGradOp);
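Context for the deletion above: for y = log_softmax(x) along `axis`, the gradient needs only the forward output, since dx_i = dy_i - exp(y_i) * sum_j dy_j. That is why the removed LogSoftmaxGradOp reads Out and Out@GRAD rather than X, and why the YAML entry added below takes (Tensor out, Tensor out_grad, int axis). A minimal standalone sketch of the two formulas over a 1-D slice, assuming nothing from Paddle (plain C++; the function names are illustrative, not Paddle APIs):

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

// Numerically stable y_i = x_i - max(x) - log(sum_j exp(x_j - max(x))).
std::vector<double> LogSoftmax(const std::vector<double>& x) {
  const double mx = *std::max_element(x.begin(), x.end());
  double sum = 0.0;
  for (double v : x) sum += std::exp(v - mx);
  const double lse = mx + std::log(sum);
  std::vector<double> y(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) y[i] = x[i] - lse;
  return y;
}

// dx_i = dy_i - exp(y_i) * sum_j dy_j; consumes only y (Out) and dy (Out@GRAD).
std::vector<double> LogSoftmaxGrad(const std::vector<double>& y,
                                   const std::vector<double>& dy) {
  double dsum = 0.0;
  for (double v : dy) dsum += v;
  std::vector<double> dx(y.size());
  for (std::size_t i = 0; i < y.size(); ++i)
    dx[i] = dy[i] - std::exp(y[i]) * dsum;
  return dx;
}

int main() {
  const std::vector<double> x{1.0, 2.0, 3.0};
  const std::vector<double> dy{1.0, 0.0, 0.0};
  const std::vector<double> y = LogSoftmax(x);
  const std::vector<double> dx = LogSoftmaxGrad(y, dy);
  for (std::size_t i = 0; i < x.size(); ++i)
    std::printf("y[%zu] = % .6f  dx[%zu] = % .6f\n", i, y[i], i, dx[i]);
  return 0;
}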
- -)DOC"); - } -}; - -class LogSoftmaxOpInferVarType - : public framework::PassInDtypeAndVarTypeToOutput { - protected: - std::unordered_map& GetInputOutputWithSameType() - const override { - static std::unordered_map m{{"X", /*->*/ "Out"}}; - return m; - } -}; - -class LogSoftmaxGradOp : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - void InferShape(framework::InferShapeContext* ctx) const override { - OP_INOUT_CHECK(ctx->HasInput("Out"), "Input", "Out", "log_softmax_grad"); - OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), - "Input", - "Out@grad", - "log_softmax_grad"); - PADDLE_ENFORCE_EQ( - ctx->GetInputDim("Out"), - ctx->GetInputDim(framework::GradVarName("Out")), - platform::errors::InvalidArgument("Input(Out) and its gradients " - "should have the same shape.")); - - ctx->SetOutputDim(framework::GradVarName("X"), - ctx->GetInputDim(framework::GradVarName("Out"))); - } - - protected: - phi::KernelKey GetExpectedKernelType( - const framework::ExecutionContext& ctx) const override { - return phi::KernelKey(OperatorWithKernel::IndicateVarDataType( - ctx, framework::GradVarName("Out")), - ctx.device_context().GetPlace()); - } -}; - -template -class LogSoftmaxGradOpMaker : public framework::SingleGradOpMaker { - public: - using framework::SingleGradOpMaker::SingleGradOpMaker; - - protected: - void Apply(GradOpPtr op) const override { - op->SetType("log_softmax_grad"); - op->SetInput("Out", this->Output("Out")); - op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out")); - op->SetOutput(framework::GradVarName("X"), this->InputGrad("X")); - op->SetAttrMap(this->Attrs()); - } -}; - -} // namespace operators -} // namespace paddle - -namespace ops = paddle::operators; -DECLARE_INFER_SHAPE_FUNCTOR(log_softmax, - LogSoftmaxInferShapeFunctor, - PD_INFER_META(phi::UnchangedInferMetaCheckAxis)); -REGISTER_OPERATOR(log_softmax, - ops::LogSoftmaxOp, - ops::LogSoftmaxOpMaker, - ops::LogSoftmaxOpInferVarType, - ops::LogSoftmaxGradOpMaker, - ops::LogSoftmaxGradOpMaker, - LogSoftmaxInferShapeFunctor); -REGISTER_OPERATOR(log_softmax_grad, ops::LogSoftmaxGradOp); diff --git a/paddle/phi/api/yaml/backward.yaml b/paddle/phi/api/yaml/backward.yaml index b93926641c2d5d0fd5262c4cc9d7bc4286f2df6b..342a2488dc859f9168ae22c6beba813c2dfbcb94 100644 --- a/paddle/phi/api/yaml/backward.yaml +++ b/paddle/phi/api/yaml/backward.yaml @@ -891,6 +891,17 @@ kernel : func : log_loss_grad +- backward_op : log_softmax_grad + forward : log_softmax(Tensor x, int axis = -1) -> Tensor(out) + args : (Tensor out, Tensor out_grad, int axis) + output : Tensor(x_grad) + infer_meta : + func : UnchangedInferMeta + param: [out] + kernel : + func : log_softmax_grad + data_type : out_grad + - backward_op : logit_grad forward : logit (Tensor x, float eps = 1e-6f) -> Tensor(out) args : (Tensor x, Tensor out_grad, float eps) diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml index 6f6da65453362acfc41f47a94804c04ddb50fb2b..d856ddb6999f35c142d2037a0d6417997e759995 100755 --- a/paddle/phi/api/yaml/legacy_backward.yaml +++ b/paddle/phi/api/yaml/legacy_backward.yaml @@ -622,16 +622,6 @@ no_need_buffer : bias optional : scale, bias -- backward_op : log_softmax_grad - forward : log_softmax(Tensor x, int axis) -> Tensor(out) - args : (Tensor out, Tensor out_grad, int axis) - output : Tensor(x_grad) - infer_meta : - func : UnchangedInferMeta - param: [out] - kernel : - func : log_softmax_grad - - backward_op : 
diff --git a/paddle/phi/api/yaml/legacy_ops.yaml b/paddle/phi/api/yaml/legacy_ops.yaml
index 54329cef0ad7a5ee423d502d0b5a520b98b2eb1c..ca92fab1bba70323240132bba98824bdee77032e 100755
--- a/paddle/phi/api/yaml/legacy_ops.yaml
+++ b/paddle/phi/api/yaml/legacy_ops.yaml
@@ -892,15 +892,6 @@
     data_type : dtype
     backend : place
 
-- op : log_softmax
-  args : (Tensor x, int axis)
-  output : Tensor(out)
-  infer_meta :
-    func : UnchangedInferMetaCheckAxis
-  kernel :
-    func : log_softmax
-  backward : log_softmax_grad
-
 - op : logcumsumexp
   args : (Tensor x, int axis, bool flatten, bool exclusive, bool reverse)
   output : Tensor(out)
diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml
index 3949197901828abaa07c01d6b2994788bdcc6ab1..61e0c5708d1eb2cf87ab97d145b8084ba9c2ba4b 100644
--- a/paddle/phi/api/yaml/op_compat.yaml
+++ b/paddle/phi/api/yaml/op_compat.yaml
@@ -1103,6 +1103,10 @@
 
 - op : log_softmax
   backward : log_softmax_grad
+  inputs :
+    x : X
+  outputs :
+    out: Out
   extra :
     attrs : [bool use_mkldnn = false]
 
diff --git a/paddle/phi/api/yaml/ops.yaml b/paddle/phi/api/yaml/ops.yaml
index dee3dea12bb982d9bde5c8dca7c96c143efce2aa..3902cf6b0dbb91f9be4431af4d04a124373dc7be 100644
--- a/paddle/phi/api/yaml/ops.yaml
+++ b/paddle/phi/api/yaml/ops.yaml
@@ -894,6 +894,16 @@
     func : log_loss
   backward : log_loss_grad
 
+- op : log_softmax
+  args : (Tensor x, int axis = -1)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMetaCheckAxis
+  kernel :
+    func : log_softmax
+    data_type : x
+  backward : log_softmax_grad
+
 - op : logit
   args : (Tensor x, float eps = 1e-6f)
   output : Tensor
diff --git a/paddle/phi/ops/compat/log_softmax_sig.cc b/paddle/phi/ops/compat/log_softmax_sig.cc
deleted file mode 100644
index 20635c89875f89dbfcf07b404fa57a654ecd8bd8..0000000000000000000000000000000000000000
--- a/paddle/phi/ops/compat/log_softmax_sig.cc
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "paddle/phi/core/compat/op_utils.h"
-
-namespace phi {
-
-KernelSignature LogSoftmaxGradOpArgumentMapping(
-    const ArgumentMappingContext& ctx) {
-  return KernelSignature(
-      "log_softmax_grad", {"Out", "Out@GRAD"}, {"axis"}, {"X@GRAD"});
-}
-
-}  // namespace phi
-
-PD_REGISTER_ARG_MAPPING_FN(log_softmax_grad,
-                           phi::LogSoftmaxGradOpArgumentMapping);
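With the forward op now registered in the generated ops.yaml (likewise gaining the `axis = -1` default plus an explicit `data_type : x`) and the `inputs`/`outputs` name mapping recorded in op_compat.yaml, the framework can derive the mapping that log_softmax_sig.cc spelled out by hand, from {"Out", "Out@GRAD"} and attribute {"axis"} to {"X@GRAD"}, which is presumably why both the signature file and the fluid operator definition can be deleted outright.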