Unverified commit ad9b88ad, authored by RedContritio, committed by GitHub

[Static graph operator auto-generation] support auto generate for log_softmax (#52036)

* support auto generate for log_softmax

* add data_type
Parent cfe3ff48
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <string>
#include <unordered_map>
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/common_infer_shape_functions.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"
namespace paddle {
namespace operators {
class LogSoftmaxOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
auto input_data_type =
framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");
return phi::KernelKey(input_data_type, ctx.GetPlace());
}
};
class LogSoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X",
"The input tensor of softmax, "
"whose dimension :attr:`axis` is the input_feature_dimensions.");
AddOutput("Out", "The normalized values with the same shape as X.");
AddAttr<int>("axis",
"The dimension index of Input(x) to perform log_softmax,"
"default -1 for last dimension")
.SetDefault(-1);
AddComment(R"DOC(
LogSoftmax Operator.
)DOC");
}
};
class LogSoftmaxOpInferVarType
: public framework::PassInDtypeAndVarTypeToOutput {
protected:
std::unordered_map<std::string, std::string>& GetInputOutputWithSameType()
const override {
static std::unordered_map<std::string, std::string> m{{"X", /*->*/ "Out"}};
return m;
}
};
class LogSoftmaxGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
OP_INOUT_CHECK(ctx->HasInput("Out"), "Input", "Out", "log_softmax_grad");
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")),
"Input",
"Out@grad",
"log_softmax_grad");
PADDLE_ENFORCE_EQ(
ctx->GetInputDim("Out"),
ctx->GetInputDim(framework::GradVarName("Out")),
platform::errors::InvalidArgument("Input(Out) and its gradients "
"should have the same shape."));
ctx->SetOutputDim(framework::GradVarName("X"),
ctx->GetInputDim(framework::GradVarName("Out")));
}
protected:
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
return phi::KernelKey(OperatorWithKernel::IndicateVarDataType(
ctx, framework::GradVarName("Out")),
ctx.device_context().GetPlace());
}
};
template <typename T>
class LogSoftmaxGradOpMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> op) const override {
op->SetType("log_softmax_grad");
op->SetInput("Out", this->Output("Out"));
op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
op->SetAttrMap(this->Attrs());
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(log_softmax,
LogSoftmaxInferShapeFunctor,
PD_INFER_META(phi::UnchangedInferMetaCheckAxis));
REGISTER_OPERATOR(log_softmax,
ops::LogSoftmaxOp,
ops::LogSoftmaxOpMaker,
ops::LogSoftmaxOpInferVarType,
ops::LogSoftmaxGradOpMaker<paddle::framework::OpDesc>,
ops::LogSoftmaxGradOpMaker<paddle::imperative::OpBase>,
LogSoftmaxInferShapeFunctor);
REGISTER_OPERATOR(log_softmax_grad, ops::LogSoftmaxGradOp);
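Everything above is the hand-written registration that this PR deletes; the math itself lives in the phi log_softmax kernel. For intuition only, a minimal NumPy sketch of the forward computation (log_softmax_ref is a made-up name, not the kernel code; the max subtraction is the usual overflow guard):

import numpy as np

# Numerically stable reference: log_softmax(x) = (x - m) - log(sum(exp(x - m))),
# where m is the per-axis maximum.
def log_softmax_ref(x, axis=-1):
    m = np.max(x, axis=axis, keepdims=True)
    shifted = x - m
    return shifted - np.log(np.sum(np.exp(shifted), axis=axis, keepdims=True))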
@@ -891,6 +891,17 @@
  kernel :
    func : log_loss_grad

- backward_op : log_softmax_grad
  forward : log_softmax(Tensor x, int axis = -1) -> Tensor(out)
  args : (Tensor out, Tensor out_grad, int axis)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out]
  kernel :
    func : log_softmax_grad
    data_type : out_grad
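A note on the entry above: args takes out (the forward output) instead of x because, with out = log_softmax(x), exp(out) equals softmax(x), so the gradient is dx = out_grad - exp(out) * sum(out_grad, axis) and no extra forward input has to be saved; data_type accordingly binds to out_grad. A minimal NumPy sketch of that identity (illustrative only, not the phi kernel; log_softmax_grad_ref is a made-up name):

import numpy as np

# Reference for the backward entry above; exp(out) == softmax(x).
def log_softmax_grad_ref(out, out_grad, axis=-1):
    return out_grad - np.exp(out) * np.sum(out_grad, axis=axis, keepdims=True)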

- backward_op : logit_grad
  forward : logit (Tensor x, float eps = 1e-6f) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, float eps)
  ...
@@ -622,16 +622,6 @@
  no_need_buffer : bias
  optional : scale, bias

- backward_op : log_softmax_grad
  forward : log_softmax(Tensor x, int axis) -> Tensor(out)
  args : (Tensor out, Tensor out_grad, int axis)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out]
  kernel :
    func : log_softmax_grad

- backward_op : logcumsumexp_grad
  forward : logcumsumexp(Tensor x, int axis, bool flatten, bool exclusive, bool reverse) -> Tensor(out)
  infer_meta :
  ...
@@ -892,15 +892,6 @@
    data_type : dtype
    backend : place

- op : log_softmax
  args : (Tensor x, int axis)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMetaCheckAxis
  kernel :
    func : log_softmax
  backward : log_softmax_grad

- op : logcumsumexp
  args : (Tensor x, int axis, bool flatten, bool exclusive, bool reverse)
  output : Tensor(out)
  ...
@@ -1103,6 +1103,10 @@
- op : log_softmax
  backward : log_softmax_grad
  inputs :
    x : X
  outputs :
    out : Out
  extra :
    attrs : [bool use_mkldnn = false]
  ...
@@ -894,6 +894,16 @@
  kernel :
    func : log_loss
  backward : log_loss_grad

- op : log_softmax
  args : (Tensor x, int axis = -1)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMetaCheckAxis
  kernel :
    func : log_softmax
    data_type : x
  backward : log_softmax_grad
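With this entry, the forward operator is generated with the signature log_softmax(Tensor x, int axis = -1) and its kernel data type bound to x. For orientation, a short usage sketch through the public Python API (assuming the standard paddle.nn.functional.log_softmax wrapper):

import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([[1.0, 2.0, 3.0]])
# axis defaults to -1, matching the YAML signature above
out = F.log_softmax(x)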

- op : logit
  args : (Tensor x, float eps = 1e-6f)
  output : Tensor
  ...
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature LogSoftmaxGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature(
"log_softmax_grad", {"Out", "Out@GRAD"}, {"axis"}, {"X@GRAD"});
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(log_softmax_grad,
phi::LogSoftmaxGradOpArgumentMapping);
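This hand-written mapping, also removed by the PR, translated the fluid-era variable names Out, Out@GRAD, and X@GRAD into the phi kernel signature log_softmax_grad(out, out_grad, axis) -> x_grad; with the YAML entries above, the same mapping can be generated automatically. A hedged sketch of where those names appear in a static graph (assuming paddle.static.gradients; the program construction is illustrative):

import paddle
import paddle.nn.functional as F

paddle.enable_static()
main = paddle.static.Program()
with paddle.static.program_guard(main):
    x = paddle.static.data("x", shape=[2, 3], dtype="float32")
    out = F.log_softmax(x, axis=-1)
    # Appending backward inserts a log_softmax_grad op that reads
    # Out and Out@GRAD and writes X@GRAD, matching the mapping above.
    (x_grad,) = paddle.static.gradients([out], [x])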