Unverified commit f9fadfc4, authored by LoneRanger, committed by GitHub

add autogen code support for lu (#52802)

* add autogen code support for lu

* fix bug

* fix bug

* fix bug

* fix bug
Parent 77b4d0f1
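Summary of the change: the hand-written C++ registration for the `lu` operator (`lu_op.cc` with `LUOp`, `LUOpMaker`, the var-type inferers, and the `lu_sig.cc` argument mapping, all shown as removed below) is replaced by YAML entries consumed by the operator code generator: new `lu` / `lu_grad` entries in the generated op and backward YAML files, a name-mapping entry in the compatibility YAML, and removal of the corresponding legacy entries. The user-visible behavior of the operator is intended to stay the same. As a rough sanity check, here is a minimal sketch of how the op is typically exercised from the public Python API; it assumes the usual `paddle.linalg.lu` entry point and is illustrative only, not part of this commit.

```python
# Minimal sketch (assumes a Paddle build that exposes paddle.linalg.lu;
# not part of this diff, shown only to illustrate the op's interface).
import paddle

x = paddle.to_tensor([[4.0, 3.0],
                      [6.0, 3.0]])

# Forward pass: returns the packed LU factors and the pivot indices.
# With get_infos=True a per-matrix status tensor is also returned,
# matching the (out, pivots, infos) outputs declared in the YAML entry.
lu, pivots, info = paddle.linalg.lu(x, pivot=True, get_infos=True)
print(lu.numpy(), pivots.numpy(), info.numpy())

# Backward pass: differentiating through the factorization exercises lu_grad.
x.stop_gradient = False
lu2, _ = paddle.linalg.lu(x, pivot=True)
lu2.sum().backward()
print(x.grad.numpy())
```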
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/unary.h"
namespace paddle {
namespace operators {
class LUOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddComment(R"DOC(LU decomposition,
Computes the LU factorization of a matrix or batches of matrices A.
)DOC");
AddInput("X", "(Tensor) The input tensor, shape of (*,m,n)");
AddOutput("Out", "(Tensor) The output tensor, shape same to X");
AddOutput("Pivots",
"Stores all the intermediate transpositions of rows. shape of "
"(*,min(m,n))");
AddOutput("Infos",
"(Tensor) This is a tensor of size (*) where non-zero values "
"indicate whether factorization for the matrix has succeeded");
AddAttr<bool>("pivots", "Whether pivoting is done").SetDefault(true);
}
};
class LUOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext &ctx) const override {
return phi::KernelKey(OperatorWithKernel::IndicateVarDataType(ctx, "X"),
ctx.GetPlace());
}
};
class LUOpVarTypeInference : public framework::VarTypeInference {
public:
void operator()(framework::InferVarTypeContext *ctx) const override {
auto var_type = ctx->GetInputType("X", 0);
auto data_type = ctx->GetInputDataType("X", 0);
ctx->SetOutputType("Out", var_type, framework::ALL_ELEMENTS);
ctx->SetOutputDataType("Out", data_type, framework::ALL_ELEMENTS);
ctx->SetOutputType("Pivots", var_type, framework::ALL_ELEMENTS);
ctx->SetOutputDataType(
"Pivots", framework::proto::VarType::INT32, framework::ALL_ELEMENTS);
ctx->SetOutputType("Infos", var_type, framework::ALL_ELEMENTS);
ctx->SetOutputDataType(
"Infos", framework::proto::VarType::INT32, framework::ALL_ELEMENTS);
}
};
template <typename T>
class LUOpGradMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> retv) const override {
retv->SetType("lu_grad");
retv->SetInput("X", this->Input("X"));
retv->SetInput("Out", this->Output("Out"));
retv->SetInput("Pivots", this->Output("Pivots"));
retv->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
retv->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
retv->SetAttrMap(this->Attrs());
}
};
class LUGradOpVarTypeInference : public framework::VarTypeInference {
public:
void operator()(framework::InferVarTypeContext *ctx) const override {
auto var_type = ctx->GetInputType("X", 0);
auto data_type = ctx->GetInputDataType("X", 0);
ctx->SetOutputType(
framework::GradVarName("X"), var_type, framework::ALL_ELEMENTS);
ctx->SetOutputDataType(
framework::GradVarName("X"), data_type, framework::ALL_ELEMENTS);
}
};
class LUGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext &ctx) const override {
auto dtype = OperatorWithKernel::IndicateVarDataType(ctx, "X");
return phi::KernelKey(dtype, ctx.GetPlace());
}
};
DECLARE_INPLACE_OP_INFERER(LUOpInplaceInferer, {"X", "Out"});
DECLARE_INPLACE_OP_INFERER(LUGradOpInplaceInferer,
{framework::GradVarName("Out"),
framework::GradVarName("X")});
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
DECLARE_INFER_SHAPE_FUNCTOR(lu,
LUInferMetaFunctor,
PD_INFER_META(phi::LUInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(lu_grad,
LUGradInferMetaFunctor,
PD_INFER_META(phi::LUGradInferMeta));
REGISTER_OPERATOR(lu,
ops::LUOp,
ops::LUOpMaker,
ops::LUOpVarTypeInference,
ops::LUOpGradMaker<paddle::framework::OpDesc>,
ops::LUOpGradMaker<paddle::imperative::OpBase>,
LUInferMetaFunctor);
REGISTER_OPERATOR(lu_grad,
ops::LUGradOp,
ops::LUGradOpVarTypeInference,
LUGradInferMetaFunctor);
@@ -1040,6 +1040,16 @@
func : logsigmoid_grad
inplace : (out_grad -> x_grad)
- backward_op : lu_grad
forward : lu (Tensor x, bool pivot = true) -> Tensor(out), Tensor(pivots), Tensor(infos)
args : (Tensor x, Tensor out, Tensor pivots, Tensor out_grad, bool pivot)
output : Tensor(x_grad)
infer_meta :
func : LUGradInferMeta
kernel :
func : lu_grad
inplace : (out_grad -> x_grad)
- backward_op : lu_unpack_grad
forward : lu_unpack (Tensor x, Tensor y, bool unpack_ludata = true, bool unpack_pivots = true) -> Tensor(pmat), Tensor(l), Tensor(u)
args : (Tensor x, Tensor y, Tensor l, Tensor u, Tensor pmat, Tensor l_grad, Tensor u_grad, bool unpack_ludata, bool unpack_pivots)
@@ -522,15 +522,6 @@
kernel :
func : logsumexp_grad
- backward_op : lu_grad
forward : lu (Tensor x, bool pivot) -> Tensor(out), Tensor(pivots), Tensor(infos)
args : (Tensor x, Tensor out, Tensor pivots, Tensor out_grad, bool pivot)
output : Tensor(x_grad)
infer_meta :
func : LUGradInferMeta
kernel :
func : lu_grad
- backward_op : matmul_double_grad
forward : matmul_grad (Tensor x, Tensor y, Tensor grad_out, bool transpose_x=false, bool transpose_y=false) -> Tensor(grad_x), Tensor(grad_y)
args : (Tensor x, Tensor y, Tensor grad_out, Tensor grad_x_grad, Tensor grad_y_grad, bool transpose_x=false, bool transpose_y=false)
@@ -745,15 +745,6 @@
func : lstsq
data_type : x
- op : lu
args : (Tensor x, bool pivot)
output : Tensor(out), Tensor(pivots), Tensor(infos)
infer_meta :
func : LUInferMeta
kernel :
func : lu
backward : lu_grad
- op : matmul
args : (Tensor x, Tensor y, bool transpose_x = false, bool transpose_y = false)
output : Tensor
@@ -2366,6 +2366,15 @@
outputs :
{boxes : Boxes, scores : Scores}
- op: lu
backward: lu_grad
inputs:
x: X
outputs:
{out: Out, pivots : Pivots, infos : Infos}
attrs:
pivot : pivots
- op: sigmoid_cross_entropy_with_logits
backward: sigmoid_cross_entropy_with_logits_grad
inputs :
@@ -1159,6 +1159,17 @@
func : logsigmoid
backward : logsigmoid_grad
- op : lu
args : (Tensor x, bool pivot = true)
output : Tensor(out), Tensor(pivots), Tensor(infos)
infer_meta :
func : LUInferMeta
kernel :
func : lu
data_type : x
inplace : (x -> out)
backward : lu_grad
- op : lu_unpack
args : (Tensor x, Tensor y, bool unpack_ludata = true, bool unpack_pivots = true)
output : Tensor(pmat), Tensor(l), Tensor(u)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature LUOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature("lu", {"X"}, {"pivots"}, {"Out", "Pivots", "Infos"});
}
KernelSignature LUGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature(
"lu_grad", {"X", "Out", "Pivots", "Out@GRAD"}, {"pivots"}, {"X@GRAD"});
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(lu, phi::LUOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(lu_grad, phi::LUGradOpArgumentMapping);