Unverified commit a2d3c335, authored by 张春乔, committed by GitHub

support auto generate for cumprod (#52047)

* mv cumprod

* add attrs

* Update backward.yaml

* Update backward.yaml
Parent commit: 6d0fa6f2
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/phi/infermeta/unary.h"
namespace paddle {
namespace operators {
// Forward operator definition for cumprod. The class body is intentionally
// empty: shape inference is supplied externally through
// CumprodInferShapeFunctor (declared at the bottom of this file), and the
// kernel itself lives in the phi library, so only the inherited
// OperatorWithKernel constructor is needed here.
class CumprodOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
};
// Declares the proto (inputs, outputs, attributes and documentation) of the
// cumprod operator:
//   X   -> Out, cumulative product taken along attribute `dim`.
class CumprodOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "(Tensor), The input tensor of cumprod op.");
    AddOutput("Out", "(Tensor), The output tensor of cumprod op.");
    // `dim` has no default value, so callers must always provide it.
    AddAttr<int>(
        "dim",
        "(int), The dim along which the input tensors will be cumproded");
    AddComment(
        R"DOC(Cumprod operator. Return the cumprod results of the input elements along the dim.
For example, if input X is a tensor with rank 1 and N elements, the output will also be a tensor
with rank 1 and N elements, and elements y[i] = x[0] * x[1] * x[2] *...* x[i] (0<=i<N))DOC");
  }
};
// Builds the op description of cumprod_grad from the forward op.
// The backward pass needs the forward input X and forward output Out in
// addition to the upstream gradient Out@GRAD, because d(cumprod)/dx at
// position i depends on the other factors of the running product.
// Instantiated for both static graph (OpDesc) and dygraph (OpBase) below.
template <typename T>
class CumprodGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> grad_op) const override {
    grad_op->SetType("cumprod_grad");
    grad_op->SetInput("X", this->Input("X"));
    grad_op->SetInput("Out", this->Output("Out"));
    grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    grad_op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    // Forward attributes (i.e. `dim`) are reused unchanged by the grad op.
    grad_op->SetAttrMap(this->Attrs());
  }
};
// Backward operator of cumprod. Verifies that all required variables are
// present and propagates shape/LoD from Out@GRAD to X@GRAD (the gradient of
// X always has the same shape as X, which matches Out's shape for cumprod).
class CumprodGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "CumprodGrad");
    OP_INOUT_CHECK(ctx->HasInput("Out"), "Input", "Out", "CumprodGrad");
    // Fix: the name reported in the error message used to be the literal
    // C++ expression "framework::GradVarName(\"Out\")" instead of the actual
    // variable name; report the resolved names Out@GRAD / X@GRAD instead.
    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")),
                   "Input",
                   "Out@GRAD",
                   "CumprodGrad");
    OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")),
                   "Output",
                   "X@GRAD",
                   "CumprodGrad");
    ctx->ShareDim(framework::GradVarName("Out"), framework::GradVarName("X"));
    ctx->ShareLoD(framework::GradVarName("Out"), framework::GradVarName("X"));
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;

// Bind cumprod's shape inference to the shared phi infermeta
// UnchangedInferMetaCheckAxis (output keeps X's shape; `dim` is validated
// against X's rank).
DECLARE_INFER_SHAPE_FUNCTOR(cumprod,
                            CumprodInferShapeFunctor,
                            PD_INFER_META(phi::UnchangedInferMetaCheckAxis));
// Register the forward op with grad makers for both static graph (OpDesc)
// and dygraph (OpBase) modes.
REGISTER_OPERATOR(cumprod,
                  ops::CumprodOp,
                  ops::CumprodOpMaker,
                  ops::CumprodGradOpMaker<paddle::framework::OpDesc>,
                  ops::CumprodGradOpMaker<paddle::imperative::OpBase>,
                  CumprodInferShapeFunctor);
// The grad op carries its own InferShape, so no functor is attached here.
REGISTER_OPERATOR(cumprod_grad, ops::CumprodGradOp);
@@ -337,6 +337,16 @@
    func : cross_grad
    data_type : out_grad
- backward_op : cumprod_grad
forward : cumprod (Tensor x, int dim) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, int dim)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : cumprod_grad
- backward_op : det_grad
  forward : det (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad)
...
@@ -273,16 +273,6 @@
    data_type : softmax
  inplace : (softmax -> input_grad)
- backward_op : cumprod_grad
forward : cumprod (Tensor x, int dim) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, int dim)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : cumprod_grad
- backward_op : cumsum_grad
  forward : cumsum(Tensor x, Scalar axis, bool flatten, bool exclusive, bool reverse) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, Scalar axis, bool flatten, bool exclusive, bool reverse)
...
@@ -388,15 +388,6 @@
    data_type : input
  backward : cross_entropy_with_softmax_grad
- op : cumprod
args : (Tensor x, int dim)
output : Tensor(out)
infer_meta :
func : UnchangedInferMetaCheckAxis
kernel :
func : cumprod
backward : cumprod_grad
- op : cumsum
  args : (Tensor x, Scalar axis, bool flatten, bool exclusive, bool reverse)
  output : Tensor(out)
...
@@ -398,6 +398,15 @@
  outputs :
    out : Out
- op : cumprod
backward : cumprod_grad
inputs :
x : X
attrs :
dim : dim
outputs :
out : Out
- op : cumsum
  backward: cumsum_grad
  inputs :
...
@@ -320,6 +320,15 @@
    data_type : x
  backward : cross_grad
- op : cumprod
args : (Tensor x, int dim)
output : Tensor(out)
infer_meta :
func : UnchangedInferMetaCheckAxis
kernel :
func : cumprod
backward : cumprod_grad
- op : det
  args : (Tensor x)
  output : Tensor
...
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {

// Maps the legacy fluid op "cumprod_grad" onto the phi kernel signature
// cumprod_grad(X, Out, Out@GRAD, dim) -> X@GRAD.
// Renamed from CumprodGradGradOpArgumentMapping: the old name implied a
// second-order (grad-of-grad) mapping, but this maps the first-order grad op.
KernelSignature CumprodGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
  return KernelSignature(
      "cumprod_grad", {"X", "Out", "Out@GRAD"}, {"dim"}, {"X@GRAD"});
}

}  // namespace phi

PD_REGISTER_ARG_MAPPING_FN(cumprod_grad, phi::CumprodGradOpArgumentMapping);
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册