Unverified commit 3065fa2c, authored by Wang Xin, committed by GitHub

add autogen code support for index_add op (#51887)

* add autogen code for index_add op

* bug fixed
Parent commit: 8ef020c1
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <memory>
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/binary.h"
namespace paddle {
namespace operators {
// Static-graph operator definition for index_add.
class IndexAddOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  // Kernel dispatch key: the data type is inferred from the "X" input and
  // the device comes from the current execution place.
  phi::KernelKey GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
    return phi::KernelKey(input_data_type, ctx.GetPlace());
  }
};
// Declares the proto (inputs, outputs, attributes, doc) of index_add for the
// static graph.
class IndexAddOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    // NOTE: adjacent string literals are concatenated at compile time; the
    // original text lacked a space before "bool", producing "should bebool"
    // in the generated op documentation. Fixed by adding the trailing space.
    AddInput("X",
             "(Tensor), "
             "the input feature data of IndexAddOp, dtype should be "
             "bool, int32, int64, float16, float32, float64.");
    AddInput("Index",
             "(Tensor), the 1-D tensor containing the indices to index.");
    AddInput("AddValue", "(Tensor), the tensor containing values to add.");
    AddOutput(
        "Out",
        "(Tensor),"
        " the output of IndexAddOp, whose dtype and shape are the same as X.");
    // Dimension along which indexing happens; defaults to the first axis.
    AddAttr<int>("axis", "the dimension in which we index.").SetDefault(0);
    AddComment(R"DOC(
IndexAdd operator
Add the elements of the input tensor with value
by selecting the indices in the order given in index.
This operator also supports inplace modification.
)DOC");
  }
};
// Builds the index_add_grad op description from the forward index_add op.
template <typename T>
class IndexAddGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

  // Grad op wiring: consumes Index, AddValue and Out@GRAD; produces the
  // gradients of X and AddValue. All forward attributes are carried over.
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("index_add_grad");
    op->SetAttrMap(this->Attrs());
    op->SetInput("Index", this->Input("Index"));
    op->SetInput("AddValue", this->Input("AddValue"));
    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    op->SetOutput(framework::GradVarName("AddValue"),
                  this->InputGrad("AddValue"));
  }
};
// Static-graph operator definition for index_add_grad.
class IndexAddGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  // The grad kernel's data type follows the incoming Out@GRAD tensor.
  phi::KernelKey GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    auto out_grad_name = framework::GradVarName("Out");
    auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, out_grad_name);
    return phi::KernelKey(data_type, ctx.GetPlace());
  }
};
// Forward op may reuse X's buffer for Out (in-place index_add).
DECLARE_INPLACE_OP_INFERER(IndexAddInplaceInferer, {"X", "Out"});
// Grad op may reuse Out@GRAD's buffer for X@GRAD.
DECLARE_INPLACE_OP_INFERER(IndexAddGradInplaceInferer,
{framework::GradVarName("Out"),
framework::GradVarName("X")});
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
// Bind compile-time shape inference of index_add to the phi InferMeta
// function so it is shared with the phi kernel.
DECLARE_INFER_SHAPE_FUNCTOR(index_add,
IndexAddInferShapeFunctor,
PD_INFER_META(phi::IndexAddInferMeta));
// Register the forward op with its maker, grad makers (static graph and
// dygraph), inplace inferer and infer-shape functor.
REGISTER_OPERATOR(index_add,
ops::IndexAddOp,
ops::IndexAddOpMaker,
ops::IndexAddGradMaker<paddle::framework::OpDesc>,
ops::IndexAddGradMaker<paddle::imperative::OpBase>,
ops::IndexAddInplaceInferer,
IndexAddInferShapeFunctor);
// Same binding for the grad op.
DECLARE_INFER_SHAPE_FUNCTOR(index_add_grad,
IndexAddGradInferShapeFunctor,
PD_INFER_META(phi::IndexAddGradInferMeta));
REGISTER_OPERATOR(index_add_grad,
ops::IndexAddGradOp,
ops::IndexAddGradInplaceInferer,
IndexAddGradInferShapeFunctor);
@@ -689,6 +689,17 @@
func : imag_grad
data_type : complex(out_grad)
- backward_op : index_add_grad
forward : index_add(Tensor x, Tensor index, Tensor add_value, int axis=0) -> Tensor(out)
args : (Tensor index, Tensor add_value, Tensor out_grad, int axis)
output : Tensor(x_grad), Tensor(add_value_grad)
infer_meta :
func : IndexAddGradInferMeta
kernel :
func : index_add_grad
data_type : out_grad
inplace : (out_grad -> x_grad)
- backward_op : index_sample_grad
forward : index_sample (Tensor x, Tensor index) -> Tensor(out)
args : (Tensor x, Tensor index, Tensor out_grad)
......
@@ -574,17 +574,6 @@
kernel :
func : huber_loss_grad
- backward_op : index_add_grad
forward : index_add(Tensor x, Tensor index, Tensor add_value, int axis) -> Tensor(out)
args : (Tensor index, Tensor add_value, Tensor out_grad, int axis)
output : Tensor(x_grad), Tensor(add_value_grad)
infer_meta :
func : IndexAddGradInferMeta
kernel :
func : index_add_grad
data_type : out_grad
inplace : (out_grad -> x_grad)
- backward_op : instance_norm_double_grad
forward : instance_norm_grad(Tensor x, Tensor fwd_scale, Tensor saved_mean, Tensor saved_variance, Tensor grad_y, float epsilon) -> Tensor(grad_x), Tensor(grad_scale), Tensor(grad_bias)
args : (Tensor x, Tensor fwd_scale, Tensor saved_mean, Tensor saved_variance, Tensor grad_y, Tensor grad_x_grad, Tensor grad_scale_grad, Tensor grad_bias_grad, float epsilon)
......
@@ -831,17 +831,6 @@
func : increment
inplace : (x -> out)
- op : index_add
args : (Tensor x, Tensor index, Tensor add_value, int axis)
output : Tensor(out)
infer_meta :
func : IndexAddInferMeta
kernel :
func : index_add
data_type : x
inplace : (x -> out)
backward : index_add_grad
- op : instance_norm
args : (Tensor x, Tensor scale, Tensor bias, float epsilon)
output : Tensor(y), Tensor(saved_mean), Tensor(saved_variance)
......
@@ -882,6 +882,12 @@
outputs :
out : Out
- op : index_add
inputs :
{x : X, index : Index, add_value : AddValue}
outputs :
out : Out
- op : index_sample
inputs :
{x : X, index : Index}
......
@@ -710,6 +710,17 @@
func : imag
backward : imag_grad
- op : index_add
args : (Tensor x, Tensor index, Tensor add_value, int axis = 0)
output : Tensor(out)
infer_meta :
func : IndexAddInferMeta
kernel :
func : index_add
data_type : x
inplace : (x -> out)
backward : index_add_grad
- op : index_sample
args : (Tensor x, Tensor index)
output : Tensor
......
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
// Maps the fluid op "index_add" (inputs X/Index/AddValue, attr axis,
// output Out) onto the phi kernel signature of the same name.
// ctx is unused: the mapping does not depend on runtime argument state.
KernelSignature IndexAddOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature(
"index_add", {"X", "Index", "AddValue"}, {"axis"}, {"Out"});
}
// Maps the fluid op "index_add_grad" onto the phi grad kernel: it consumes
// Index, AddValue and Out@GRAD and produces X@GRAD and AddValue@GRAD.
KernelSignature IndexAddGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("index_add_grad",
{"Index", "AddValue", "Out@GRAD"},
{"axis"},
{"X@GRAD", "AddValue@GRAD"});
}
} // namespace phi
// Register the fluid->phi argument mappings for the forward and grad ops.
PD_REGISTER_ARG_MAPPING_FN(index_add, phi::IndexAddOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(index_add_grad, phi::IndexAddGradOpArgumentMapping);
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册