未验证 提交 ab754417 编写于 作者: W Wang Xin 提交者: GitHub

add autogen code support for reverse op (#52701)

* add autogen code support for reverse op

* bug fixed
上级 c4e1fcba
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <memory>
#include <vector>
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"
namespace paddle {
namespace operators {
// Operator definition for "reverse". The only customization over the base
// OperatorWithKernel is kernel selection, which keys off the dtype of the
// input "X" and the place the op executes on.
class ReverseOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  // Build the kernel key from X's indicated data type and the current place.
  phi::KernelKey GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    const auto data_type =
        framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");
    return phi::KernelKey(data_type, ctx.GetPlace());
  }
};
// Propagates both the variable kind and the element data type from input
// "X" to output "Out" during static-graph var-type inference.
class ReverseOpVarTypeInference : public framework::VarTypeInference {
 public:
  void operator()(framework::InferVarTypeContext* ctx) const override {
    const auto out_type = ctx->GetInputType("X");
    const auto out_dtype = ctx->GetInputDataType("X");
    ctx->SetOutputType("Out", out_type);
    ctx->SetOutputDataType("Out", out_dtype);
  }
};
// Proto maker for the reverse op: declares inputs/outputs/attributes and the
// generated user documentation.
//
// Fix: the user-facing attribute description and DOC text were grammatically
// broken ("The axises that along which order of elements is reversed.");
// "axises" is not a word — corrected to "axes" with readable phrasing. The
// numeric examples are unchanged.
class ReverseOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "The phi::DenseTensor to be flipped.");
    AddOutput("Out", "The phi::DenseTensor after flipping.");
    // "axis" may also be provided at runtime as a tensor (SupportTensor).
    AddAttr<std::vector<int>>(
        "axis", "The axes along which the order of elements is reversed.")
        .SupportTensor();
    AddComment(R"DOC(
Reverse Operator.
Reverse the order of elements in the input phi::DenseTensor along the given axes.
Case 1:
Given
X = [[1, 2, 3, 4, 5]
[6, 7, 8, 9, 10]
[11, 12, 13, 14, 15]],
and
axis = [0],
we get:
Out = [[11, 12, 13, 14, 15]
[6, 7, 8, 9, 10]
[1, 2, 3, 4, 5]].
Case 2:
Given
X = [[[1, 2, 3, 4]
[5, 6, 7, 8]]
[[9, 10, 11, 12]
[13, 14, 15, 16]]],
and
axis = [0, 2],
we get:
Out = [[[12, 11, 10, 9]
[16, 15, 14, 13]]
[[4, 3, 2, 1]
[8, 7, 6, 5]]],
)DOC");
  }
};
// Gradient maker: the gradient of reverse is reverse itself applied to the
// upstream gradient along the same axes, i.e. dX = reverse(dOut, axis).
template <typename T>
class ReverseGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

  void Apply(GradOpPtr<T> op) const override {
    op->SetType("reverse");
    // The grad op's "X" is the incoming gradient of "Out";
    // its "Out" is the gradient we produce for the forward "X".
    op->SetInput("X", this->OutputGrad("Out"));
    op->SetOutput("Out", this->InputGrad("X"));
    op->SetAttr("axis", this->GetAttr("axis"));
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
// Bind the phi ReverseInferMeta function as this op's shape-inference functor.
DECLARE_INFER_SHAPE_FUNCTOR(reverse,
                            ReverseInferShapeFunctor,
                            PD_INFER_META(phi::ReverseInferMeta));
// Register the forward "reverse" op with its maker, grad makers (static graph
// OpDesc and imperative OpBase), var-type inference, and shape functor.
REGISTER_OPERATOR(reverse,
                  ops::ReverseOp,
                  ops::ReverseOpMaker,
                  ops::ReverseGradMaker<paddle::framework::OpDesc>,
                  ops::ReverseGradMaker<paddle::imperative::OpBase>,
                  ops::ReverseOpVarTypeInference,
                  ReverseInferShapeFunctor);
// "reverse_grad" reuses ReverseOp directly: reversing is its own inverse, so
// the backward pass is just another reverse along the same axes.
REGISTER_OPERATOR(reverse_grad, ops::ReverseOp, ops::ReverseOpVarTypeInference);
@@ -1328,6 +1328,12 @@
  kernel :
    func : renorm_grad

- backward_op : reverse_grad
  forward : reverse (Tensor x, IntArray axis) -> Tensor(out)
  args : (Tensor out_grad, IntArray axis)
  output : Tensor(x_grad)
  invoke : reverse(out_grad, axis)

- backward_op : roll_grad
  forward : roll(Tensor x, IntArray shifts, int64_t[] axis) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, IntArray shifts, int64_t[] axis)
......
@@ -884,12 +884,6 @@
  backward : reshape_double_grad
  inplace : (out_grad -> x_grad)

- backward_op : reverse_grad
  forward : reverse (Tensor x, IntArray axis) -> Tensor(out)
  args : (Tensor out_grad, IntArray axis)
  output : Tensor(x_grad)
  invoke : reverse(out_grad, axis)

- backward_op : rnn_grad
  forward : rnn (Tensor x, Tensor[] pre_state, Tensor[] weight_list, Tensor sequence_length, Tensor dropout_state_in, float dropout_prob, bool is_bidirec, int input_size, int hidden_size, int num_layers, str mode, int seed, bool is_test) -> Tensor(out), Tensor(dropout_state_out), Tensor[](state), Tensor(reserve)
  args : (Tensor x, Tensor[] pre_state, Tensor[] weight_list, Tensor sequence_length, Tensor out, Tensor dropout_state_out, Tensor reserve, Tensor out_grad, Tensor[] state_grad, float dropout_prob, bool is_bidirec, int input_size, int hidden_size, int num_layers, str mode, int seed, bool is_test)
......
@@ -1133,15 +1133,6 @@
  intermediate : xshape
  backward: reshape_grad

- op : reverse
  args : (Tensor x, IntArray axis)
  output : Tensor
  infer_meta :
    func : ReverseInferMeta
  kernel :
    func : reverse
  backward : reverse_grad

- op : rmsprop_
  args : (Tensor param, Tensor mean_square, Tensor grad, Tensor moment, Tensor learning_rate, Tensor mean_grad, Tensor master_param, float epsilon, float decay, float momentum, bool centered, bool multi_precision)
  output : Tensor(param_out), Tensor(moment_out), Tensor(mean_square_out), Tensor(mean_grad_out), Tensor(master_param_out)
......
@@ -1751,6 +1751,17 @@
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool use_quantizer = false]

- op : reverse
  inputs:
    x : X
  outputs:
    out : Out
  int_array:
    axis :
      data_type : int
      support_tensor : true
  manual_signature : [reverse]

- op : roll
  backward : roll_grad
  inputs :
......
@@ -1436,6 +1436,16 @@
  func : renorm
  backward : renorm_grad

- op : reverse
  args : (Tensor x, IntArray axis)
  output : Tensor
  infer_meta :
    func : ReverseInferMeta
  kernel :
    func : reverse
    data_type : x
  backward : reverse_grad

- op : roll
  args : (Tensor x, IntArray shifts={}, int64_t[] axis={})
  output : Tensor(out)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册